downsize_storm_images(
storm_image_matrix=storm_image_matrix,
radar_field_name=radar_field_name, num_rows_to_keep=num_rows_to_keep,
num_columns_to_keep=num_columns_to_keep)
return {
STORM_IMAGE_MATRIX_KEY: storm_image_matrix,
FULL_IDS_KEY: full_id_strings,
VALID_TIMES_KEY: valid_times_unix_sec,
RADAR_FIELD_NAME_KEY: radar_field_name,
RADAR_HEIGHT_KEY: radar_height_m_agl,
ROTATED_GRIDS_KEY: rotated_grids,
ROTATED_GRID_SPACING_KEY: rotated_grid_spacing_metres
}
def find_storm_image_file(
top_directory_name, spc_date_string, radar_source, radar_field_name,
radar_height_m_agl, unix_time_sec=None, raise_error_if_missing=True):
"""Finds file with storm-centered radar images.
If `unix_time_sec` is None, this method finds a file with images for one SPC
date. Otherwise, it finds a file with images for one time step.
:param top_directory_name: Name of top-level directory with storm-centered
images.
:param spc_date_string: SPC date (format "yyyymmdd").
:param radar_source: Data source (must be accepted by
`radar_utils.check_data_source`).
:param radar_field_name: Name of radar field (must be accepted by
`radar_utils.check_field_name`).
:param radar_height_m_agl: Radar height (metres above ground level).
:param unix_time_sec: [may be None] Time step.
:param raise_error_if_missing: Boolean flag. If file is missing and
`raise_error_if_missing = True`, this method will error out.
:return: storm_image_file_name: Path to image file. If file is missing and
`raise_error_if_missing = False`, this is the *expected* path.
:raises: ValueError: if file is missing and `raise_error_if_missing = True`.
"""
# Check input args.
error_checking.assert_is_string(top_directory_name)
time_conversion.spc_date_string_to_unix_sec(spc_date_string)
radar_utils.check_data_source(radar_source)
radar_utils.check_field_name(radar_field_name)
radar_height_m_agl = int(numpy.round(radar_height_m_agl))
error_checking.assert_is_geq(radar_height_m_agl, 0)
error_checking.assert_is_boolean(raise_error_if_missing)
# Find file.
if unix_time_sec is None:
storm_image_file_name = (
'{0:s}/{1:s}/{2:s}/{3:s}/{4:05d}_metres_agl/storm_images_{5:s}.nc'
).format(
top_directory_name, radar_source, spc_date_string[:4],
radar_field_name, radar_height_m_agl, spc_date_string
)
else:
storm_image_file_name = (
'{0:s}/{1:s}/{2:s}/{3:s}/{4:s}/{5:05d}_metres_agl/'
'storm_images_{6:s}.nc'
).format(
top_directory_name, radar_source, spc_date_string[:4],
spc_date_string, radar_field_name, radar_height_m_agl,
time_conversion.unix_sec_to_string(unix_time_sec, TIME_FORMAT)
)
if raise_error_if_missing and not os.path.isfile(storm_image_file_name):
error_string = 'Cannot find file. Expected at: "{0:s}"'.format(
storm_image_file_name)
raise ValueError(error_string)
return storm_image_file_name
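# Hedged illustration (directory and field names hypothetical): for an SPC-date
# file, the template above resolves to
#   {top_directory_name}/{radar_source}/{yyyy}/{radar_field_name}/
#       {height:05d}_metres_agl/storm_images_{spc_date}.nc
# while a single-time-step file inserts the SPC date as an extra directory and
# names the file after the formatted valid time:
#   {top_directory_name}/{radar_source}/{yyyy}/{spc_date}/{radar_field_name}/
#       {height:05d}_metres_agl/storm_images_{time_string}.nc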
def image_file_name_to_time(storm_image_file_name):
"""Parses time from name of storm-image file.
:param storm_image_file_name: Path to input file.
:return: unix_time_sec: Valid time. If the file contains data for one SPC
date (rather than one time step), this will be None.
:return: spc_date_string: SPC date (format "yyyymmdd").
"""
directory_name, pathless_file_name = os.path.split(storm_image_file_name)
extensionless_file_name, _ = os.path.splitext(pathless_file_name)
time_string = extensionless_file_name.split('_')[-1]
try:
time_conversion.spc_date_string_to_unix_sec(time_string)
return None, time_string
except:
pass
unix_time_sec = time_conversion.string_to_unix_sec(time_string, TIME_FORMAT)
spc_date_string = directory_name.split('/')[-3]
time_conversion.spc_date_string_to_unix_sec(spc_date_string)
return unix_time_sec, spc_date_string
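# Hedged example (file names hypothetical): a path ending in
# "storm_images_20110405.nc" parses as an SPC-date file, so the function above
# returns (None, '20110405'); a file named after a formatted valid time (per
# TIME_FORMAT) instead returns that time, with the SPC date taken from the
# third-from-last directory in the path.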
def image_file_name_to_field(storm_image_file_name):
"""Parses radar field from name of storm-image file.
:param storm_image_file_name: Path to input file.
:return: radar_field_name: Name of radar field.
:raises: ValueError: if radar field cannot be parsed from file name.
"""
subdirectory_names = os.path.split(storm_image_file_name)[0].split('/')
for this_subdir_name in subdirectory_names:
try:
radar_utils.check_field_name(this_subdir_name)
return this_subdir_name
except:
pass
error_string = 'Cannot parse radar field from file name: "{0:s}"'.format(
storm_image_file_name)
raise ValueError(error_string)
def image_file_name_to_height(storm_image_file_name):
"""Parses radar height from name of storm-image file.
:param storm_image_file_name: Path to input file.
:return: radar_height_m_agl: Radar height (metres above ground level).
:raises: ValueError: if radar height cannot be parsed from file name.
"""
keyword = '_metres_agl'
subdirectory_names = os.path.split(storm_image_file_name)[0].split('/')
for this_subdir_name in subdirectory_names:
if keyword in this_subdir_name:
return int(this_subdir_name.replace(keyword, ''))
error_string = 'Cannot parse radar height from file name: "{0:s}"'.format(
storm_image_file_name)
raise ValueError(error_string)
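# Hedged example: a path containing the directory "00250_metres_agl" yields a
# radar height of 250 metres above ground level from the parser above.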
def find_storm_label_file(
storm_image_file_name, top_label_dir_name, label_name,
raise_error_if_missing=True, warn_if_missing=True):
"""Finds file with storm-hazard labels.
:param storm_image_file_name: Path to file with storm-centered radar images.
:param top_label_dir_name: Name of top-level directory with hazard labels.
:param label_name: Name of hazard labels.
:param raise_error_if_missing: Boolean flag. If file is missing and
`raise_error_if_missing = True`, this method will error out.
:param warn_if_missing: Boolean flag. If file is missing and
`raise_error_if_missing = False` and `warn_if_missing = True`, this
method will print a warning.
:return: storm_label_file_name: Path to file with hazard labels. If file is
missing and `raise_error_if_missing = False`, this is the *expected*
path.
:raises: ValueError: if file is missing and `raise_error_if_missing = True`.
"""
error_checking.assert_is_boolean(warn_if_missing)
unix_time_sec, spc_date_string = image_file_name_to_time(
storm_image_file_name)
target_param_dict = target_val_utils.target_name_to_params(label_name)
storm_label_file_name = target_val_utils.find_target_file(
top_directory_name=top_label_dir_name,
event_type_string=target_param_dict[target_val_utils.EVENT_TYPE_KEY],
spc_date_string=spc_date_string, unix_time_sec=unix_time_sec,
raise_error_if_missing=raise_error_if_missing)
if not os.path.isfile(storm_label_file_name) and warn_if_missing:
warning_string = (
'POTENTIAL PROBLEM. Cannot find file. Expected at: "{0:s}"'
).format(storm_label_file_name)
print(warning_string)
return storm_label_file_name
def find_many_files_myrorss_or_mrms(
top_directory_name, radar_source, radar_field_names,
start_time_unix_sec, end_time_unix_sec, one_file_per_time_step=True,
reflectivity_heights_m_agl=None, raise_error_if_all_missing=True,
raise_error_if_any_missing=False):
"""Finds many files with storm-centered images from MYRORSS or MRMS data.
T = number of "file times"
If `one_file_per_time_step = True`, T = number of time steps
Else, T = number of SPC dates
C = number of field/height pairs
:param top_directory_name: Name of top-level directory for storm-centered
images.
:param radar_source: See doc for `_fields_and_heights_to_pairs`.
:param radar_field_names: Same.
:param start_time_unix_sec: Start time. This method will find files for all
times from `start_time_unix_sec`...`end_time_unix_sec`. If
`one_file_per_time_step = False`, start time can be any time on the
first SPC date.
:param end_time_unix_sec: See above.
:param one_file_per_time_step: Boolean flag. If True, this method will seek
one file per field/height and time step. If False, will seek one file
per field/height and SPC date.
:param reflectivity_heights_m_agl: See doc for
`_fields_and_heights_to_pairs`.
:param raise_error_if_all_missing: Boolean flag. If no files are found and
`raise_error_if_all_missing = True`, this method will error out.
:param raise_error_if_any_missing: Boolean flag. If any file is missing and
`raise_error_if_any_missing = True`, will error out.
:return: file_dict: Dictionary with the following keys.
file_dict['image_file_name_matrix']: T-by-C numpy array of paths to image
files.
file_dict['valid_times_unix_sec']: length-T numpy array of valid times. If
`one_file_per_time_step = False`, valid_times_unix_sec[i] is just a time
within the [i]th SPC date.
file_dict['field_name_by_pair']: length-C list with names of radar fields.
file_dict['height_by_pair_m_agl']: length-C numpy array of radar heights
(metres above ground level).
:raises: ValueError: if no files are found and
`raise_error_if_all_missing = True`.
"""
field_name_by_pair, height_by_pair_m_agl = _fields_and_heights_to_pairs(
radar_field_names=radar_field_names,
reflectivity_heights_m_agl=reflectivity_heights_m_agl,
radar_source=radar_source)
first_spc_date_string = time_conversion.time_to_spc_date_string(
start_time_unix_sec)
last_spc_date_string = time_conversion.time_to_spc_date_string(
end_time_unix_sec)
spc_date_strings = time_conversion.get_spc_dates_in_range(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string)
error_checking.assert_is_boolean(one_file_per_time_step)
error_checking.assert_is_boolean(raise_error_if_all_missing)
error_checking.assert_is_boolean(raise_error_if_any_missing)
file_dict = {
FIELD_NAME_BY_PAIR_KEY: field_name_by_pair,
HEIGHT_BY_PAIR_KEY: height_by_pair_m_agl
}
if one_file_per_time_step:
image_file_name_matrix = None
valid_times_unix_sec = None
for i in range(len(spc_date_strings)):
print('Finding storm-image files for SPC date "{0:s}"...'.format(
spc_date_strings[i]
))
this_file_name_matrix, these_times_unix_sec = (
_find_many_files_one_spc_date(
top_directory_name=top_directory_name,
start_time_unix_sec=start_time_unix_sec,
end_time_unix_sec=end_time_unix_sec,
spc_date_string=spc_date_strings[i],
radar_source=radar_source,
field_name_by_pair=field_name_by_pair,
height_by_pair_m_agl=height_by_pair_m_agl,
raise_error_if_all_missing=False,
raise_error_if_any_missing=raise_error_if_any_missing)
)
if this_file_name_matrix is None:
continue
if image_file_name_matrix is None:
image_file_name_matrix = copy.deepcopy(this_file_name_matrix)
valid_times_unix_sec = these_times_unix_sec + 0
else:
image_file_name_matrix = numpy.concatenate(
(image_file_name_matrix, this_file_name_matrix), axis=0
)
valid_times_unix_sec = numpy.concatenate((
valid_times_unix_sec, these_times_unix_sec
))
if raise_error_if_all_missing and image_file_name_matrix is None:
start_time_string = time_conversion.unix_sec_to_string(
start_time_unix_sec, TIME_FORMAT)
end_time_string = time_conversion.unix_sec_to_string(
end_time_unix_sec, TIME_FORMAT)
error_string = 'Cannot find any files from {0:s} to {1:s}.'.format(
start_time_string, end_time_string)
raise ValueError(error_string)
file_dict.update({
IMAGE_FILE_NAMES_KEY: image_file_name_matrix,
VALID_TIMES_KEY: valid_times_unix_sec
})
return file_dict
image_file_name_matrix = None
valid_spc_date_strings = None
valid_times_unix_sec = None
num_field_height_pairs = len(field_name_by_pair)
for j in range(num_field_height_pairs):
print((
'Finding storm-image files for "{0:s}" at {1:d} metres AGL...'
).format(
field_name_by_pair[j], height_by_pair_m_agl[j]
))
if j == 0:
image_file_names = []
valid_spc_date_strings = []
for i in range(len(spc_date_strings)):
this_file_name = find_storm_image_file(
top_directory_name=top_directory_name,
spc_date_string=spc_date_strings[i],
radar_source=radar_source,
radar_field_name=field_name_by_pair[j],
radar_height_m_agl=height_by_pair_m_agl[j],
raise_error_if_missing=raise_error_if_any_missing)
if not os.path.isfile(this_file_name):
continue
image_file_names.append(this_file_name)
valid_spc_date_strings.append(spc_date_strings[i])
num_times = len(image_file_names)
if num_times == 0:
if raise_error_if_all_missing:
error_string = (
'Cannot find any files from SPC dates "{0:s}" to '
'"{1:s}".'
).format(spc_date_strings[0], spc_date_strings[-1])
raise ValueError(error_string)
file_dict.update({
IMAGE_FILE_NAMES_KEY: None, VALID_TIMES_KEY: None
})
return file_dict
image_file_name_matrix = numpy.full(
(num_times, num_field_height_pairs), '', dtype=object
)
image_file_name_matrix[:, j] = numpy.array(
image_file_names, dtype=object)
valid_times_unix_sec = numpy.array([
time_conversion.spc_date_string_to_unix_sec(s)
for s in valid_spc_date_strings
], dtype=int)
else:
for i in range(len(valid_spc_date_strings)):
image_file_name_matrix[i, j] = find_storm_image_file(
top_directory_name=top_directory_name,
spc_date_string=valid_spc_date_strings[i],
radar_source=radar_source,
radar_field_name=field_name_by_pair[j],
radar_height_m_agl=height_by_pair_m_agl[j],
raise_error_if_missing=raise_error_if_any_missing)
if not os.path.isfile(image_file_name_matrix[i, j]):
image_file_name_matrix[i, j] = ''
file_dict.update({
IMAGE_FILE_NAMES_KEY: image_file_name_matrix,
VALID_TIMES_KEY: valid_times_unix_sec
})
return file_dict
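# Hedged usage sketch (directory, source, and field names hypothetical): find
# one file per SPC date for a single reflectivity height over a short period.
#
# file_dict = find_many_files_myrorss_or_mrms(
#     top_directory_name='/data/storm_images', radar_source='myrorss',
#     radar_field_names=['reflectivity_dbz'],
#     reflectivity_heights_m_agl=numpy.array([250]),
#     start_time_unix_sec=1302307200, end_time_unix_sec=1302480000,
#     one_file_per_time_step=False)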
def find_many_files_gridrad(
top_directory_name, radar_field_names, radar_heights_m_agl,
start_time_unix_sec, end_time_unix_sec, one_file_per_time_step=True,
raise_error_if_all_missing=True):
"""Finds many files with storm-centered images from GridRad data.
T = number of "file times"
If `one_file_per_time_step = True`, T = number of time steps
Else, T = number of SPC dates
F = number of radar fields
H = number of radar heights
:param top_directory_name: Name of top-level directory for storm-centered
images.
:param radar_field_names: length-F list with names of radar fields.
:param radar_heights_m_agl: length-H numpy array of radar heights (metres
above ground level).
:param start_time_unix_sec: See doc for `find_many_files_myrorss_or_mrms`.
:param end_time_unix_sec: Same.
:param one_file_per_time_step: Same.
:param raise_error_if_all_missing: Same.
:return: file_dict: Dictionary with the following keys.
file_dict['image_file_name_matrix']: T-by-F-by-H numpy array of paths to
image files.
file_dict['valid_times_unix_sec']: length-T numpy array of valid times. If
`one_file_per_time_step = False`, valid_times_unix_sec[i] is just a time
within the [i]th SPC date.
file_dict['radar_field_names']: Same as input.
file_dict['radar_heights_m_agl']: Same as input.
"""
error_checking.assert_is_numpy_array(
numpy.array(radar_field_names), num_dimensions=1
)
for this_field_name in radar_field_names:
radar_utils.check_field_name(this_field_name)
error_checking.assert_is_numpy_array(radar_heights_m_agl, num_dimensions=1)
error_checking.assert_is_geq_numpy_array(radar_heights_m_agl, 0)
radar_heights_m_agl = numpy.round(radar_heights_m_agl).astype(int)
error_checking.assert_is_boolean(one_file_per_time_step)
error_checking.assert_is_boolean(raise_error_if_all_missing)
if one_file_per_time_step:
all_times_unix_sec = time_periods.range_and_interval_to_list(
start_time_unix_sec=start_time_unix_sec,
end_time_unix_sec=end_time_unix_sec,
time_interval_sec=GRIDRAD_TIME_INTERVAL_SEC, include_endpoint=True)
good_indices = numpy.where(numpy.logical_and(
all_times_unix_sec >= start_time_unix_sec,
all_times_unix_sec <= end_time_unix_sec
))[0]
all_times_unix_sec = all_times_unix_sec[good_indices]
all_spc_date_strings = [
time_conversion.time_to_spc_date_string(t)
for t in all_times_unix_sec
]
else:
first_spc_date_string
#!/usr/bin/env python3
import numpy as np
import logging
import os.path
import time
import math
# logger setup
logger = logging.getLogger(__name__)
##### USER DEFINED GENERAL SETTINGS #####
#set new name for each experiment, otherwise files will be overwritten
EXP_NAME = 'cas9pace_20210217_darwin_expt'
EVOLVER_IP = '192.168.1.9'
EVOLVER_PORT = 8081
##### Identify pump calibration files, define initial values for temperature, stirring, volume, power settings
TEMP_INITIAL = [37] * 16 #degrees C, makes 16-value list
#Alternatively enter 16-value list to set different values
#TEMP_INITIAL = [30,30,30,30,32,32,32,32,34,34,34,34,36,36,36,36]
STIR_INITIAL = [8] * 16 #try 8,10,12 etc; makes 16-value list
#Alternatively enter 16-value list to set different values
#STIR_INITIAL = [7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10]
LAGOON_VOLUME = 10 # ml
VOLUME = 30 #mL, determined by vial cap straw length
PUMP_CAL_FILE = 'pump_cal.txt' #tab delimited, mL/s with 16 influx pumps on first row, etc.
OPERATION_MODE = 'chemostat' #use to choose between 'turbidostat' and 'chemostat' functions
# IPP Calibrations. Function of form rate = c*frequency^b
c = [5.016]
b = [.5485]
efflux_addrs = [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]
##### END OF USER DEFINED GENERAL SETTINGS #####
def hz_to_rate(hz, c, b):
return c * math.pow(hz, b)
def rate_to_hz(rate, c, b):
""" rate should be in ml/h """
return math.pow(((rate) / c), 1.0/b)
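# Added sketch (not part of the original protocol): hz_to_rate() and
# rate_to_hz() are algebraic inverses, so converting a flow rate to a pump
# frequency and back should recover the input. Handy as a quick sanity check
# on the calibration constants c and b.
def _check_ipp_calibration(rate_ml_per_h=5.0):
    hz = rate_to_hz(rate_ml_per_h, c[0], b[0])
    recovered = hz_to_rate(hz, c[0], b[0])
    assert abs(recovered - rate_ml_per_h) < 1e-9, "IPP calibration round-trip failed"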
def ipp_calculations(elapsed_time, eVOLVER):
# arabinose stock concentration is at 250 mM
# Solenoid addresses for each ipp.
# Each pump requires 3 addresses. These vars capture the 1st of the three (sequential)
v2v_addr = 32
ipp_min_waiting_time = 4
# Sets the minimum amount of time that the experiment must run
# before the IPP selection scheme will start
bolus_amount = .4 # ml
bolus_rate = 10
bolus_time = bolus_amount / hz_to_rate(10, c[0], b[0])
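# bolus_time is in hours: bolus_amount (ml) divided by the delivery rate (ml/h)
# at the bolus frequency, so it can be compared directly with elapsed_time
# (assumed to be tracked in hours, as elsewhere in this script).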
turnover_time = 1 # hours
init_rate = .08
start_rate = 0.04 # lagoon volumes per hour (V/h)
# Start ipp protocol
if (elapsed_time > ipp_min_waiting_time):
if (elapsed_time < ipp_min_waiting_time + bolus_time):
# Start bolus
print("ipp bolus")
rate = bolus_rate
vial = 'all'
else:
rate = rate_to_hz(start_rate * LAGOON_VOLUME, c[0], b[0])
vial = 'all'
print("running ipp cmd. addr: {0}, vial: {1}, rate: {2}".format(v2v_addr, vial, round(rate,3)))
eVOLVER.ipp_command(v2v_addr, vial, round(rate,3))
def turbidostat(eVOLVER, input_data, vials, elapsed_time, run_efflux):
OD_data = input_data['transformed']['od']
##### USER DEFINED VARIABLES #####
turbidostat_vials = vials #vials is all 16, can set to different range (ex. [0,1,2,3]) to only trigger tstat on those vials
stop_after_n_curves = np.inf #set to np.inf to never stop, or integer value to stop diluting after certain number of growth curves
#Alternatively, use 16 value list to set different thresholds, use 9999 for vials not being used
lower_thresh = [0.5, 0.5, 0.5, 0.5, 9999, 9999, 9999, 9999, 0.5, 0.4, 0.5, 0.5, 9999, 9999, 9999, 9999]
upper_thresh = [0.6, 0.6, 0.6, 0.6, 9999, 9999, 9999, 9999, 0.6, 0.5, 0.6, 0.6, 9999, 9999, 9999, 9999]
##### END OF USER DEFINED VARIABLES #####
##### Turbidostat Settings #####
#Tunable settings for overflow protection, pump scheduling etc. Unlikely to change between expts
time_out = 5 #(sec) additional amount of time to run efflux pump
pump_wait = 3 # (min) minimum amount of time to wait between pump events
##### End of Turbidostat Settings #####
save_path = os.path.dirname(os.path.realpath(__file__)) #save path
flow_rate = eVOLVER.get_flow_rate() #read from calibration file
##### Turbidostat Control Code Below #####
#ipp_calculations(elapsed_time, eVOLVER)
# fluidic message: initialized so that no change is sent
MESSAGE = ['--'] * 48
if run_efflux:
for addr in efflux_addrs:
MESSAGE[addr] = 6
for x in turbidostat_vials: #main loop through each vial
# Update turbidostat configuration files for each vial
# initialize OD and find OD path
file_name = "vial{0}_ODset.txt".format(x)
ODset_path = os.path.join(save_path, EXP_NAME, 'ODset', file_name)
data = np.genfromtxt(ODset_path, delimiter=',')
ODset = data[len(data)-1][1]
ODsettime = data[len(data)-1][0]
num_curves = len(data) / 2
file_name = "vial{0}_OD.txt".format(x)
OD_path = os.path.join(save_path, EXP_NAME, 'OD', file_name)
data = np.genfromtxt(OD_path, delimiter=',')
average_OD = 0
# Determine whether turbidostat dilutions are needed
enough_ODdata = (len(data) > 7) #logical, checks to see if enough data points (couple minutes) for sliding window
collecting_more_curves = (num_curves <= (stop_after_n_curves + 2)) #logical, checks to see if enough growth curves have happened
if enough_ODdata:
# Take median to avoid outlier
od_values_from_file = []
for n in range(1,7):
od_values_from_file.append(data[len(data)-n][1])
average_OD = float(np.median(od_values_from_file))
#if recently exceeded upper threshold, note end of growth curve in ODset, allow dilutions to occur and growthrate to be measured
if (average_OD > upper_thresh[x]) and (ODset != lower_thresh[x]):
text_file = open(ODset_path, "a+")
text_file.write("{0},{1}\n".format(elapsed_time,
lower_thresh[x]))
text_file.close()
ODset = lower_thresh[x]
# calculate growth rate
eVOLVER.calc_growth_rate(x, ODsettime, elapsed_time)
#if have approx. reached lower threshold, note start of growth curve in ODset
if (average_OD < (lower_thresh[x] + (upper_thresh[x] - lower_thresh[x]) / 3)) and (ODset != upper_thresh[x]):
text_file = open(ODset_path, "a+")
text_file.write("{0},{1}\n".format(elapsed_time, upper_thresh[x]))
text_file.close()
ODset = upper_thresh[x]
#if need to dilute to lower threshold, then calculate amount of time to pump
if average_OD > ODset and collecting_more_curves:
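# Dilution is modelled as exponential washout, OD(t) = OD*exp(-(flow_rate/VOLUME)*t),
# so the pump time needed to reach lower_thresh is t = -ln(lower_thresh/OD)*VOLUME/flow_rate.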
time_in = - (np.log(lower_thresh[x]/average_OD)*VOLUME)/flow_rate[x]
if time_in > 20:
time_in = 20
time_in = round(time_in, 2)
file_name = "vial{0}_pump_log.txt".format(x)
file_path = os.path.join(save_path, EXP_NAME,
'pump_log', file_name)
data = np.genfromtxt(file_path, delimiter=',')
last_pump = data[len(data)-1][0]
if ((elapsed_time - last_pump)*60) >= pump_wait: # if sufficient time since last pump, send command to Arduino
logger.info('turbidostat dilution for vial %d' % x)
# influx pump
MESSAGE[x] = str(time_in)
# efflux pump
MESSAGE[x + 16] = str(time_in + time_out)
file_name = "vial{0}_pump_log.txt".format(x)
file_path = os.path.join(save_path, EXP_NAME, 'pump_log', file_name)
text_file = open(file_path, "a+")
text_file.write("{0},{1}\n".format(elapsed_time, time_in))
text_file.close()
else:
logger.debug('not enough OD measurements for vial %d' % x)
# send fluidic command only if we are actually turning on any of the pumps
if MESSAGE != ['--'] * 48:
eVOLVER.fluid_command(MESSAGE)
# your_FB_function_here() #good spot to call feedback functions for dynamic temperature, stirring, etc for ind. vials
# your_function_here() #good spot to call non-feedback functions for dynamic temperature, stirring, etc.
# end of turbidostat() fxn
def chemostat(eVOLVER, input_data, vials, elapsed_time, run_efflux):
OD_data = input_data['transformed']['od']
##### USER DEFINED VARIABLES #####
start_OD = 0 # ~OD600, set to 0 to start chemostat dilutions at any positive OD
start_time = 4 #hrs, set 0 to start immediately
# Note that script uses AND logic, so both start time and start OD must be surpassed
chemostat_vials = vials #vials is all 16, can set to different range (ex. [0,1,2,3]) to only trigger chemostat dilutions on those vials
#rate_config = [1] * 16 #to set all vials to the same value, creates 16-value list
#UNITS of 1/hr, NOT mL/hr, rate = flowrate/volume, so dilution rate ~ growth rate, set to 0 for unused vials
#Alternatively, use 16 value list to set different rates, use 0 for vials not being used
# SLOW PUMP ARRAY FOR PACE: First 8 (0-7) pumps are for media/chemostat, last 8 (8-15) are for vial to vial.
#rate_config = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
rate_config = [1,1,1,1,.25,1,1.25,.5,1,1,1,1,.25,1.25,.75,.75]
##### END OF USER DEFINED VARIABLES #####
##### Chemostat Settings #####
#Tunable settings for bolus, etc. Unlikely to change between expts
bolus = 0.1 #mL, slow rates can do much lower bolus sizes
##### End of Chemostat Settings #####
save_path = os.path.dirname(os.path.realpath(__file__)) #save path
flow_rate = eVOLVER.get_flow_rate() #read from calibration file
period_config = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] #initialize array
bolus_in_s = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] #initialize array
##### Chemostat Control Code Below #####
for x in chemostat_vials: #main loop through each vial
# Update chemostat configuration files for each vial
#initialize OD and find OD path
file_name = "vial{0}_OD.txt".format(x)
OD_path = os.path.join(save_path, EXP_NAME, 'OD', file_name)
data = np.genfromtxt(OD_path, delimiter=',')
average_OD = 0
enough_ODdata = (len(data) > 7) #logical, checks to see if enough data points (couple minutes) for sliding window
if enough_ODdata: #waits for seven OD measurements (couple minutes) for sliding window
#calculate median OD
od_values_from_file = []
for n in range(1, 7):
od_values_from_file.append(data[len(data)-n][1])
average_OD = float(np.median(od_values_from_file))
# set chemostat config path and pull current state from file
file_name = "vial{0}_chemo_config.txt".format(x)
chemoconfig_path = os.path.join(save_path, EXP_NAME,
'chemo_config', file_name)
chemo_config = np.genfromtxt(chemoconfig_path, delimiter=',')
last_chemoset = chemo_config[len(chemo_config)-1][0] #should t=0 initially, changes each time a new command is written to file
last_chemophase = chemo_config[len(chemo_config)-1][1] #should be zero initially, changes each time a new command is written to file
last_chemorate = chemo_config[len(chemo_config)-1][2] #should be 0 initially, then period in seconds after new commands are sent
-0.08 0.36 0.74 0.05
2 2.20e+05 -30.91 |
2 2.20e+05 -30.91 | -30.91 1.7 1600 0 | -0.11 0.23 0.60 0.04
2 2.48e+05 -30.91 | -59.86 16.5 1600 0 | -0.04 0.17 0.45 0.03
2 2.73e+05 -30.91 | -66.04 29.2 713 724 | -0.03 0.12 -0.53 0.03
2 2.97e+05 -30.91 | -90.05 35.6 783 485 | -0.10 0.16 -1.07 0.03
2 3.22e+05 -30.91 | -38.24 41.4 507 251 | -0.18 0.15 -3.86 0.04
2 3.48e+05 235.88 |
2 3.48e+05 235.88 | 235.88 124.6 1210 376 | -0.06 0.17 -2.53 0.04
2 3.70e+05 235.88 | -79.56 3.8 138 12 | 0.08 0.19 0.85 0.05
2 3.88e+05 235.88 | 171.11 174.6 933 517 | -0.14 0.21 2.51 0.05
2 4.08e+05 322.22 |
2 4.08e+05 322.22 | 322.22 0.6 1291 12 | -0.39 0.20 0.15 0.06
| UsedTime: 1686 | SavedDir: ./BipedalWalker-v3_ReliableSAC_2
| Learner: Save in ./BipedalWalker-v3_ReliableSAC_2
| LearnerPipe.run: ReplayBuffer saving in ./BipedalWalker-v3_ReliableSAC_2
"""
elif env_name == 'Hopper-v2':
env_func = gym.make
env_args = {
'env_num': 1,
'env_name': 'Hopper-v2',
'max_step': 1000,
'state_dim': 11,
'action_dim': 3,
'if_discrete': False,
'target_return': 3800.,
}
args = Arguments(agent, env_func=env_func, env_args=env_args)
args.eval_times = 2 ** 2
args.reward_scale = 2 ** -4
args.target_step = args.max_step * 2
args.worker_num = 2
args.net_dim = 2 ** 8
args.num_layer = 3
args.batch_size = int(args.net_dim * 2)
args.repeat_times = 2 ** 4
args.gamma = 0.993 # todo
args.if_allow_break = False
args.break_step = int(8e6)
"""
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
5 1.61e+04 131.99 | 131.99 3.6 81 2 | 0.03 0.09 0.03 -0.54
5 2.20e+05 391.44 | 391.44 0.3 158 0 | 0.08 0.01 -0.06 -0.75
5 4.25e+05 860.96 | 860.96 11.9 280 5 | 0.09 0.11 0.12 -0.84
5 6.27e+05 3001.43 | 3001.43 7.9 1000 0 | 0.10 0.78 -0.01 -0.85
5 1.64e+06 3203.09 | 3103.14 0.0 1000 0 | 0.10 1.82 -0.06 -0.76
5 2.86e+06 3256.43 | 3152.72 0.0 1000 0 | 0.10 0.75 0.01 -0.67
5 3.88e+06 3256.43 | 1549.69 0.0 512 0 | 0.10 0.86 0.00 -0.71
| UsedTime: 2565 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
2 1.60e+04 328.68 | 328.68 6.2 262 6 | 0.02 0.01 -0.02 -0.54
2 2.16e+05 2460.57 | 2460.57 14.3 1000 0 | 0.09 0.86 0.20 -0.74
2 6.22e+05 2789.97 | 2788.28 30.9 1000 0 | 0.10 0.40 -0.11 -1.04
2 1.23e+06 3263.16 | 3216.96 0.0 1000 0 | 0.10 1.06 0.12 -1.05
2 2.46e+06 3378.50 | 3364.02 0.0 1000 0 | 0.11 0.87 0.02 -0.92
2 3.90e+06 3397.88 | 3302.80 0.0 1000 0 | 0.11 0.46 0.01 -0.93
| UsedTime: 2557 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
4 2.41e+04 222.39 | 222.39 1.5 120 1 | 0.94 8.45 0.05 -0.55
4 5.34e+05 344.58 | 344.58 0.4 142 0 | 2.41 1.91 0.02 -0.94
4 8.74e+05 540.69 | 540.69 20.1 180 4 | 2.96 5.82 0.00 -1.10
4 1.39e+06 989.51 | 989.51 2.2 308 2 | 3.20 16.75 0.07 -1.08
4 1.73e+06 3161.60 | 3149.35 0.0 1000 0 | 3.26 43.84 -0.02 -1.08
4 2.06e+06 3367.27 | 3105.77 0.0 1000 0 | 3.32 44.14 0.00 -1.13
4 3.92e+06 3604.42 | 3565.39 0.0 1000 0 | 3.44 30.54 0.04 -1.04
4 5.76e+06 3717.06 | 3607.94 0.0 1000 0 | 3.40 51.92 0.07 -0.95
4 6.26e+06 3840.95 | 3409.25 0.0 1000 0 | 3.32 66.48 -0.02 -0.94
| UsedTime: 6251 |
| Arguments Remove cwd: ./Hopper-v2_PPO_4
################################################################################
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
4 4.00e+03 80.27 |
4 4.00e+03 80.27 | 80.27 0.7 50 0 | 0.07 0.92 0.07 0.00
4 1.46e+05 408.17 |
4 1.46e+05 408.17 | 408.17 1.5 145 1 | 0.17 0.03 0.13 0.00
4 2.90e+05 1856.89 |
4 2.90e+05 1856.89 | 1856.89 589.7 692 219 | 0.19 1.01 -0.05 0.00
4 4.34e+05 1856.89 | 1768.29 0.0 544 0 | 0.20 0.19 0.08 0.00
4 5.81e+05 2750.76 |
4 5.81e+05 2750.76 | 2750.76 15.5 1000 0 | 0.18 2.98 0.10 0.00
4 7.24e+05 3036.12 |
4 7.24e+05 3036.12 | 3036.12 2.2 1000 0 | 0.19 2.75 0.05 0.00
4 8.70e+05 3036.12 | 2790.94 0.0 1000 0 | 0.19 2.01 0.05 0.00
4 1.01e+06 3220.42 |
4 1.01e+06 3220.42 | 3220.42 2.7 1000 0 | 0.20 1.70 0.05 0.00
4 1.15e+06 3220.42 | 3059.18 312.9 939 105 | 0.20 1.30 0.05 0.00
4 1.30e+06 3220.42 | 2892.04 0.0 1000 0 | 0.19 2.23 -0.06 0.00
4 1.44e+06 3220.42 | 3153.15 0.0 1000 0 | 0.20 1.04 0.05 0.00
4 1.59e+06 3220.42 | 3083.19 0.0 1000 0 | 0.20 1.40 0.05 0.00
4 1.73e+06 3220.42 | 2999.16 0.0 1000 0 | 0.19 2.53 0.03 0.00
4 1.88e+06 3220.42 | 3219.83 31.4 1000 0 | 0.20 1.11 0.04 0.00
4 2.01e+06 3220.42 | 1465.88 0.0 499 0 | 0.19 2.71 0.22 0.00
4 2.16e+06 3220.42 | 3157.03 0.0 1000 0 | 0.21 0.62 0.08 0.00
4 2.30e+06 3220.42 | 1256.07 0.0 379 0 | 0.20 2.89 0.07 0.00
4 2.45e+06 3265.09 |
4 2.45e+06 3265.09 | 3265.09 14.0 1000 0 | 0.19 3.52 0.01 0.00
4 2.59e+06 3265.09 | 1562.53 0.0 498 0 | 0.18 2.96 0.08 0.00
4 2.73e+06 3265.09 | 3238.68 0.0 1000 0 | 0.20 1.73 0.07 0.00
4 2.87e+06 3265.09 | 3240.99 0.0 1000 0 | 0.20 3.32 -0.16 0.00
4 3.02e+06 3265.09 | 3141.53 0.0 1000 0 | 0.19 3.55 -0.04 0.00
4 3.16e+06 3265.09 | 3252.13 0.0 1000 0 | 0.21 1.44 -0.03 0.00
4 3.30e+06 3265.09 | 3164.95 0.0 1000 0 | 0.20 2.48 0.10 0.00
| UsedTime: 3049 | SavedDir: ./Hopper-v2_PPO_4
| Arguments Remove cwd: ./Hopper-v2_ReliableSAC_3
################################################################################
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
3 8.09e+03 32.31 |
3 8.09e+03 32.31 | 32.31 0.6 29 0 | 0.05 0.12 0.44 0.05
3 4.86e+04 282.45 |
3 4.86e+04 282.45 | 282.45 2.6 112 1 | 0.14 0.03 6.95 0.07
3 6.93e+04 282.45 | 82.75 0.0 56 0 | 0.16 0.54 31.61 0.28
3 9.00e+04 282.45 | 247.28 0.0 124 0 | 0.12 0.53 54.26 0.26
3 1.06e+05 306.45 |
3 1.06e+05 306.45 | 306.45 1.4 120 0 | 0.15 0.50 55.85 0.38
3 1.18e+05 406.01 |
3 1.18e+05 406.01 | 406.01 2.9 177 1 | 0.14 0.59 61.50 0.51
3 1.31e+05 450.03 |
3 1.31e+05 450.03 | 450.03 5.5 170 2 | 0.12 0.72 66.32 0.43
3 1.47e+05 450.03 | 155.34 0.0 88 0 | 0.13 0.90 87.74 0.47
3 1.64e+05 450.03 | 154.29 0.0 85 0 | 0.15 0.84 83.98 0.59
3 1.76e+05 450.03 | 276.25 0.0 132 0 | 0.13 0.91 91.40 0.46
3 1.89e+05 1351.93 |
3 1.89e+05 1351.93 | 1351.93 360.9 686 202 | 0.14 0.81 87.22 0.47
3 2.01e+05 1351.93 | 278.82 0.0 117 0 | 0.07 1.26 105.55 0.74
3 2.14e+05 1351.93 | 891.33 0.0 659 0 | 0.14 1.23 124.07 0.52
3 2.27e+05 1351.93 | 645.46 0.0 252 0 | 0.10 1.24 103.56 0.56
3 2.39e+05 1351.93 | 231.23 0.0 109 0 | 0.11 2.02 151.70 0.45
3 2.52e+05 1351.93 | 603.45 0.0 239 0 | 0.15 1.75 134.23 0.75
3 2.64e+05 1351.93 | 1030.42 0.0 1000 0 | 0.12 1.13 107.67 0.67
3 2.73e+05 1351.93 | 1047.41 0.0 1000 0 | 0.07 0.74 93.73 0.50
3 2.82e+05 1351.93 | 1111.93 0.0 1000 0 | 0.08 0.62 81.83 0.38
3 2.91e+05 1351.93 | 1039.49 0.0 1000 0 | 0.09 0.52 66.07 0.34
3 3.00e+05 1490.20 |
3 3.00e+05 1490.20 | 1490.20 0.7 1000 0 | 0.08 0.34 55.97 0.23
3 3.09e+05 1526.75 |
3 3.09e+05 1526.75 | 1526.75 36.5 1000 0 | 0.10 0.21 41.72 0.17
3 3.18e+05 1526.75 | 240.00 0.0 107 0 | 0.13 1.18 83.82 0.59
3 3.27e+05 1526.75 | 670.09 0.0 268 0 | 0.12 0.81 107.00 0.44
3 3.35e+05 1526.75 | 712.00 0.0 442 0 | 0.16 0.71 81.89 0.33
3 3.43e+05 1526.75 | 544.57 0.0 276 0 | 0.13 0.43 60.21 0.18
3 3.52e+05 1526.75 | 379.76 0.0 136 0 | 0.16 0.59 66.49 0.25
3 3.61e+05 1526.75 | 1025.86 0.0 1000 0 | 0.17 0.77 98.66 0.46
3 3.69e+05 1526.75 | 1039.17 0.0 1000 0 | 0.06 0.66 88.29 0.39
3 3.77e+05 1782.66 |
3 3.77e+05 1782.66 | 1782.66 2.0 1000 0 | 0.10 0.40 65.66
"""A representation of the configuration form we expect to receive from EpiViz.
The hope is that this form will do as much validation and precondition checking
as is feasible within the constraint that it must be able to validate a full
EpiViz parameter document in significantly less than one second. This is
because it will be used as part of a web service which gates EpiViz submissions
and must return in near real time.
The Configuration class is the root of the form.
"""
import numpy as np
from cascade.core.form import (
Form,
BoolField,
IntField,
FloatField,
StrField,
StringListField,
ListField,
OptionField,
FormList,
Dummy,
)
from cascade.model import priors
from cascade.core.log import getLoggers
CODELOG, MATHLOG = getLoggers(__name__)
class SmoothingPrior(Form):
"""Priors for smoothing."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.prior_object = None
prior_type = OptionField(["dage", "dtime", "value"])
age_lower = FloatField(nullable=True, display="Age lower")
age_upper = FloatField(nullable=True, display="Age upper")
time_lower = FloatField(nullable=True, display="Time lower")
time_upper = FloatField(nullable=True, display="Time upper")
born_lower = FloatField(nullable=True, display="Born lower")
born_upper = FloatField(nullable=True, display="Born upper")
density = OptionField(
["uniform", "gaussian", "laplace", "students", "log_gaussian", "log_laplace", "log_students"], display="Density"
)
min = FloatField(nullable=True, default=float("-inf"), display="Min")
mean = FloatField(nullable=True, display="Mean")
max = FloatField(nullable=True, default=float("inf"), display="Max")
std = FloatField(nullable=True, display="Std")
nu = FloatField(nullable=True)
eta = FloatField(nullable=True)
def _full_form_validation(self, root): # noqa: C901 too complex
errors = []
if not self.is_field_unset("age_lower") and not self.is_field_unset("age_upper"):
if self.age_lower > self.age_upper:
errors.append("age_lower must be less than or equal to age_upper")
if not self.is_field_unset("time_lower") and not self.is_field_unset("time_upper"):
if self.time_lower > self.time_upper:
errors.append("time_lower must be less than or equal to time_upper")
try:
lower = self.min
upper = self.max
mean = self.mean
if mean is None and (np.isinf(lower) or np.isinf(upper)):
mean = max(lower, 0)
std = self.std
if self.nu is None:
if self.density == "students" and not root.is_field_unset("students_dof"):
nu = root.students_dof.priors
elif self.density == "log_students" and not root.is_field_unset("log_students_dof"):
nu = root.log_students_dof.priors
else:
nu = None
else:
nu = self.nu
if self.eta is None:
if not root.is_field_unset("eta"):
eta = root.eta.priors
else:
eta = None
else:
eta = self.eta
if self.density == "uniform":
self.prior_object = priors.Uniform(lower, upper, mean)
elif self.density == "gaussian":
self.prior_object = priors.Gaussian(mean, std, lower, upper)
elif self.density == "laplace":
self.prior_object = priors.Laplace(mean, std, lower, upper)
elif self.density == "students":
self.prior_object = priors.StudentsT(mean, std, nu, lower, upper)
elif self.density == "log_gaussian":
self.prior_object = priors.LogGaussian(mean, std, eta, lower, upper)
elif self.density == "log_laplace":
self.prior_object = priors.LogLaplace(mean, std, eta, lower, upper)
elif self.density == "log_students":
self.prior_object = priors.LogStudentsT(mean, std, nu, eta, lower, upper)
else:
errors.append(f"Unknown density '{self.density}'")
except priors.PriorError as e:
errors.append(f"Parameters incompatible with density '{self.density}': {str(e)}")
return errors
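# Hedged illustration (field values hypothetical): a SmoothingPrior with
# density="gaussian", mean=0.0, std=0.1 and the default min/max bounds would be
# turned by the validation above into
# priors.Gaussian(0.0, 0.1, float("-inf"), float("inf")).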
class SmoothingPriorGroup(Form):
dage = SmoothingPrior(name_field="prior_type", nullable=True, display="Age diff")
dtime = SmoothingPrior(name_field="prior_type", nullable=True, display="Time diff")
value = SmoothingPrior(name_field="prior_type", nullable=True, display="Values")
class Smoothing(Form):
rate = OptionField(["pini", "iota", "rho", "chi", "omega"], "Rate")
location = IntField(nullable=True)
age_grid = StringListField(constructor=float, nullable=True, display="Age grid")
time_grid = StringListField(constructor=float, nullable=True, display="Time grid")
default = SmoothingPriorGroup(display="Defaults")
mulstd = SmoothingPriorGroup(nullable=True, display="MulStd")
detail = FormList(SmoothingPrior, nullable=True, display="Detail")
age_time_specific = IntField(display="Age and Time specific", nullable=True)
custom_age_grid = Dummy()
custom_time_grid = Dummy()
def _full_form_validation(self, root):
errors = []
if self.rate == "pini":
if not self.is_field_unset("age_grid") and len(self.age_grid) != 1:
errors.append("Pini must have exactly one age point")
else:
age_grid = self.age_grid or root.model.default_age_grid
if len(age_grid) > 1 and self.default.is_field_unset("dage"):
errors.append("You must supply a default age diff prior if the smoothing has extent over age")
time_grid = self.time_grid or root.model.default_time_grid
if len(time_grid) > 1 and self.default.is_field_unset("dtime"):
errors.append("You must supply a default time diff prior if the smoothing has extent over time")
if self._container._name == "rate":
# This validation only makes sense for Fixed Effects not Random Effects
# TODO This repeats validation logic in cascade.model.rates but I don't see a good way to bring that in here
is_negative = True
is_positive = True
for prior in [self.default.value] + [p for p in self.detail or [] if p.prior_type == "value"]:
is_negative = is_negative and prior.min == 0 and prior.max == 0
is_positive = is_positive and prior.min > 0
if prior.min < 0:
errors.append("Rates must be constrained to be >= 0 at all points. Add or correct the lower bound")
break
if self.rate in ["iota", "rho"]:
if not (is_negative or is_positive):
errors.append(f"Rate {self.rate} must be either fully positive or constrained to zero")
return errors
class StudyCovariate(Form):
# Haven't seen if this is a string or an ID for the column in the bundle.
study_covariate_id = IntField(display="Covariate")
measure_id = IntField(display="Measure")
mulcov_type = OptionField(["rate_value", "meas_value", "meas_std"], display="Multiplier type")
transformation = IntField(display="Transformation")
age_time_specific = IntField(display="Age and Time specific")
age_grid = StringListField(constructor=float, nullable=True, display="Age grid")
time_grid = StringListField(constructor=float, nullable=True, display="Time grid")
default = SmoothingPriorGroup(display="Defaults")
mulstd = SmoothingPriorGroup(nullable=True, display="MulStd")
detail = FormList(SmoothingPrior, nullable=True, display="Detail")
custom_age_grid = Dummy()
custom_time_grid = Dummy()
class CountryCovariate(Form):
country_covariate_id = IntField(display="Covariate")
measure_id = IntField(display="Measure")
mulcov_type = OptionField(["rate_value", "meas_value", "meas_std"], display="Multiplier type")
transformation = IntField(display="Transformation")
age_time_specific = IntField(display="Age and Time specific")
age_grid = StringListField(constructor=float, nullable=True, display="Age grid")
time_grid = StringListField(constructor=float, nullable=True, display="Time grid")
default = SmoothingPriorGroup(display="Defaults")
mulstd = SmoothingPriorGroup(nullable=True, display="MulStd")
detail = FormList(SmoothingPrior, nullable=True, display="Detail")
custom_age_grid = Dummy()
custom_time_grid = Dummy()
class Model(Form):
modelable_entity_id = IntField()
model_version_id = IntField(nullable=True)
random_seed = IntField()
minimum_meas_cv = FloatField(nullable=True, display="Data CV floor")
add_csmr_cause = IntField(nullable=True, display="CSMR cause")
title = StrField(nullable=True, display="Title")
description = StrField(nullable=True, display="Description")
bundle_id = IntField(nullable=True, display="Data bundle")
drill = OptionField(["cascade", "drill"], display="Drill")
drill_location = IntField(display="Drill location", nullable=True)
drill_location_start = IntField(display="Drill location start", nullable=True)
drill_location_end = IntField(display="Drill location end", nullable=True)
drill_sex = OptionField([1, 2], constructor=int, nullable=True, display="Drill sex")
birth_prev = OptionField([0, 1], constructor=int, nullable=True, default=0, display="Prevalence at birth")
default_age_grid = StringListField(constructor=float, display="(Cascade) Age grid")
default_time_grid = StringListField(constructor=float, display="(Cascade) Time grid")
constrain_omega = OptionField([0, 1], constructor=int, nullable=False, display="Constrain other cause mortality")
exclude_data_for_param = ListField(constructor=int, nullable=True, display="Exclude data for parameter")
ode_step_size = FloatField(display="ODE step size")
additional_ode_steps = StringListField(constructor=float, nullable=True,
display="Advanced additional ODE steps")
split_sex = OptionField(["most_detailed", "1", "2", "3", "4", "5"], display="Split sex (Being used as Drill Start)")
quasi_fixed = OptionField([0, 1], default=0, constructor=int, nullable=True)
zero_sum_random = ListField(nullable=True, display="Zero-sum random effects")
bound_frac_fixed = FloatField(
default=1e-2, nullable=True,
display="allowed modification to point to move it within bounds"
)
bound_random = FloatField(
nullable=True,
display="allowed modification to point to move it within bounds"
)
rate_case = Dummy()
data_density = StrField(nullable=True, display="Data density")
def _full_form_validation(self, root):
errors = []
if self.drill == "drill":
if self.is_field_unset("drill_sex"):
errors.append("For a drill, please specify Drill sex.")
return errors
class Eta(Form):
priors = FloatField(nullable=True)
data = FloatField(nullable=True)
class DataEta(Form):
integrand_measure_id = IntField(nullable=True)
value = FloatField(nullable=True)
class DataDensity(Form):
value = StrField(nullable=True)
integrand_measure_id = IntField(nullable=True)
class StudentsDOF(Form):
priors = FloatField(nullable=True, default=5)
data = FloatField(nullable=True, default=5)
class DerivativeTest(Form):
fixed = OptionField(
["none", "first-order", "second-order", "only-second-order",
"adaptive", "trace-adaptive"],
default="none",
display="test for these derivatives",
nullable=True
)
random = OptionField(
["none", "first-order", "second-order", "only-second-order",
"adaptive", "trace-adaptive"],
default="none",
display="test for these derivatives",
nullable=True
)
class FixedRandomInt(Form):
fixed = IntField(nullable=True)
random = IntField(nullable=True)
class FixedRandomFloat(Form):
fixed = FloatField(nullable=True)
random = FloatField(nullable=True)
class RandomEffectBound(Form):
location = IntField(nullable=True)
value = FloatField(nullable=True)
class Policies(Form):
estimate_emr_from_prevalence = OptionField(
[0, 1], constructor=int, default=0, display="Estimate EMR from prevalence", nullable=True
)
use_weighted_age_group_midpoints = OptionField([1, 0], default=1, constructor=int, nullable=True)
number_of_fixed_effect_samples = IntField(default=30, nullable=True)
with_hiv = BoolField(default=True, nullable=True, display="Whether to get ASDR with HIV deaths.")
age_group_set_id = IntField(default=12, nullable=True, display="Age groups for analysis work.")
exclude_relative_risk = OptionField([1, 0], default=1, constructor=int, nullable=True)
meas_std_effect = OptionField(
["add_std_scale_all", "add_std_scale_log", "add_var_scale_all", "add_var_scale_log"],
default="add_var_scale_log",
display="Measurement standard deviation effect",
nullable=True
)
limited_memory_max_history_fixed = IntField(
default=30, nullable=True,
display="number of most recent iterations taken into account for quasi-Newton"
)
fit_strategy = OptionField(["fit", "fit_fixed_then_fit"], default="fit", nullable=True)
decomp_step = StrField(nullable=True, default="step1")
gbd_round_id = IntField(nullable=True, default=6)
class Configuration(Form):
""" The root Form of the whole configuration tree.
Example:
>>> input_data = json.loads(json_blob)
>>> form = Configuration(input_data)
>>> errors = form.validate_and_normalize()
>>> if errors:
print(errors)
raise Exception("Woops")
else:
print(f"Ready to configure a model for {form.model.modelable_entity_id}")
"""
model = Model(display="Model", validation_priority=5)
policies = Policies(display="Policies")
gbd_round_id = IntField(display="GBD Round ID")
random_effect = FormList(Smoothing, nullable=True, display="Random effects")
rate = FormList(Smoothing, display="Rates")
study_covariate = FormList(StudyCovariate, display="Study covariates")
country_covariate = FormList(CountryCovariate, display="Country covariates")
eta = Eta(validation_priority=5)
students_dof = StudentsDOF(validation_priority=5)
log_students_dof = StudentsDOF(validation_priority=5)
csmr_cod_output_version_id = IntField()
# Unclear how this differs from csmr_cod_output_version_id. Has same value.
csmr_mortality_output_version_id = Dummy()
location_set_version_id = IntField(default=429, nullable=True)
min_cv = FormList(Dummy)
min_cv_by_rate = FormList(Dummy)
re_bound_location = FormList(RandomEffectBound)
+
"To be overriden by derived class method")
class UKNA_BSEN1991_2_crowd(SteadyStateCrowdLoading):
"""
Class to implement steady state crowd loading analysis
to UK NA to BS EN1991-2
"""
def __init__(self,
bridgeClass:str=None,
crowd_density:float=None,
load_direction='vertical',
**kwargs):
"""
Initialisation function
***
Optional:
* `bridgeClass`, string character ('A' to 'D'), defines bridge class
per UK NA to BS EN1991-2
* `crowd_density`, float, defines crowd density (P/m2). Note only used
if `bridgeClass=None`
* `load_direction`, string, either 'vertical' or 'lateral' required
"""
if bridgeClass is None:
if crowd_density is None:
raise ValueError("Either `bridgeClass` or `crowd_density` required")
else:
if not isinstance(bridgeClass,str):
raise ValueError("`bridgeClass` to be string character")
else:
# correct to uppercase
bridgeClass = bridgeClass.upper()
self.bridgeClass = bridgeClass
"""
Bridge class per Table NA.7
"""
self.crowd_density = crowd_density
"""
Crowd density (P/m2)
"""
self.load_direction = load_direction
"""
Direction of applied crowd loading
"""
# Run parent init function
super().__init__(**kwargs)
def get_crowd_density(self,
verbose=True,
saveAsAttr=True):
"""
Get crowd density from Table NA.7, UK NA to BS EN 1991-2,
according to `bridgeClass`
Returns density expressed as persons/m2
"""
# Define crowd density via bridge class, if defined
if self.bridgeClass is not None:
bridgeClass = self.bridgeClass
if bridgeClass == 'A':
density=0.0
elif bridgeClass == 'B':
density=0.4
elif bridgeClass == 'C':
density=0.8
elif bridgeClass == 'D':
density=1.5
else:
raise ValueError("Invalid 'bridgeClass'!")
# Otherwise use pre-defined density
else:
density = self.crowd_density
self.crowd_density = density
if verbose:
print("Bridge class: '%s'" % bridgeClass)
print("Crowd density (persons/m2): %.1f" % density)
if density==0:
raise ValueError("Crowd density = 0; no analysis required!")
return density
def calc_load_intensity(self,
mode_index:int,
fv:float=None,
load_direction=None,
calc_lambda=True,
verbose=True,
makePlot=True):
"""
Calculates required load intensity (N/m2) to UK NA to BS EN 1991-2
"""
if load_direction is None:
load_direction = self.load_direction
# Retrieve attributes
A = self.deck_area
max_modeshape = self.max_modeshape[mode_index]
# Get crowd density according to bridgeClass
# Crowd density, expressed in persons/m2
crowd_density = self.get_crowd_density(verbose=False)
# Get total number of pedestrians
N = crowd_density * A
# Define code inputs
if load_direction == 'vertical':
F0 = 280.0
# N, refer Table NA.8
# n.b 'walkers' value to be used in crowd loading calcs
else:
F0 = 70.0 # N, refer Tue 24/07/2018 12:56 <NAME> email
# Derive adjustment factors
if fv is None:
# If not provided explicitly set to damped frequency of target mode
fv = numpy.abs(self.f_d[mode_index])
k = UKNA_BSEN1991_2_Figure_NA_8(fv=fv,
analysis_type="walkers",
load_direction=load_direction,
makePlot=makePlot)
log_dec = 2*numpy.pi*self.eta[mode_index]
gamma = UKNA_BSEN1991_2_Figure_NA_9(logDec=log_dec,
groupType="crowd",
makePlot=makePlot)
# Effective number of pedestrians parameter, refer NA. 2.44.5(1)
# To generalise to deck of variable width, use area ratio as Seff/S
if calc_lambda:
Aeff = self.modal_areas[mode_index] / (0.634 * max_modeshape)
lambda_val = 0.634*Aeff/A
else:
Aeff = A / 0.634  # keeps lambda = 0.634*Aeff/A = 1.0 and the results text below defined
lambda_val = 1.0 # conservative according to text - but not always!
# Calculate load intensity per NA.2.44.5(1)
load_intensity = 1.8*(F0/A)*k*((gamma*N/lambda_val)**0.5)
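# Illustrative numbers only (not from any real bridge): with F0 = 280 N,
# A = 300 m2, k = 0.6, gamma = 0.4, N = 240 and lambda = 0.5, the expression
# above gives w = 1.8*(280/300)*0.6*sqrt(0.4*240/0.5) ~= 14.0 N/m2.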
# Prepare markdown string of nicely formatted text
md_txt = ""
md_txt += "Key results from UK NA to BS EN 1991-2 load intensity calculation:"
md_txt += "\n" + "F0 = %.1f\t(N)" % F0
md_txt += "\n" + "A = %.2f\t(m2)" % A
md_txt += "\n" + "gamma_max = %.3f\t" % max_modeshape
md_txt += "\n" + "Aeff = %.2f\t(m2)" % Aeff
md_txt += "\n" + "rho = %.2f\t(P/m2)" % crowd_density
md_txt += "\n" + "N = %.1f" % N
md_txt += "\n" + "fv = %.3f\t(Hz)" % fv
md_txt += "\n" + "k = %.3f" % k
md_txt += "\n" + "log_dec = %.3f" % log_dec
md_txt += "\n" + "gamma = %.3f" % gamma
md_txt += "\n" + "lambda = %.3f" % lambda_val
md_txt += "\n" + "w = %.3f\t(N/m2)" % load_intensity
if verbose:
print("\n" + md_txt + "\n")
# Prepare dict to return key results
results_dict = {}
results_dict["F0"]=F0
results_dict["fv"]=fv
results_dict["rho"]=crowd_density
results_dict["N"]=N
results_dict["k"]=k
results_dict["lambda"]=lambda_val
results_dict["log_dec"]=log_dec
results_dict["w"]=load_intensity
results_dict["md_txt"]=md_txt
return load_intensity, results_dict
class HIVOSS(SteadyStateCrowdLoading):
"""
Implements HIVOSS rules for crowd-induced vibration
"""
def __init__(self,crowd_density:float,direction='Vertical',**kwargs):
"""
Initialisation method
***
Required:
* `crowd_density`, _float_ to denote crowd density, persons/m2
* `direction`, _string_ to denote loading / response direction.
Either 'Vertical' or 'Lateral' required.
***
Optional:
Refer optional keyword arguments in parent class __init__() method
"""
self.crowd_density = crowd_density
"""
Crowd density for design situation consider, P/m2
"""
self.direction = direction
"""
Direction of loading / response calculation
"""
# Run parent init function
super().__init__(**kwargs)
def calc_load_intensity(self,mode_index:int,fv:float,
verbose=True,makePlot=True):
"""
Function to calculate intensity of uniform deck load
according to HIVOSS guidance
Note: overrides parent class method
"""
print("Calculating load intensity for mode %d " % mode_index +
"according to HIVOSS...")
rslts_dict = {}
d = self.crowd_density
S = self.deck_area
rslts_dict["d"]=d
rslts_dict["S"]=S
if verbose:
print("Area of loaded surface, S: %.1f" % S)
# Calculate number of pedestrians on loaded surface
n = S * d
self.n = n
rslts_dict["n"] = n
"""
Number of pedestrians on loaded surface
"""
if verbose:
print("Number of pedestrians, n: %.1f" % n)
# Get damping ratio applicable to mode being considered
eta = self.eta[mode_index]
rslts_dict["eta"]=eta
if verbose:
print("Damping ratio for mode: %.4f" % eta)
# Calculate effective number of pedestrians on loaded surface
if d < 1.0:
n_dash = 10.8 * (eta*n)**0.5 / S
else:
n_dash = 1.85 * (n)**0.5 / S
self.n_dash = n_dash
"""
Effective number of pedestrians on loaded surface, [1/m2]
"""
rslts_dict["n_dash"]=n_dash
if verbose:
print("Effective number of pedestrians, n': %.1f" % n_dash)
# Define reference load
# refer Table 4-7, HIVOSS guidelines
direction = self.direction
rslts_dict["direction"]=direction
if direction == 'Vertical':
P = 280
elif direction == 'Longitudinal':
P = 140
elif direction == 'Lateral':
P = 35
else:
raise ValueError("Invalid `direction`")
self.P = P
"""
Reference load, P [N]
"""
rslts_dict["P"]=P
if verbose:
print("Reference load, P [N]': %.0f" % P)
# Calculate reduction factor
rslt = calc_psi_HIVOSS(fv=fv, direction=direction, makePlot=makePlot)
psi = rslt[0]
rslts_dict["psi"]=psi
if verbose:
print("Reduction factor, psi': %.3f" % psi)
# Calculate load intensity
load_intensity = P * n_dash * psi
self.load_intensity = load_intensity
"""
Load intensity [N/m2] of UDL due to crowd loading
"""
if verbose:
print("Load intensity [N/m2]': %.2f" % load_intensity)
return load_intensity, rslts_dict
class PedestrianDynamics_transientAnalyses(dyn_analysis.Multiple):
"""
Implements the full set of analyses required to fully-implement the
method given in NA.2.44.4 of UK NA to BS EN 1991-2
i.e. transient analyses for both walkers and joggers, for all modes
"""
def __init__(self,
modalsys_obj,
bridgeClass='A',
**kwargs):
"""
Initialisation function
****
Required:
* `modalsys_obj`, modal system to which analysis relates
***
Optional:
* `bridgeClass`, _string_, either 'A', 'B', 'C', or 'D'.
Refer Table NA.7 for description of bridge classes
Additional keyword arguments may be passed. These should relate to the
`__init__()` function of the `Multiple` class.
"""
# Get mode indexs to loop over
# Note modes sorted into ascending frequency but in conjugate pairs
# Hence step through modes x2
nModes = modalsys_obj.GetSystemMatrices()["nDOF"]
mode_index_list = numpy.arange(0,2*nModes,2).tolist()
# Run parent init function
super().__init__(classDef=UKNA_BSEN1991_2_walkers_joggers,
dynsys_obj=modalsys_obj,
bridgeClass=bridgeClass,
mode_index=mode_index_list,
analysis_type=["walkers","joggers"],
verbose=False,
**kwargs)
# Save key variables as attributes
self.bridgeClass = bridgeClass
def plot_modal_params(self):
fig, axarr = plt.subplots(2)
fig.set_size_inches((6,8))
fig.subplots_adjust(hspace=0.4,right=0.8)
# Get modal properties as used in analyses
nModes = int(len(self.analysis_list)/2)
mode_index = numpy.arange(0,nModes,1)+1
fv = [x.fv for x in self.analysis_list][:nModes]
eta = [x.eta for x in self.analysis_list][:nModes]
# Plot details of system
ax = axarr[0]
ax.bar(mode_index,fv)
ax.set_xlabel("Mode index")
ax.set_xticks(mode_index)
ax.set_ylabel("$f_{d}$ (Hz)",fontsize=8.0)
ax.set_title("Damped natural frequencies")
ax = axarr[1]
ax.bar(mode_index,eta)
ax.set_xlabel("Mode index")
ax.set_xticks(mode_index)
ax.set_ylim([0,ax.get_ylim()[1]])
ax.set_ylabel("Damping ratio",fontsize=8.0)
# MantaFlow fluid solver framework
# Copyright 2011 <NAME>, <NAME>
#
# @author: <NAME> http://marielenaeckert.com/
#
#
# Reconstruction of both density and velocity volume based on input images
#
# 0. make sure not to use more than ~4 threads (export OMP_NUM_THREADS=4)
# 1. adapt variable path, pathCalib, and captureFolderPath
# 2. example call "./manta ../scenes/reconstruct/reconDenVel.py calib20190813 0813_80_0085 100 3 8 5e-2 5e-4 5e-1 5e-2 1e-4 1e-3 0.8"
#
import os, sys, shutil, time, math, platform, datetime
import numpy as np
import _visualize as v
import _writeJson as wJ
from manta import *
from enum import Enum
class drawKind(Enum):
den3D = 1
vel3D = 2
den2D = 3
vel2D = 4
class reconKind(Enum):
synth = 1 # synthetic data + addSource, real or orthographic cameras, no cutoff, true first density field
synthReal = 2 # synthetic data + vel Inflow, real cameras, cutoff
real = 3 # real data, real inflow real cameras, cutoff
# drawKind: 1: 3D den, 2: 3D vel, 3: 2D den, 4: 2D vel
def saveVisGrid(grid, npy, output, dK, scaleVis, negativeValues=False):
if dK==drawKind.den3D or dK==drawKind.den2D: copyGridToArrayReal(grid, npy)
elif dK==drawKind.vel3D or dK==drawKind.vel2D: copyGridToArrayMAC(grid, npy)
np.savez_compressed(path+folderOut+'tmp.npz', data=npy)
if os.path.isfile(path+folderOut+output): os.remove(path+folderOut+output)
os.rename(path+folderOut+'tmp.npz', path+folderOut+output)
if dK==drawKind.den3D: v.draw3DDensityGridNpy(path+folderOut+output, scaleVis, rK == reconKind.synthReal, negativeValues)
elif dK==drawKind.vel3D: v.draw3DVelGridNpy(path+folderOut+output, scaleVis, rK == reconKind.synthReal)
elif dK==drawKind.den2D: v.draw2DDensityNpy(path+folderOut+output, scaleVis, rK == reconKind.synthReal, negativeValues)
elif dK==drawKind.vel2D: v.draw2DVelGridNpy(path+folderOut+output, scaleVis, rK == reconKind.synthReal)
def loadVel(grid, npy, filename):
npy = np.load(filename)['data']
copyArrayToGridMAC(npy, grid)
def loadDen(grid, npy, filename, scale=0, setBounds=False, bWidth=1):
npy = np.load(filename)['data']
copyArrayToGridReal(npy, grid)
if scale!=0: grid.multConst(scale)
if setBounds: grid.setBound(0,bWidth)
def loadImg(grid, npy, filename, scale=0, interpol=False, imgs=0):
npy = np.load(filename)['data']
copyArrayToGridReal(npy, grid)
if interpol: interpolateImgs(imgs,grid,scale)
elif scale!=0: grid.multConst(scale)
###### read input arguments ######
if len(sys.argv)<5:
print("four arguments required: calibFolder, captureFolder, res, and reconKind")
sys.exit()
path = '/home/eckert/results/'
calibFolder = sys.argv[1]
captureFolder = sys.argv[2]
res = int(sys.argv[3])# make sure this will result in an integer in the y-domain size
pathCalib = path+'/%s/%s_rays.txt'%(calibFolder,'%i')
captureFolderPath = 'input/%s/postprocessed/'%captureFolder
rK = reconKind(int(sys.argv[4]))#reconKind.synthReal; must be set before the parameter defaults below use it
###### set parameters ######
if len(sys.argv)>5: scale = float(sys.argv[5])
else:
if rK == reconKind.real: scale = 6
elif rK == reconKind.synthReal: scale = 1#2.5
else: scale = 7.5
if len(sys.argv)>6: smoothDen = float(sys.argv[6])
else:
if rK == reconKind.synthReal and int(captureFolder) == 3: smoothDen = 1e-2
elif rK == reconKind.synthReal: smoothDen = 1e-4
else: smoothDen = 2.5e-2
if len(sys.argv)>7: kinDen = float(sys.argv[7])
else:
if rK == reconKind.synthReal and int(captureFolder) == 3: kinDen = 1e-3
elif rK == reconKind.synthReal: kinDen = 1e-6
else: kinDen = 5e-4
if len(sys.argv)>8: smoothVel = float(sys.argv[8])
else:
smoothVel = 6e-1
if rK == reconKind.synthReal:
if int(captureFolder)==2: smoothVel = 5e-1
elif int(captureFolder)==3: smoothVel = 5e-1
elif int(captureFolder)==4: smoothVel = 5e-1
elif int(captureFolder)==6: smoothVel = 5e-1
if len(sys.argv)>9: kinVel = float(sys.argv[9])
else:
kinVel = 5e-2
if rK == reconKind.synthReal:
if int(captureFolder)==2: kinVel = 4e-2
elif int(captureFolder)==3: kinVel = 5e-2
elif int(captureFolder)==4: kinVel = 4e-2
elif int(captureFolder)==6: kinVel = 4e-2
if len(sys.argv)>10: smoothInfl = float(sys.argv[10])
else: smoothInfl = 1e-2
if len(sys.argv)>11: kinInfl = float(sys.argv[11])
else: kinInfl = 1e-2
if len(sys.argv)>12: velInflowValue = float(sys.argv[12])
else:
velInflowValue = res/73. if '_90_' in captureFolder else res/80.
if rK == reconKind.synthReal:
velInflowValue = 0.006*res
if int(captureFolder)==2: velInflowValue = 0.95
elif int(captureFolder)==3: velInflowValue = 0.98
elif int(captureFolder)==4: velInflowValue = 0.9
elif int(captureFolder)==6: velInflowValue = 1.05
###### setup reconstruction parameters ######
startFrame = 0 if rK == reconKind.real else 14
lastFrame = startFrame+151
step = 1
restartRecon = True
resFactor = res/50.
ms = res/2 if rK != reconKind.synthReal else res
factorY = 1.77 if rK != reconKind.synth else 1.33
boxTest = False and rK == reconKind.synth
###### end setup reconstruction parameters ######
###### create grids ######
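# illustrative note (not in the original script): with res=100 and factorY=1.77 the y-extent below
# works out to 178, since ceil(factorY*res) is padded to the next even number whenever it is odd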
gs = vec3(res,math.ceil(factorY*res),res) if math.ceil(factorY*res)%2==0 else vec3(res,math.ceil(factorY*res)+1,res)
s = Solver(name='volume', gridSize = gs, dim=3)
sO = Solver(name='volumeHigh', gridSize = vec3(200,math.ceil(factorY*200),200), dim=3)
vel = s.create(MACGrid)
velUpdate = s.create(MACGrid)
denPredict = s.create(RealGrid)
den = s.create(RealGrid)
denTarget = s.create(RealGrid)
denHelp = s.create(RealGrid)
pressure = s.create(RealGrid)
src0 = s.create(RealGrid) if rK != reconKind.real else 0
flags = s.create(FlagGrid)
den1O = sO.create(RealGrid) if rK != reconKind.real else 0
flagsO = sO.create(FlagGrid) if rK != reconKind.real else 0
bWidth=1
flags.initDomain(boundaryWidth=bWidth)
flags.fillGrid()
setOpenBound(flags, 1, 'xXyYzZ', FlagOutflow|FlagEmpty)
###### end create grids ######
###### create numpy arrays ######
velNpy = np.empty(shape=[int(gs.z), int(gs.y), int(gs.x), 3], order='C')
denNpy = np.empty(shape=[int(gs.z), int(gs.y), int(gs.x), 1], order='C')
den1ONpy = np.empty(shape=[200, math.ceil(factorY*200), 200, 1], order='C') if rK != reconKind.real else 0
###### end create numpy arrays ######
###### setup path and filenames ######
folderO = path+'synthReal/synthReal_%06d_7/'%int(captureFolder) if rK == reconKind.synth or rK == reconKind.synthReal else path+captureFolderPath
folderIn = folderO + '%d/' % res if (res<200 and rK != reconKind.real) else folderO
prefix = 'rDV'
suffix = '%d_%s_%.1f_%.1e_%.1e_%.1e_%.1e_%.1e_%.1e_%.1f' % (res, rK.name, scale, smoothDen, kinDen, smoothVel, kinVel, smoothInfl, kinInfl, velInflowValue)
folderOut = prefix+'_%06d_%s/'%(int(captureFolder),suffix) if rK != reconKind.real else prefix+'_%s_%s/'%(captureFolder, suffix)
densityName = 'density_%06d.npz'
velocityName = 'velocity_%06d.npz'
if not os.path.exists(path+folderOut): os.makedirs(os.path.dirname(path+folderOut))
try:
scriptname = 'reconDenVel.py'
shutil.copy(os.path.abspath(os.path.dirname(sys.argv[0]))+'/'+scriptname, path+folderOut+scriptname)
except OSError:
raise
###### end setup path and filenames ######
###### setup inflow region ######
if rK == reconKind.real or rK == reconKind.synthReal:
p0 = vec3(math.ceil(gs.x*0.44),0.,math.ceil(gs.z*0.38))
p1 = vec3(math.ceil(gs.x*0.64),math.ceil(gs.x*0.068),math.ceil(gs.z*0.58))
if rK == reconKind.synthReal and int(captureFolder) == 4:
p0 = vec3(math.ceil(gs.x*0.42),0.,math.ceil(gs.z*0.4))
p1 = vec3(math.ceil(gs.x*0.66),math.ceil(gs.x*0.1),math.ceil(gs.z*0.60))
else:
p0 = vec3(0,0,0)
p1 = vec3(0,0,0)
srcInflow = 0
if rK == reconKind.synthReal or rK == reconKind.real:
srcInflow = ShapeDetails(s, 2, (p0+p1)/2., (p1-p0)/2., 0)
setInflowStructure(flags, srcInflow)#, 10, False, src0)
#saveVisGrid(src0, denNpy, 'finalSrc_%06d.npz'%0, drawKind.den3D, 0.5*scale)
#sys.exit()
if rK != reconKind.real: upsampleFlagGrid(flagsO, flags)
###### end setup inflow region ######
###### tomography part ######
## image parameters ##
angles = [0,1,2,3,4]
width = res*6
height = math.ceil(width*1.77) if math.ceil(width*1.77)%2==0 else math.ceil(width*1.77)+1
stepSize = 0.7
minNumCams = 2
switchXY = True#False
useDenTarget = False # use same denTarget instead of denP+denU where denU is recalculated in each bivariate OF PD iter
## tomography parameters ##
pdT = PDParams(s, 0.01, 100, 1, 10) #PDParams(parent,sigma, tau, theta, mxIter)
if rK == reconKind.real: wT = RegWeightsTomo(s, smoothDen*resFactor, kinDen*resFactor, smoothInfl*resFactor, kinInfl*resFactor) #RegWeightsTomo(parent,smooth,kinetic)
else: wT = RegWeightsTomo(s, smoothDen*resFactor, kinDen*resFactor, smoothInfl*resFactor, kinInfl*resFactor) #RegWeightsTomo(parent,smooth,kinetic)
#TomoParams(parent,t,path,threshVH,threshMask,stepSize,minNumCams,numPixels,numVoxels,angleWeight,pdParams,regWeights,shapeLimit);
tomoParams = TomoParams(s, startFrame, path+folderOut, 1e-9, 1e-4, stepSize, minNumCams, 0, 0, 1, pdT, wT, 0)
pdT_trgt = PDParams(s, 0.01, 100, 1, 20) #PDParams(parent,sigma, tau, theta, mxIter)
if rK == reconKind.real: wT_trgt = RegWeightsTomo(s, 1e-2*smoothDen*resFactor, 1e-2*kinDen*resFactor, 0, 0) #RegWeightsTomo(parent,smooth,kinetic)
else: wT_trgt = RegWeightsTomo(s, 1e-1*smoothDen*resFactor, kinDen*resFactor, 0, 0) #RegWeightsTomo(parent,smooth,kinetic)
#TomoParams(parent,t,path,threshVH,threshMask,stepSize,minNumCams,numPixels,numVoxels,angleWeight,pdParams,regWeights,shapeLimit);
tomoParams_trgt = TomoParams(s, startFrame, path+folderOut, 1e-9, 1e-4, stepSize, minNumCams, 0, 0, 1, pdT_trgt, wT_trgt, 0)
# source tomography parameters
if rK != reconKind.synth:
# tomography for src
if rK == reconKind.real: wT_firstDen = RegWeightsTomo(s, 1e-2*smoothDen*resFactor, 1e-2*kinDen*resFactor, 0, 0) #RegWeightsTomo(parent,smooth,kinetic)
else: wT_firstDen = RegWeightsTomo(s, 0, 1e-6, 0, 0) #RegWeightsTomo(parent,smooth,kinetic)
src_firstDen = ShapeDetails(s, 2, gs*vec3(0.5,0.05,0.5), gs*vec3(0.3, 0.1, 0.3), 3) #ShapeDetails(parent,shape,center,vec,radius)
#TomoParams(parent,t,path,threshVH,threshMask,stepSize,numPixels,numVoxels,angleWeight,pdParams,regWeights,shapeLimit);
tomoParams_firstDen = TomoParams(s, startFrame, path+folderOut, 1e-9, 1e-4, stepSize, minNumCams, 0, 0, 1, pdT, wT_firstDen, src_firstDen)
## image FluidSolver to read in captured images ##
if rK == reconKind.real:
sImgsO = Solver(name='imagesO', gridSize = vec3(1080, 1920, 5), dim=3)
imgsO = sImgsO.create(RealGrid)
imgsONpy = np.empty(shape=[5, 1920, 1080, 1], order='C')
# image FluidSolver for target images
gsImgs = vec3(width,height,len(angles))
sImgs = Solver(name='images', gridSize = gsImgs, dim=3) if len(angles)>1 else Solver(name='main', gridSize = gsImgs, dim=2)
imgs = sImgs.create(RealGrid)
imgsNpy = np.empty(shape=[len(angles), height, width, 1], order='C')
## create image class ##
orthographic = False and rK == reconKind.synth
if orthographic: angles = [0,1,2]
i = Image(sImgs,width,height,len(angles),pathCalib,switchXY,orthographic)
###### end tomography part ######
###### optical flow part ######
pd = PDParams(s, 0.01, 10, 1, 10) #PDParams(parent, sigma, tau, theta, mxIter)
if rK == reconKind.real: w = RegWeightsOF(s, smoothVel*resFactor, kinVel*resFactor) #RegWeightsOF(parent,smooth,kinetic,kineticZ=-1,adaptiveKineticZ=false,sumZero=0)
else: w = RegWeightsOF(s, smoothVel*resFactor, kinVel*resFactor) #RegWeightsOF(parent,smooth,kinetic,kineticZ=-1,adaptiveKineticZ=false,sumZero=0)
#OFParams(parent,t,minSize,bivariate,path,s,strides,N,dim,heightOfSrc,pdParams,regWeights)
ofParams = OFParams(s, startFrame, ms, True, path+folderOut, gs, vec3(1, gs.x, gs.x*gs.y), gs.x*gs.y*gs.z, 3, srcInflow.getHeightOfSrc(), useDenTarget, velInflowValue, pd, w)
###### end optical flow part ######
###### viscosity approximation ######
visc = 0.0000148 # air at 15dC = 1.48 * 10^-5 ##0.000001 # ca. "water" ##0.0 # off
timestep = 1./30.
alpha = visc * timestep * float(res*res)
###### end viscosity approximation ######
###### prepare initial condition and setup target images ######
restartedRecon = False
if boxTest:
source0 = s.create(Box, center=gs*vec3(0.5,0.5,0.5), size=gs*vec3(0.1))
source0.applyToGrid(grid=src0, value=0.7)
source1 = s.create(Box, center=gs*vec3(0.5,0.51,0.5), size=gs*vec3(0.1))
source1.applyToGrid(grid=den, value=0.7)
else:
# get current state (den and vel)
if restartRecon:
# find last successful reconstruction step, set this to startFrame
for t in range(lastFrame-step, startFrame+step, -step):
if os.path.isfile(path+folderOut+'velocity_%06d.npz'%t) and os.path.isfile(path+folderOut+'density_%06d.npz'%t):
startFrame = t
restartedRecon = True
break
if restartedRecon:
print('restartRecon %d'%(startFrame))
sys.stdout.flush()
loadVel(vel, velNpy, path+folderOut+'velocity_%06d.npz'%startFrame)
loadDen(den, denNpy, path+folderOut+'density_%06d.npz' %startFrame)
# or get first den field
if not restartedRecon:
if rK == reconKind.real or rK == reconKind.synthReal:
# target images for first den estimation
if rK == reconKind.real: loadImg(imgsO, imgsONpy, folderO+'imgs_%06d.npz'%startFrame, scale, True, imgs)
elif rK == reconKind.synthReal: loadImg(imgs, imgsNpy, folderO+'imgs_%06d.npz'%startFrame, scale)
# first den estimation
saveVisGrid(imgs, imgsNpy, 'imgsTarget_%06d.npz'%startFrame, drawKind.den2D, 3.0)
reconstructDensity(den, i, imgs, flags, tomoParams_firstDen, 0) # mask unused
# save estimated first den
i.render(imgs, den, stepSize, '', '', True, flags)
saveVisGrid(imgs, imgsNpy, 'imgsDiff_%06d.npz'%startFrame, drawKind.den2D, 3.0, True)
i.render(imgs, den, stepSize, '', '', False, flags)
saveVisGrid(imgs, imgsNpy, 'imgsRendered_%06d.npz'%startFrame, drawKind.den2D, 3.0)
saveVisGrid(den, denNpy, 'density_%06d.npz'%startFrame, drawKind.den3D, 3.0)
elif rK == reconKind.synth:
loadDen(den, denNpy, folderIn+densityName%startFrame, scale, True, bWidth)
# load target images (imgs)
if rK == reconKind.real: loadImg(imgsO, imgsONpy, folderO+'imgs_%06d.npz'%(startFrame+step), scale, True, imgs)
elif rK == reconKind.synthReal: loadImg(imgs, imgsNpy, folderO+'imgs_%06d.npz'%(startFrame+step), scale)
elif rK == reconKind.synth:
loadDen(den1O, den1ONpy, folderO+'densityH_%06d.npz'%(startFrame+step), scale, True, bWidth)
i.render(imgs, den1O, stepSize, '', '', False, flagsO)
# setup source
if rK == reconKind.synth: loadDen(src0, denNpy, folderIn+densityName%0, scale)
###### end prepare initial condition and setup target images ######
wJ.writeJasonFile(path+folderOut,calibFolder,captureFolder,folderOut,rK,res,factorY,p0,p1,scale,tomoParams,tomoParams_firstDen,tomoParams_trgt,ofParams,angles,width,height,stepSize,minNumCams,orthographic,restartedRecon,startFrame)
for t in range(startFrame+step, lastFrame, step):
# this step segregates the occluded tracks from the unmatched confirmed tracks
# if default matching was used above (confirmed and occluded tracks were merged
# into one set for default matching)
if default_matching:
newly_occluded_tracks = [i for i in newly_occluded_tracks if i in unmatched_tracks]
unmatched_tracks = [i for i in unmatched_tracks if i not in newly_occluded_tracks]
# if we weren't using the occluded state, then no variable called newly_occluded_tracks
# has been formed yet, so treat unmatched_tracks as newly occluded for the next step
if self.only_filtering and not default_matching:
newly_occluded_tracks = unmatched_tracks
# either do freespace filtering, or, if we weren't supposed to filter, there is no
# notion of previously_occluded_tracks (those are the tracks that were filtered out,
# so anything stored in that variable will be deleted) and all newly_occluded_tracks
# are still maintained in the occluded state
if (freespace_filtering or self.only_filtering) and not default_matching:
previously_occluded_tracks, occluded_tracks_ = self.reason_for_reappearances_mask(
self.tracks,
newly_occluded_tracks,
filtering_factor)
elif (freespace_filtering or self.only_filtering) and default_matching and bugfix:
# print("Executing bugfix")
pv1, occluded_tracks_ = self.reason_for_reappearances_mask(
self.tracks,
newly_occluded_tracks,
filtering_factor)
pv2, unmatched_tracks = self.reason_for_reappearances_mask(
self.tracks,
unmatched_tracks,
filtering_factor)
previously_occluded_tracks = pv1 + pv2
elif (freespace_filtering or self.only_filtering) and default_matching and not bugfix:
# print("Not executing bugfix")
previously_occluded_tracks, occluded_tracks_ = self.reason_for_reappearances_mask(
self.tracks,
newly_occluded_tracks,
filtering_factor)
else:
previously_occluded_tracks = []
occluded_tracks_ = newly_occluded_tracks
# if we were only filtering, then there is no notion of occluded_tracks_; these are
# actually the tracks that did not get filtered and so, really, are still unmatched
if self.only_filtering and not default_matching:
unmatched_tracks = occluded_tracks_
occluded_tracks_ = []
# two caveats: (1) some variables or if-statements may be redundant; (2) because of
# this, if only_filtering is set to True then default_matching must be set to False
# for the code to execute properly
# print("matches, unmatched tracks, unmatched detections, occluded_tracks_, previously_occluded_tracks",
# len(matches), len(unmatched_tracks), len(unmatched_detections),
# len(occluded_tracks_), len(previously_occluded_tracks))
return matches, unmatched_tracks, unmatched_detections, occluded_tracks_, previously_occluded_tracks
# DO NOT TRUST THIS CODE
def _match_swap(self, detections, default_matching=False,
freespace_filtering=True, occluded_factor=1.0,
filtering_factor=1.0, extrapolated_iou_match=False,
appearance_match=True, bugfix=False):
def gated_metric(tracks, dets, track_indices, detection_indices):
features = np.array([dets[i].feature for i in detection_indices])
targets = np.array([tracks[i].track_id for i in track_indices])
print("detection indices", detection_indices)
print("track indices", track_indices)
cost_matrix = self.metric.distance(features, targets)
cost_matrix = linear_assignment.gate_cost_matrix(
self.kf, cost_matrix, tracks, dets, track_indices,
detection_indices, temporal_noise=self.temporal_noise,
tn=self.tn)
return cost_matrix
# Split track set into confirmed, occluded and unconfirmed tracks.
confirmed_tracks = [
i for i, t in enumerate(self.tracks) if t.is_confirmed()]
occluded_tracks = [
i for i, t in enumerate(self.tracks) if t.is_occluded()]
unconfirmed_tracks = [
i for i, t in enumerate(self.tracks) if not t.is_confirmed() and not t.is_occluded()]
# find all occluded tracks from the set of confirmed tracks and collectively
# call them newly_occluded_tracks. the set of tracks that were not occluded will
# still be in confirmed_tracks.
if not self.only_filtering:
newly_occluded_tracks, confirmed_tracks = self.reason_for_occlusions(
self.tracks,
confirmed_tracks,
occluded_factor)
newly_occluded_tracks = newly_occluded_tracks + occluded_tracks
# if using default matching, merge all kinds of tracks together into confirmed_tracks
# and match these together based on appearance. later we will segregate them again
if not self.only_filtering and default_matching: # and appearance_match:
confirmed_tracks = confirmed_tracks + newly_occluded_tracks + unconfirmed_tracks
matches_b, unmatched_tracks_b, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_distance, self.tracks,
detections, confirmed_tracks)
# similar, except we don't match the confirmed and occluded tracks together now
if not default_matching: # and appearance_match:
matches_b, unmatched_tracks_b, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_distance, self.tracks,
detections, confirmed_tracks)
# similar idea: the block above matched the confirmed tracks, now we match the
# occluded tracks. any occluded track that actually got matched to a detection should
# now be called a confirmed track, and the ones that didn't match should stay in the
# occluded state.
if not self.only_filtering and not default_matching: # and appearance_match:
matches_c, newly_occluded_tracks, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_distance, self.tracks,
detections, newly_occluded_tracks, unmatched_detections)
iou_track_candidates = unmatched_tracks_b
unmatched_tracks_b = []
# print(len(iou_track_candidates), len(unmatched_detections))
matches_a, unmatched_tracks_a, unmatched_detections = \
linear_assignment.matching_cascade(
gated_metric, self.metric.matching_threshold, 0, self.max_age,
self.tracks, detections, iou_track_candidates, unmatched_detections)
# very trivial, just takes care of whether we have three sets of matches till
# now or only two
if not self.only_filtering and not default_matching:
matches = matches_a + matches_b + matches_c # + matches_d
else:
matches = matches_a + matches_b # + matches_c # + matches_d
unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
# this step segregates the occluded tracks from the unmatched confirmed tracks
# if default matching was used above (confirmed and occluded tracks were merged
# into one set for default matching)
if default_matching:
newly_occluded_tracks = [i for i in newly_occluded_tracks if i in unmatched_tracks]
unmatched_tracks = [i for i in unmatched_tracks if i not in newly_occluded_tracks]
# if we weren't using the occluded state, then no variable called newly_occluded_tracks
# has been formed yet, so treat unmatched_tracks as newly occluded for the next step
if self.only_filtering and not default_matching:
newly_occluded_tracks = unmatched_tracks
# either do freespace filtering, or, if we weren't supposed to filter, there is no
# notion of previously_occluded_tracks (those are the tracks that were filtered out,
# so anything stored in that variable will be deleted) and all newly_occluded_tracks
# are still maintained in the occluded state
if (freespace_filtering or self.only_filtering) and not default_matching:
previously_occluded_tracks, occluded_tracks_ = self.reason_for_reappearances(
self.tracks,
newly_occluded_tracks,
filtering_factor)
elif (freespace_filtering or self.only_filtering) and default_matching and bugfix:
# print("Executing bugfix")
pv1, occluded_tracks_ = self.reason_for_reappearances(
self.tracks,
newly_occluded_tracks,
filtering_factor)
pv2, unmatched_tracks = self.reason_for_reappearances(
self.tracks,
unmatched_tracks,
filtering_factor)
previously_occluded_tracks = pv1 + pv2
elif (freespace_filtering or self.only_filtering) and default_matching and not bugfix:
# print("Not executing bugfix")
previously_occluded_tracks, occluded_tracks_ = self.reason_for_reappearances(
self.tracks,
newly_occluded_tracks,
filtering_factor)
else:
previously_occluded_tracks = []
occluded_tracks_ = newly_occluded_tracks
# if we were only filtering, then there is no notion of occluded_tracks_; these are
# actually the tracks that did not get filtered and so, really, are still unmatched
if self.only_filtering and not default_matching:
unmatched_tracks = occluded_tracks_
occluded_tracks_ = []
# two caveats: (1) some variables or if-statements may be redundant; (2) because of
# this, if only_filtering is set to True then default_matching must be set to False
# for the code to execute properly
# print("matches, unmatched tracks, unmatched detections, occluded_tracks_, previously_occluded_tracks",
# len(matches), len(unmatched_tracks), len(unmatched_detections),
# len(occluded_tracks_), len(previously_occluded_tracks))
return matches, unmatched_tracks, unmatched_detections, occluded_tracks_, previously_occluded_tracks
def _initiate_track(self, detection, temporal_noise=True, tn=-1):
mean_depth = self.compute_mean_depth_from_mask(self.image, detection, self.sequence_info)
# print(mean_depth)
det = list(detection.to_xyah())
det = det + [mean_depth]
mean, covariance = self.kf.initiate(det, temporal_noise, tn)
self.tracks.append(Track(
mean, covariance, self._next_id,
self.n_init, self.max_age,
detection.feature))
self._next_id += 1
def compute_mean_depth(self, depth_map, detection, seq_info):
scale_x = seq_info["image_size"][1] / float(depth_map.shape[1])
scale_y = seq_info["image_size"][0] / float(depth_map.shape[0])
box = detection.tlwh.copy()
box[2:] += box[:2]
box = [box[0]/scale_x,
box[1]/scale_y,
box[2]/scale_x,
box[3]/scale_y]
box = [int(x) for x in box]
box = [max(0, box[0]), max(0, box[1]),
max(0, min(depth_map.shape[1], box[2])),
max(0, min(depth_map.shape[0], box[3]))]
if 0 in box[2:] \
or box[0] >= depth_map.shape[1] \
or box[1] >= depth_map.shape[0] \
or box[0] == box[2] \
or box[1] == box[3]:
return -1
box = depth_map[box[1]:box[3], box[0]:box[2]].copy()
return np.mean(box)
def compute_mean_depth_from_mask(self, depth_map, detection, seq_info, mask=None):
width = depth_map.shape[1]
height = depth_map.shape[0]
# print(detection.mask['counts'], detection.mask['size'])
if detection is not None:
m = detection.mask.copy()
elif mask is not None:
m = mask
else:
print("One of detection or mask has to be non-None")
exit(1)
m = resize(m, (height, width), order=1)
inter_mask = np.zeros((height, width), dtype=float)
inter_mask = np.where(m > 10e-6, depth_map, 0)
if 0 in np.nonzero(inter_mask)[0].shape:
return -1
return np.mean(inter_mask[np.nonzero(inter_mask)])
def align(self, im1_gray, im2_gray):
# maximal number of iterations (original 50)
number_of_iterations = 50 # 100
# Threshold increment between two iterations (original 0.001)
termination_eps = 0.001 # 0.00001
# Which warp mode to use (cv2.MOTION_EUCLIDEAN, cv2.MOTION_AFFINE, ...)
warp_mode = cv2.MOTION_EUCLIDEAN
# im1_gray = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)
# im2_gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
warp_matrix = np.eye(2, 3, dtype=np.float32)
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
number_of_iterations,
termination_eps)
try:
cc, warp_matrix = cv2.findTransformECC(im1_gray,
im2_gray, warp_matrix,
warp_mode, criteria)
e_4055[8] = EnumInfo('hourglass', 8, 0)
e_4055[9] = EnumInfo('icon', 9, 0)
e_4055[10] = EnumInfo('size', 10, 0)
e_4055[11] = EnumInfo('nopointer', 11, 0)
e_4055[12] = EnumInfo('appstarting', 12, 0)
e_4055[13] = EnumInfo('help', 13, 0)
e_4055[14] = EnumInfo('hyperlink', 14, 0)
enum_main[0x4055] = e_4055
e_4056 = dict()
e_4056[1] = EnumInfo('regstring', 1, 0)
e_4056[2] = EnumInfo('regexpandstring', 2, 0)
e_4056[3] = EnumInfo('regbinary', 3, 0)
e_4056[4] = EnumInfo('regulong', 4, 0)
e_4056[5] = EnumInfo('regulongbigendian', 5, 0)
e_4056[6] = EnumInfo('reglink', 6, 0)
e_4056[7] = EnumInfo('regmultistring', 7, 0)
enum_main[0x4056] = e_4056
e_4057 = dict()
e_4057[0] = EnumInfo('once', 0, 0)
e_4057[1] = EnumInfo('monthly', 1, 0)
e_4057[2] = EnumInfo('yearly', 2, 0)
enum_main[0x4057] = e_4057
e_4058 = dict()
e_4058[0] = EnumInfo('off', 0, 0)
e_4058[1] = EnumInfo('focusrect', 1, 0)
e_4058[2] = EnumInfo('hand', 2, 0)
enum_main[0x4058] = e_4058
e_4059 = dict()
e_4059[0] = EnumInfo('excel', 0, 0)
e_4059[1] = EnumInfo('text', 1, 0)
e_4059[2] = EnumInfo('csv', 2, 0)
e_4059[3] = EnumInfo('sylk', 3, 0)
e_4059[4] = EnumInfo('wks', 4, 0)
e_4059[5] = EnumInfo('wk1', 5, 0)
e_4059[6] = EnumInfo('dif', 6, 0)
e_4059[7] = EnumInfo('dbase2', 7, 0)
e_4059[8] = EnumInfo('dbase3', 8, 0)
e_4059[9] = EnumInfo('sqlinsert', 9, 0)
e_4059[10] = EnumInfo('clipboard', 10, 0)
e_4059[11] = EnumInfo('psreport', 11, 0)
e_4059[12] = EnumInfo('wmf', 12, 0)
e_4059[13] = EnumInfo('htmltable', 13, 0)
e_4059[14] = EnumInfo('excel5', 14, 0)
e_4059[15] = EnumInfo('xml', 15, 0)
e_4059[16] = EnumInfo('xslfo', 16, 0)
e_4059[17] = EnumInfo('pdf', 17, 0)
e_4059[18] = EnumInfo('excel8', 18, 0)
e_4059[19] = EnumInfo('emf', 19, 0)
e_4059[20] = EnumInfo('tiff', 20, 0)
e_4059[21] = EnumInfo('png', 21, 0)
e_4059[22] = EnumInfo('jpeg', 22, 0)
e_4059[23] = EnumInfo('bmp', 23, 0)
enum_main[0x4059] = e_4059
e_405a = dict()
e_405a[0] = EnumInfo('totop', 0, 0)
e_405a[1] = EnumInfo('tobottom', 1, 0)
e_405a[2] = EnumInfo('behind', 2, 0)
e_405a[65535] = EnumInfo('topmost', 65535, 0)
e_405a[65534] = EnumInfo('notopmost', 65534, 0)
enum_main[0x405a] = e_405a
e_405b = dict()
e_405b[0] = EnumInfo('frombeginning', 0, 0)
e_405b[1] = EnumInfo('fromcurrent', 1, 0)
e_405b[2] = EnumInfo('fromend', 2, 0)
enum_main[0x405b] = e_405b
e_405c = dict()
e_405c[0] = EnumInfo('stretch', 0, 0)
e_405c[1] = EnumInfo('clip', 1, 0)
e_405c[2] = EnumInfo('autosize', 2, 0)
e_405c[3] = EnumInfo('zoom', 3, 0)
enum_main[0x405c] = e_405c
e_405d = dict()
e_405d[0] = EnumInfo('spacing1', 0, 0)
e_405d[1] = EnumInfo('spacing15', 1, 0)
e_405d[2] = EnumInfo('spacing2', 2, 0)
enum_main[0x405d] = e_405d
e_405e = dict()
e_405e[1] = EnumInfo('previewselect', 1, 0)
e_405e[2] = EnumInfo('previewinsert', 2, 0)
e_405e[3] = EnumInfo('previewdelete', 3, 0)
e_405e[4] = EnumInfo('previewupdate', 4, 0)
enum_main[0x405e] = e_405e
e_405f = dict()
e_405f[1] = EnumInfo('previewfunctionretrieve', 1, 0)
e_405f[2] = EnumInfo('previewfunctionreselectrow', 2, 0)
e_405f[3] = EnumInfo('previewfunctionupdate', 3, 0)
enum_main[0x405f] = e_405f
e_4060 = dict()
e_4060[0] = EnumInfo('stgreadwrite', 0, 0)
e_4060[1] = EnumInfo('stgread', 1, 0)
e_4060[2] = EnumInfo('stgwrite', 2, 0)
enum_main[0x4060] = e_4060
e_4061 = dict()
e_4061[0] = EnumInfo('stgexclusive', 0, 0)
e_4061[1] = EnumInfo('stgdenynone', 1, 0)
e_4061[2] = EnumInfo('stgdenyread', 2, 0)
e_4061[3] = EnumInfo('stgdenywrite', 3, 0)
enum_main[0x4061] = e_4061
e_4062 = dict()
e_4062[0] = EnumInfo('tabsontop', 0, 0)
e_4062[1] = EnumInfo('tabsonleft', 1, 0)
e_4062[2] = EnumInfo('tabsonright', 2, 0)
e_4062[3] = EnumInfo('tabsonbottom', 3, 0)
e_4062[4] = EnumInfo('tabsonleftandright', 4, 0)
e_4062[5] = EnumInfo('tabsontopandbottom', 5, 0)
e_4062[6] = EnumInfo('tabsonrightandleft', 6, 0)
e_4062[7] = EnumInfo('tabsonbottomandtop', 7, 0)
enum_main[0x4062] = e_4062
e_4063 = dict()
e_4063[0] = EnumInfo('anycase', 0, 0)
e_4063[1] = EnumInfo('upper', 1, 0)
e_4063[2] = EnumInfo('lower', 2, 0)
enum_main[0x4063] = e_4063
e_4064 = dict()
e_4064[0] = EnumInfo('bold', 0, 0)
e_4064[1] = EnumInfo('italic', 1, 0)
e_4064[2] = EnumInfo('strikeout', 2, 0)
e_4064[3] = EnumInfo('subscript', 3, 0)
e_4064[4] = EnumInfo('superscript', 4, 0)
e_4064[5] = EnumInfo('underlined', 5, 0)
enum_main[0x4064] = e_4064
e_4065 = dict()
e_4065[0] = EnumInfo('alignatleft', 0, 0)
e_4065[1] = EnumInfo('alignattop', 1, 0)
e_4065[2] = EnumInfo('alignatright', 2, 0)
e_4065[3] = EnumInfo('alignatbottom', 3, 0)
e_4065[4] = EnumInfo('floating', 4, 0)
enum_main[0x4065] = e_4065
e_4066 = dict()
e_4066[0] = EnumInfo('traditionaltoolbar', 0, 0)
e_4066[1] = EnumInfo('contemporarytoolbar', 1, 0)
enum_main[0x4066] = e_4066
e_4067 = dict()
e_4067[0] = EnumInfo('roottreeitem', 0, 0)
e_4067[1] = EnumInfo('nexttreeitem', 1, 0)
e_4067[2] = EnumInfo('previoustreeitem', 2, 0)
e_4067[3] = EnumInfo('parenttreeitem', 3, 0)
e_4067[4] = EnumInfo('childtreeitem', 4, 0)
e_4067[5] = EnumInfo('firstvisibletreeitem', 5, 0)
e_4067[6] = EnumInfo('nextvisibletreeitem', 6, 0)
e_4067[7] = EnumInfo('previousvisibletreeitem', 7, 0)
e_4067[8] = EnumInfo('drophighlighttreeitem', 8, 0)
e_4067[9] = EnumInfo('currenttreeitem', 9, 0)
enum_main[0x4067] = e_4067
e_4068 = dict()
e_4068[0] = EnumInfo('activate', 0, 0)
e_4068[1] = EnumInfo('begindrag', 1, 0)
e_4068[2] = EnumInfo('beginlabeledit', 2, 0)
e_4068[3] = EnumInfo('beginrightdrag', 3, 0)
e_4068[4] = EnumInfo('clicked', 4, 0)
e_4068[5] = EnumInfo('close', 5, 0)
e_4068[6] = EnumInfo('closequery', 6, 0)
e_4068[7] = EnumInfo('collapsed', 7, 0)
e_4068[8] = EnumInfo('collapsing', 8, 0)
e_4068[9] = EnumInfo('columnclick', 9, 0)
e_4068[10] = EnumInfo('constructor', 10, 0)
e_4068[11] = EnumInfo('datachange', 11, 0)
e_4068[12] = EnumInfo('datechanged', 12, 0)
e_4068[13] = EnumInfo('dberror', 13, 0)
e_4068[14] = EnumInfo('deactivate', 14, 0)
e_4068[15] = EnumInfo('deleteallitems', 15, 0)
e_4068[16] = EnumInfo('deleteitem', 16, 0)
e_4068[17] = EnumInfo('destructor', 17, 0)
e_4068[18] = EnumInfo('doubleclicked', 18, 0)
e_4068[19] = EnumInfo('dragdrop', 19, 0)
e_4068[20] = EnumInfo('dragenter', 20, 0)
e_4068[21] = EnumInfo('dragleave', 21, 0)
e_4068[22] = EnumInfo('dragwithin', 22, 0)
e_4068[23] = EnumInfo('editchanged', 23, 0)
e_4068[24] = EnumInfo('endlabeledit', 24, 0)
e_4068[25] = EnumInfo('expanded', 25, 0)
e_4068[26] = EnumInfo('expanding', 26, 0)
e_4068[27] = EnumInfo('fileexists', 27, 0)
e_4068[28] = EnumInfo('gesture', 28, 0)
e_4068[29] = EnumInfo('getfocus', 29, 0)
e_4068[30] = EnumInfo('hide', 30, 0)
e_4068[31] = EnumInfo('hotlinkalarm', 31, 0)
e_4068[32] = EnumInfo('idle', 32, 0)
e_4068[33] = EnumInfo('insertitem', 33, 0)
e_4068[34] = EnumInfo('inputfieldselected', 34, 0)
e_4068[35] = EnumInfo('itemchanged', 35, 0)
e_4068[36] = EnumInfo('itemchanging', 36, 0)
e_4068[37] = EnumInfo('itemcollapsed', 37, 0)
e_4068[38] = EnumInfo('itemcollapsing', 38, 0)
e_4068[39] = EnumInfo('itemexpanded', 39, 0)
e_4068[40] = EnumInfo('itemexpanding', 40, 0)
e_4068[41] = EnumInfo('itempopulate', 41, 0)
e_4068[42] = EnumInfo('itemerror', 42, 0)
e_4068[43] = EnumInfo('itemfocuschanged', 43, 0)
e_4068[44] = EnumInfo('key', 44, 0)
e_4068[45] = EnumInfo('linedown', 45, 0)
e_4068[46] = EnumInfo('lineleft', 46, 0)
e_4068[47] = EnumInfo('lineright', 47, 0)
e_4068[48] = EnumInfo('lineup', 48, 0)
e_4068[49] = EnumInfo('losefocus', 49, 0)
e_4068[50] = EnumInfo('modified', 50, 0)
e_4068[51] = EnumInfo('mousedown', 51, 0)
e_4068[52] = EnumInfo('mousemove', 52, 0)
e_4068[53] = EnumInfo('mouseup', 53, 0)
e_4068[54] = EnumInfo('moved', 54, 0)
e_4068[55] = EnumInfo('notify', 55, 0)
e_4068[56] = EnumInfo('open', 56, 0)
e_4068[57] = EnumInfo('other', 57, 0)
e_4068[58] = EnumInfo('pagedown', 58, 0)
e_4068[59] = EnumInfo('pageleft', 59, 0)
e_4068[60] = EnumInfo('pageright', 60, 0)
e_4068[61] = EnumInfo('pageup', 61, 0)
e_4068[62] = EnumInfo('pictureselected', 62, 0)
e_4068[63] = EnumInfo('pipeend', 63, 0)
e_4068[64] = EnumInfo('pipemeter', 64, 0)
e_4068[65] = EnumInfo('pipestart', 65, 0)
e_4068[66] = EnumInfo('printend', 66, 0)
e_4068[67] = EnumInfo('printfooter', 67, 0)
e_4068[68] = EnumInfo('printheader', 68, 0)
e_4068[69] = EnumInfo('printpage', 69, 0)
e_4068[70] = EnumInfo('printstart', 70, 0)
e_4068[71] = EnumInfo('rbuttondown', 71, 0)
e_4068[72] = EnumInfo('rbuttonup', 72, 0)
e_4068[73] = EnumInfo('recognitionresult', 73, 0)
e_4068[74] = EnumInfo('remoteexec', 74, 0)
e_4068[75] = EnumInfo('remotehotlinkstart', 75, 0)
e_4068[76] = EnumInfo('remotehotlinkstop', 76, 0)
e_4068[77] = EnumInfo('remoterequest', 77, 0)
e_4068[78] = EnumInfo('remotesend', 78, 0)
e_4068[79] = EnumInfo('rename', 79, 0)
e_4068[80] = EnumInfo('resize', 80, 0)
e_4068[81] = EnumInfo('retrieveend', 81, 0)
e_4068[82] = EnumInfo('retrieverow', 82, 0)
e_4068[83] = EnumInfo('retrievestart', 83, 0)
e_4068[84] = EnumInfo('rightclicked', 84, 0)
e_4068[85] = EnumInfo('rightdoubleclicked', 85, 0)
e_4068[86] = EnumInfo('rowfocuschanged', 86, 0)
e_4068[87] = EnumInfo('save', 87, 0)
e_4068[88] = EnumInfo('scrollhorizontal', 88, 0)
e_4068[89] = EnumInfo('scrollvertical', 89, 0)
e_4068[90] = EnumInfo('selected', 90, 0)
e_4068[91] = EnumInfo('selectionchanged', 91, 0)
e_4068[92] = EnumInfo('selectionchanging', 92, 0)
e_4068[93] = EnumInfo('show', 93, 0)
e_4068[94] = EnumInfo('sort', 94, 0)
e_4068[95] = EnumInfo('sqlpreview', 95, 0)
e_4068[96] = EnumInfo('stroke', 96, 0)
e_4068[97] = EnumInfo('systemerror', 97, 0)
e_4068[98] = EnumInfo('systemkey', 98, 0)
e_4068[99] = EnumInfo('timer', 99, 0)
e_4068[100] = EnumInfo('toolbarmoved', 100, 0)
e_4068[101] = EnumInfo('treenodeselected', 101, 0)
e_4068[102] = EnumInfo('treenodeselecting', 102, 0)
e_4068[103] = EnumInfo('updateend', 103, 0)
e_4068[104] = EnumInfo('updatestart', 104, 0)
e_4068[105] = EnumInfo('viewchange', 105, 0)
e_4068[106] = EnumInfo('wserror', 106, 0)
enum_main[0x4068] = e_4068
e_4069 = dict()
e_4069[0] = EnumInfo('customvisual', 0, 0)
e_4069[1] = EnumInfo('externalvisual', 1, 0)
e_4069[2] = EnumInfo('vbxvisual', 2, 0)
enum_main[0x4069] = e_4069
e_406a = dict()
e_406a[0] = EnumInfo('multiline', 0, 0)
e_406a[1] = EnumInfo('top', 1, 0)
e_406a[2] = EnumInfo('vcenter', 2, 0)
e_406a[3] = EnumInfo('bottom', 3, 0)
enum_main[0x406a] = e_406a
e_406b = dict()
e_406b[0] = EnumInfo('vticksonright', 0, 0)
e_406b[1] = EnumInfo('vticksonleft', 1, 0)
e_406b[2] = EnumInfo('vticksonboth', 2, 0)
e_406b[3] = EnumInfo('vticksonneither', 3, 0)
enum_main[0x406b] = e_406b
e_406c = dict()
e_406c[0] = EnumInfo('monday', 0, 0)
e_406c[1] = EnumInfo('tuesday', 1, 0)
e_406c[2] = EnumInfo('wednesday', 2, 0)
e_406c[3] = EnumInfo('thursday', 3, 0)
e_406c[4] = EnumInfo('friday', 4, 0)
e_406c[5] = EnumInfo('saturday', 5, 0)
e_406c[6] = EnumInfo('sunday', 6, 0)
enum_main[0x406c] = e_406c
e_406d = dict()
e_406d[0] = EnumInfo('noanimation', 0, 0)
e_406d[1] = EnumInfo('topslide', 1, 0)
e_406d[2] = EnumInfo('bottomslide', 2, 0)
e_406d[3] = EnumInfo('leftslide', 3, 0)
e_406d[4] = EnumInfo('rightslide', 4, 0)
e_406d[5] = EnumInfo('toproll', 5, 0)
e_406d[6] = EnumInfo('bottomroll', 6, 0)
e_406d[7] = EnumInfo('leftroll', 7, 0)
e_406d[8] = EnumInfo('rightroll', 8, 0)
e_406d[9] = EnumInfo('fadeanimation', 9, 0)
e_406d[10] = EnumInfo('centeranimation', 10, 0)
enum_main[0x406d] = e_406d
e_406e = dict()
e_406e[0] = EnumInfo('normal', 0, 0)
e_406e[1] = EnumInfo('minimized', 1, 0)
e_406e[2] = EnumInfo('maximized', 2, 0)
enum_main[0x406e] = e_406e
e_406f = dict()
e_406f[0] = EnumInfo('main', 0, 0)
e_406f[1] = EnumInfo('child', 1, 0)
e_406f[2] = EnumInfo('popup', 2, 0)
e_406f[3] = EnumInfo('response', 3, 0)
e_406f[4] = EnumInfo('mdi', 4, 0)
e_406f[5] = EnumInfo('mdihelp', 5, 0)
enum_main[0x406f] = e_406f
e_4070 = dict()
e_4070[0] = EnumInfo('append', 0, 0)
e_4070[1] = EnumInfo('replace', 1, 0)
enum_main[0x4070] = e_4070
e_4071 = dict()
e_4071[0] = EnumInfo('valnever', 0, 0)
e_4071[1] = EnumInfo('valalways', 1, 0)
e_4071[2] = EnumInfo('valauto', 2, 0)
enum_main[0x4071] = e_4071
e_4072 = dict()
e_4072[0] = EnumInfo('dtflongdate', 0, 0)
e_4072[1] = EnumInfo('dtfshortdate', 1, 0)
e_4072[2] = EnumInfo('dtftime', 2, 0)
e_4072[3] = EnumInfo('dtfcustom', 3, 0)
enum_main[0x4072] = e_4072
e_4073 = dict()
e_4073[0] = EnumInfo('postback', 0, 0)
e_4073[1] = EnumInfo('callback', 1, 0)
e_4073[2] = EnumInfo('xmlclientside', 2, 0)
enum_main[0x4073] = e_4073
e_4074 = dict()
e_4074[0] = EnumInfo('richtexttoolbaractivationnever', 0, 0)
e_4074[1] = EnumInfo('richtexttoolbaractivationonedit', 1, 0)
e_4074[2] = EnumInfo('richtexttoolbaractivationalways', 2, 0)
enum_main[0x4074] = e_4074
e_4075 = dict()
e_4075[0] = EnumInfo('sqldbothers', 0, 0)
e_4075[1] = EnumInfo('sqldbselect', 1, 0)
e_4075[2] = EnumInfo('sqldbinsert', 2, 0)
e_4075[3] = EnumInfo('sqldbdelete', 3, 0)
e_4075[4] = EnumInfo('sqldbupdate', 4, 0)
e_4075[5] = EnumInfo('sqldbprocedure', 5, 0)
e_4075[6] = EnumInfo('sqldbrpc', 6, 0)
enum_main[0x4075] = e_4075
e_40c6 = dict()
e_40c6[1] = EnumInfo('timernone', 1, 0)
e_40c6[2] = EnumInfo('clock', 2, 0)
e_40c6[3] = EnumInfo('process', 3, 0)
e_40c6[4] = EnumInfo('thread', 4, 0)
enum_main[0x40c6] = e_40c6
e_40c7 = dict()
e_40c7[1] = EnumInfo('actroutine', 1, 0)
e_40c7[2] = EnumInfo('actline', 2, 0)
e_40c7[4] = EnumInfo('actesql', 4, 0)
e_40c7[5] = EnumInfo('actobjectcreate', 5, 0)
e_40c7[6] = EnumInfo('actobjectdestroy', 6, 0)
e_40c7[7] = EnumInfo('actuser', 7, 0)
e_40c7[8] = EnumInfo('acterror', 8, 0)
e_40c7[9] = EnumInfo('actbegin', 9, 0)
e_40c7[10] =
from database.imports import *
from database.models.base import Base
#some global variables that are used here and there that would be magic otherwise
_plusMinus='\u00B1'
#FIXME MASSIVELY BROKEN
class HasMirrors: #FIXME this should validate that they actually *are* mirrors?
@declared_attr
def mirrors_from_here(cls): #FIXME lots of bugs with files not actually being present!
ctab=cls.__tablename__
cname=cls.__name__
self_assoc=Table('%s_self_assoc'%ctab, cls.metadata,
Column('left_id',ForeignKey('%s.id'%ctab),primary_key=True),
Column('right_id',ForeignKey('%s.id'%ctab),primary_key=True)
)
return relationship('%s'%cname,secondary=self_assoc,primaryjoin=
'%s.id==%s_self_assoc.c.left_id'%(cname,ctab),
secondaryjoin='%s.id==%s_self_assoc.c.right_id'%(cname,ctab),
backref='mirrors_to_here'
)
#def validate_mirror(self,repository):
#TODO FIXME how to actually do this...
#if repository.origin_files:
#return False
@property
def mirrors(self): #TODO fix append? not sure possible
#return list(set(self.mirrors_to_here+self.mirrors_from_here))
out=[self]
out.extend(self.mirrors_to_here)
return out
@property
def files(self):
files_ = []
[files_.extend(m.origin_files) for m in self.mirrors_from_here]
[files_.extend(m.origin_files) for m in self.mirrors_to_here]
files_.extend(self.origin_files)
fs=list(set(files_))
fs.sort()
return fs #FIXME ;_; ugly and slow
class selfmtm:
@declared_attr
def nodes(cls): #can use this for a self ref m-m mixin?
ctab=cls.__tablename__
cname=cls.__name__
self_assoc=Table('%s_self_assoc'%ctab, cls.metadata,
Column('parent_id',ForeignKey('%s.id'%ctab),primary_key=True),
Column('child_id',ForeignKey('%s.id'%ctab),primary_key=True) #FIXME we want to keep track of the expeirment too and those THREE need to be consistent
)
return relationship('%s'%cname,secondary=self_assoc,primaryjoin=
'%s.ontpar_id==%s_self_assoc.c.parent_id'%(cname,ctab),
secondaryjoin='%s.id==%s_self_assoc.c.child_id'%(cname,ctab),
backref='offspring'
)
###--------------
### notes mixins
###--------------
class Note: #basically metadata for text... grrrrrr why no arbitrary datatypes :/
id=Column(Integer,primary_key=True)
dateTime=Column(DateTime,default=datetime.now) #FIXME holy butts no ntp batman O_O_O_O_O_O
text=Column(Text,nullable=False)
user_id=None #Column(Integer,ForeignKey('users.id')) #TODO
def __init__(self,text,parent_id=None,dateTime=None):
self.text=text
self.parent_id=int(parent_id) if parent_id is not None else None #the association proxy creator passes no parent_id
self.dateTime=dateTime #FIXME may not want this...
class fNote(Note):
def __init__(self,text,parent_id=None,dateTime=None):
self.text=text
self.url=parent_id.url #FIXME misleading?
self.filename=parent_id.filename
self.dateTime=dateTime
class fHasNotes: #for files :/ #TODO V2 we are switching files to ids
@declared_attr
def notes(cls):
tname=cls.__tablename__
cls.Note=type(
'%sNote'%cls.__name__,
(fNote, Base, ),
{ '__tablename__':'%s_notes'%tname,
'url':Column(String, #FIXME nasty errors inbound
nullable=False),
'filename':Column(String, #FIXME nasty errors inbound
nullable=False),
'parent':relationship('%s'%cls.__name__, uselist=False, #FIXME uselist???
backref=backref('_notes')),
'__table_args__':(ForeignKeyConstraint(['url','filename'],['%s.url'%tname,'%s.filename'%tname]),{})
}
)
#return relationship(cls.Note,backref=backref('parent',uselist=False))
return association_proxy('_notes','text',creator=lambda text: cls.Note(text))# FIXME BROKEN creator
class HasNotes: #FIXME this works ok, will allow the addition of the same note to anything basically
@declared_attr
def notes(cls):
tname=cls.__tablename__
cls.Note=type(
'%sNote'%cls.__name__,
(Note, Base, ),
{ '__tablename__':'%s_notes'%tname,
'parent_id':Column(Integer, #FIXME nasty errors inbound
ForeignKey('%s.id'%tname),nullable=False),
'parent':relationship('%s'%cls.__name__, uselist=False, #FIXME uselist???
backref=backref('_notes')),
}
)
#return relationship(cls.Note,backref=backref('parent',uselist=False))
return association_proxy('_notes','text',creator=lambda text: cls.Note(text))
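# Hypothetical usage sketch (not part of the original module): a mapped class can mix in
# HasNotes and append plain strings through the association proxy, which builds the
# dynamically created Note rows behind the scenes. Class and table names here are
# invented purely for illustration.
def _demo_has_notes():
    class DemoSubject(HasNotes, Base):
        __tablename__ = 'demo_subjects'
        id = Column(Integer, primary_key=True)
    subj = DemoSubject()
    subj.notes.append('first recording looked noisy')  # proxied into a DemoSubjectNote(text=...)
    return subj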
###-------------
### datasource mixins
###-------------
class HasDataFileSources:
@declared_attr
def datafilesources(cls):
datafilesource_association = Table('%s_dfs_assoc'%cls.__tablename__, cls.metadata,
Column('datafilesource_id',ForeignKey('datafilesources.id'),primary_key=True),
Column('%s_id'%cls.__tablename__,ForeignKey('%s.id'%cls.__tablename__), #FIXME .id may not be all?
primary_key=True)
)
return relationship('DataFileSource',secondary=datafilesource_association,
primaryjoin='{0}_dfs_assoc.c.{0}_id=={0}.c.id'.format(cls.__tablename__),
secondaryjoin='DataFileSource.id=={0}_dfs_assoc.c.datafilesource_id'.format(cls.__tablename__),
backref=backref('%s'%cls.__tablename__) #FIXME do we really want this?
)
class HasMetaDataSources:
@declared_attr
def metadatasources(cls):
metadatasource_association = Table('%s_mds_assoc'%cls.__tablename__, cls.metadata,
Column('metadatasource_id',ForeignKey('metadatasources.id'),primary_key=True),
Column('%s_id'%cls.__tablename__,ForeignKey('%s.id'%cls.__tablename__), #FIXME .id may not be all?
primary_key=True)
)
return relationship('MetaDataSource',secondary=metadatasource_association,
primaryjoin='{0}_mds_assoc.c.{0}_id=={0}.c.id'.format(cls.__tablename__),
secondaryjoin='MetaDataSource.id=={0}_mds_assoc.c.metadatasource_id'.format(cls.__tablename__),
backref=backref('%s'%cls.__tablename__) #FIXME do we really want this?
)
###-------------
### data mixins
###-------------
class MetaData: #the way to these is via ParentClass.MetaData which I guess makes sense?
#this stuff is not vectorized... a VectorizedData might be worth considering ala ArrayData
dateTime=Column(DateTime,default=datetime.now)
#value=Column(Float(53),nullable=False)
#abs_error=Column(Float(53))
value=Column( Array(Float(53)) ,nullable=False) #TODO
abs_error=Column( Array(Float(53)) ) #TODO
@validates('parent_id','metadatasource_id','dateTime','value','abs_error')
def _wo(self, key, value): return self._write_once(key, value)
def __init__(self,value,parent_id,metadatasource_id,abs_error=None,dateTime=None): #FIXME want *args @ all?
self.value=value
self.abs_error=abs_error
self.parent_id=int(parent_id) #this is here because of the write once
self.dateTime=dateTime
self.metadatasource_id=int(metadatasource_id)
def __int__(self):
return int(self.value) #FIXME TODO think about this
def __round__(self):
return round(self.value)
def __repr__(self):
mantissa=''
error=''
if self.metadatasource.mantissa: mantissa='mantissa: %s'%self.metadatasource.mantissa
if self.abs_error != None: error='%s %s'%(_plusMinus,self.abs_error)
return '\n%s %s %s %s %s %s'%(self.parent_id,self.dateTime,self.value,self.metadatasource.strHelper(),mantissa,error) #TODO this is where quantities really pays off
class HasMetaData: #FIXME based on how I'm using this right now, the relationship should probably return a dictionary collection indexed by metadatasource_id and ordered by datetime???
#I intentionally do not allow explicit groupings of metadata into higher dimensions, I don't think we will need that, but getting the alignment right for multiple multidimensional measurements will be a problem FIXME
@declared_attr
def metadata_(cls): #FIXME naming...
cls.MetaData = type(
'%sMetaData'%cls.__name__,
(MetaData, Base,),
{ '__tablename__':'%s_metadata'%cls.__tablename__,
'id':Column(Integer,primary_key=True),
'parent_id':Column(Integer, #FIXME nasty errors inbound
ForeignKey('%s.id'%cls.__tablename__),nullable=False),
'metadatasource_id':Column(Integer,
ForeignKey('metadatasources.id'),nullable=False),
'metadatasource':relationship('MetaDataSource'), #keep it one way
#'hardware_id':Column(Integer, #FIXME I *could* put this here but it seems like overkill?
#ForeignKey('hardware.id'),nullable=False), #since w/in experiment it wont change?
#conflict between desire for doccumentation and need for linkage when recording data
#this is the easiest solution but leads to massive data duplication
}
)
return relationship(cls.MetaData) #FIXME may need a primaryjoin on this
###----------------------------------
### data - datasource history mixins
###----------------------------------
class SWC_HW_EXP_BIND: #FIXME move directly into experiments?
datafile_subdata_id=None #TODO? as long as I can get to the things needed for analysis it should be ok
@validates('hardware_id')
def _wo(self, key, value): return self._write_once(key, value)
def __init__(self,parent_id,experiment_id,datafilesource_id,channel_id,hardware_id):
self.parent_id=int(parent_id)
self.experiment_id=int(experiment_id)
self.datafilesource_id=int(datafilesource_id) #this works fine, you just have to pass in SWC twice
self.channel_id=str(channel_id)
class HasSwcHwRecords: #TODO
"""Record of what hardware collected which software channel for which datafilesource/type for a given experiment and the subject that was associated with it what a mess, actually this is a reasonable solution"""
@declared_attr
def swc_hw_records(cls):
tname=cls.__tablename__
cls.SwcHwRecord=type(
'%s_SwcHwRecord'%cls.__name__,
(SWC_HW_EXP_BIND, Base,),
{ '__tablename__':'%s_swchwrecords'%tname,
'parent_id':Column(Integer,ForeignKey('%s.id'%tname), primary_key=True), #has to be here
'experiment_id':Column(Integer,ForeignKey('experiments.id'), primary_key=True),
'softwarechannel_id':Column(Integer,ForeignKey('softwarechannel.id'),primary_key=True),
#'datafilesource_id':Column(Integer, primary_key=True), #pull these from subject.hardware
#'channel_id':Column(String(20), primary_key=True),
#'__table_args__':(
#ForeignKeyConstraint(['datafilesource_id', 'channel_id'],
#['softwarechannel.datafilesource_id','softwarechannel.channel_id']), {}),
#'datafilesource':relationship('DataFileSource',
#primaryjoin='foreign(DataFileSource.id)==%s_swchwrecords.c.datafilesource_id'%tname,
#uselist=False),
'softwarechannel':relationship('SoftwareChannel',
primaryjoin='SoftwareChannel.id==%s_swchwrecords.c.softwarechannel_id'%tname,
#primaryjoin=('and_(SoftwareChannel.datafilesource_id=='
#'%s_swchwrecords.c.datafilesource_id,'
#'SoftwareChannel.channel_id==%s_swchwrecords.c.channel_id)')%(tname,tname),
uselist=False),
'hardware_id':Column(ForeignKey('hardware.id'),nullable=False),
}
)
return relationship(cls.SwcHwRecord)
class DFS_HW_BIND:
#how to use to associate a cell to a channel:
#the cell or subcompartment will have a hardware_id
#join that hardware_id against the DFS_MW_BIND and then the datafile structure when unpacked must match
#I should come up with a way to verify the match, even if it is very simple
@validates('hardware_id') #basically if shit breaks half way through, new experiment
def _wo(self, key, value): return self._write_once(key, value)
def __init__(self,Parent=None,DataFileSource=None,Hardware=None,parent_id=None,datafilesource_id=None,hardware_id=None):
self.parent_id=parent_id
self.datafilesource_id=datafilesource_id
self.hardware_id=hardware_id #FIXME probably need hardware=relationship()
self.AssignID(DataFileSource)
self.AssignID(Hardware)
if Parent:
if Parent.id:
self.parent_id=Parent.id
else:
raise AttributeError
class HasDfsHwRecords: #we bind DFSes to hardware that collects that datafile property #FIXME multiple hardware
@declared_attr
def dfs_hw_records(cls):
cls.DfsHwRecord = type(
'%s_DfsHwRecord'%cls.__name__,
(DFS_HW_BIND, Base,),
{ '__tablename__':'%s_dfshwrecord'%cls.__tablename__,
'parent_id':Column(Integer,
ForeignKey('%s.id'%cls.__tablename__),primary_key=True),
'datafilesource_id':Column(Integer,
ForeignKey('datafilesources.id'),primary_key=True),
'hardware_id':Column(Integer, #FIXME relationship()
ForeignKey('hardware.id')) #FIXME there are multiple hardwares!
}
)
return relationship(cls.DfsHwRecord) #FIXME ideally this should trigger... :/
class MDS_HW_BIND:
"""Class that keeps a record of what hardware was used to record the metadata"""
@validates('hardware_id') #basically if shit breaks half way through, new experiment
def _wo(self, key, value): return self._write_once(key, value)
def __init__(self,parent_id,metadatasource_id,hardware_id):
self.parent_id=int(parent_id) #experiment_id
self.metadatasource_id=int(metadatasource_id) #FIXME watchout on the switch to string pk
self.hardware_id=int(hardware_id) #FIXME probably need hardware=relationship()
class HasMdsHwRecords: #TODO how to enforce a trigger on __init__ unforunately cant because parent_id is needed
#SessionEvents.after_flush()
@declared_attr
def mds_hw_records(cls):
cls.MdsHwRecord = type(
'%s_MdsHwRecord'%cls.__name__,
(MDS_HW_BIND, Base,),
{ '__tablename__':'%s_mdshwrecord'%cls.__tablename__,
'parent_id':Column(Integer, #experiment_id
ForeignKey('%s.id'%cls.__tablename__),primary_key=True),
'metadatasource_id':Column(Integer,
ForeignKey('metadatasources.id'),primary_key=True),
'hardware_id':Column(Integer,
ForeignKey('hardware.id'),nullable=False)
}
)
return relationship(cls.MdsHwRecord)
def __init__(self): #FIXME >_<
session=object_session(self)
mdses=session.query(self.type_id)[0].metadatasources
[session.add(self.MdsHwRecord(self.id,mds.id,mds.hardware_id)) for mds in mdses]
#who the fuck knows how to get event listeners to work for this >_<
###-------------
### file mixins
###-------------
class HasDataFiles:
@declared_attr
def datafiles(cls):
datafile_association = Table('%s_df_assoc'%cls.__tablename__, cls.metadata,
#Column('datafile_url',String,primary_key=True),
#Column('datafile_filename',String,primary_key=True),
#ForeignKeyConstraint(['datafile_url','datafile_filename'],
#['datafile.url','datafile.filename']),
Column('datafile_id', ForeignKey('datafile.id'), primary_key=True),
Column('%s_id'%cls.__tablename__, ForeignKey('%s.id'%cls.__tablename__),
primary_key=True),
)
return relationship('DataFile', secondary=datafile_association,
primaryjoin='{0}_df_assoc.c.{0}_id=={0}.c.id'.format(cls.__tablename__),
#secondaryjoin='and_(DataFile.url=={0}.datafile_url,DataFile.filename=={0}.datafile_filename)'.format(cls.__tablename__+'_df_assoc.c'),
secondaryjoin='DataFile.id=={0}.datafile_id'.format(cls.__tablename__+'_df_assoc.c'),
backref=backref('%s'%cls.__tablename__),
)
class HasFiles:
@declared_attr
def files(cls):
file_association = Table('%s_f_assoc'%cls.__tablename__, cls.metadata,
#Column('file_url',String,primary_key=True),
#Column('file_filename',String,primary_key=True),
#ForeignKeyConstraint(['file_url','file_filename'],
#['file.url','file.filename']),
Column('file_id', ForeignKey('file.id'), primary_key=True),
Column('%s_id'%cls.__tablename__, ForeignKey('%s.id'%cls.__tablename__),
primary_key=True),
)
return relationship('File', secondary=file_association,
primaryjoin='{0}_f_assoc.c.{0}_id=={0}.c.id'.format(cls.__tablename__),
#secondaryjoin='and_(File.url=={0}.file_url,File.filename=={0}.file_filename)'.format(cls.__tablename__+'_f_assoc.c'),
secondaryjoin='File.id=={0}.file_id'.format(cls.__tablename__+'_f_assoc.c'),
backref=backref('%s'%cls.__tablename__),
)
###---------------
### Has citeables
###---------------
class HasCiteables:
@declared_attr
def citeables(cls):
cite_association = Table('%s_citeables'%cls.__tablename__,cls.metadata,
Column('citeable_id', ForeignKey('citeable.id'), primary_key=True),
Column('%s_id'%cls.__tablename__, ForeignKey('%s.id'%cls.__tablename__), primary_key=True))
return relationship('Citeable', secondary=cite_association,backref=backref('%s_citer'%cls.__tablename__))
###--------------
### Has reagents
###--------------
class HasReagentTypes:
@declared_attr
def reagenttypes(cls):
reagenttype_association = Table('%s_reagenttypes'%cls.__tablename__,cls.metadata,
Column('reagenttype_id', ForeignKey('reagenttypes.id'), primary_key=True),
Column('%s_id'%cls.__tablename__, ForeignKey('%s.id'%cls.__tablename__), primary_key=True))
return relationship('ReagentType', secondary=reagenttype_association,backref=backref('%s_used'%cls.__tablename__))
class HasReagents:
@declared_attr
def reagents_used(cls): #FIXME figure out if we can get away with having only one things...
reagent_association = Table('%s_reagents'%cls.__tablename__,cls.metadata,
Column('reagent_id', ForeignKey('reagents.id'), primary_key=True),
Column('%s_id'%cls.__tablename__, ForeignKey('%s.id'%cls.__tablename__), primary_key=True),
#removed foreigkeyconstraint because switched reagents to a surrogate primary key
)
return relationship('Reagent', secondary=reagent_association,backref=backref('used_in_%s'%cls.__tablename__))
###--------------
### Has hardware
###--------------
class HasHardware:
@declared_attr
def hardware(cls):
hardware_association = Table('%s_hardware'%cls.__tablename__,cls.metadata,
Column('hardware_id', ForeignKey('hardware.id'), primary_key=True),
Column('%s_id'%cls.__tablename__, ForeignKey('%s.id'%cls.__tablename__), primary_key=True))
return relationship('Hardware', secondary=hardware_association,backref=backref('%s_used'%cls.__tablename__))
###-----------------
### Has experiments
###-----------------
class HasExperiments:
paramNames=tuple #persistable values that are not filled in at init
preStepNames=tuple
interStepNames=tuple
postStepNames=tuple
child_type=Base #FIXME?
@declared_attr
def experiments(cls):
experiments_association = Table('%s_experiments'%cls.__tablename__,cls.metadata,
Column('experiments_id', ForeignKey('experiments.id'), primary_key=True),
Column('%s_id'%cls.__tablename__, ForeignKey('%s.id'%cls.__tablename__), primary_key=True))
return relationship('Experiment', secondary=experiments_association,backref=backref('%s'%cls.__tablename__))
###-----------------------------------------
### Has properties, hstore, key/value store
###-----------------------------------------
class Properties: #FIXME HasKeyValueStore
"""Not for data!""" #TODO how to query this...
#FIXME this is hstore from postgres except slower and value is not a blob
key=Column(String(50),primary_key=True) #tattoo, eartag, name, *could* use coronal/sagital for slices, seems dubious... same with putting cell types in here... since those are technically results... #FIXME for some reason this accepts ints for a key... FIXME WATCH OUT for the type change on commit! it doesn't update the instace! make sure to expire them
value=Column(String(50)) #if the strings actually need to be longer than 50 we probably want something else
def __repr__(self):
return '\n%s {%s,%s}'%(self.parent_id,self.key,self.value)
def strHelper(self,depth=0):
from BWSDefinitions import *
LanguageUsed = 1 # 0 is japanese, 1 is translation patch, globals are bad I know
def SetLanguageUsed(v):
global LanguageUsed
LanguageUsed = v
class UnknownAttributeError(Exception):
pass
class UnknownCommandError(Exception):
pass
class UnknownItemError(Exception):
pass
def to_five_bit_signed(value):
if value >= 0:
return value & 0xF
else:
return (value + 0x10) & 0x1F
def to_six_bit_signed(value):
if value >= 0:
return value & 0x1F
else:
return (value + 0x30) & 0x3F
def to_eight_bit_signed(value):
if value >= 0:
return value & 0x7F
else:
return (value + 0x80) & 0xFF
def write_x_bits(buffer, offset, x_bits, bit_offset, value):
value &= ((1 << x_bits) - 1)
buffer[offset] = (buffer[offset] & (0xFF - (((1 << x_bits) - 1) << bit_offset) & 0xFF)) + ((value << bit_offset) & 0xFF)
if bit_offset + x_bits > 8:
buffer[offset + 1] = (buffer[offset + 1] & (0xFF - (((1 << x_bits) - 1) >> (8 - bit_offset)))) + (
value >> (8 - bit_offset))
def read_x_bits(buffer, offset, x_bits, bit_offset):
value = ((0xFF >> (8 - x_bits if 8 - x_bits > 0 else 0) << bit_offset) & 0xFF & buffer[offset]) >> bit_offset
if bit_offset + x_bits > 8:
value += (buffer[offset + 1] & ((1 << bit_offset + x_bits - 8) - 1)) << (8 - bit_offset)
return value
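# Illustrative sketch (not part of the original file): round-trip a 10-bit value that
# straddles a byte boundary using the helpers above. The demo function and buffer
# contents are assumptions added purely for demonstration.
def _demo_bit_roundtrip():
    buf = bytearray(2)
    write_x_bits(buf, 0, 10, 4, 0x2AB)  # low 4 bits of the field land in buf[0], the remaining 6 spill into buf[1]
    assert buf[0] == 0xB0 and buf[1] == 0x2A
    assert read_x_bits(buf, 0, 10, 4) == 0x2AB
    return buf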
def modify_x_bits(buffer, offset, x_bits, bit_offset, value, modifier, ma=65535, mi=0):
if modifier == 1:
value = read_x_bits(buffer, offset, x_bits, bit_offset) + value
elif modifier == -1:
value = read_x_bits(buffer, offset, x_bits, bit_offset) - value
elif modifier == 2:
value = int(read_x_bits(buffer, offset, x_bits, bit_offset) * value)
value = 0 if value < 0 else (1 << x_bits) - 1 if value >= 1 << x_bits else value
if modifier != 0: # if we just set the value, no checking here, for extreme cases
value = max(mi, min(ma, value))
write_x_bits(buffer, offset, x_bits, bit_offset, value)
def set_base(buffer, unit, stat, value):
value, modifier = value
offsets = UnitOffsets[LanguageUsed][0] + UnitToOffset[unit], UnitOffsets[LanguageUsed][1] + UnitToOffset[unit]
for offset in offsets:
if stat == "level" or stat == "lv" or stat == "rank":
modify_x_bits(buffer, offset + 20, 6, 0, value, modifier)
elif stat == "hp":
modify_x_bits(buffer, offset + 22, 7, 4, value, modifier)
elif stat == "strength" or stat == "str":
modify_x_bits(buffer, offset + 23, 5, 3, to_five_bit_signed(value), modifier)
elif stat == "speed" or stat == "spe" or stat == "spd":
modify_x_bits(buffer, offset + 24, 5, 0, to_five_bit_signed(value), modifier)
elif stat == "luck" or stat == "luk":
modify_x_bits(buffer, offset + 24, 5, 5, to_five_bit_signed(value), modifier)
elif stat == "defense" or stat == "def":
modify_x_bits(buffer, offset + 25, 5, 2, to_five_bit_signed(value), modifier)
elif stat == "mind" or stat == "magic" or stat == "mag":
modify_x_bits(buffer, offset + 25, 5, 7, to_five_bit_signed(value), modifier)
elif stat == "knife":
modify_x_bits(buffer, offset + 36, 10, 0, value * 10, modifier)
elif stat == "sword":
modify_x_bits(buffer, offset + 37, 10, 2, value * 10, modifier)
elif stat == "spear" or stat == "lance":
modify_x_bits(buffer, offset + 38, 10, 4, value * 10, modifier)
elif stat == "axe":
modify_x_bits(buffer, offset + 40, 10, 0, value * 10, modifier)
elif stat == "bow":
modify_x_bits(buffer, offset + 41, 10, 2, value * 10, modifier)
elif stat == "crossbow":
modify_x_bits(buffer, offset + 42, 10, 4, value * 10, modifier)
elif stat == "fire":
modify_x_bits(buffer, offset + 44, 10, 0, value * 10, modifier)
elif stat == "thunder":
modify_x_bits(buffer, offset + 45, 10, 2, value * 10, modifier)
elif stat == "wind":
modify_x_bits(buffer, offset + 46, 10, 4, value * 10, modifier)
elif stat == "holy" or stat == "light":
modify_x_bits(buffer, offset + 48, 10, 0, value * 10, modifier)
elif stat == "dark":
modify_x_bits(buffer, offset + 49, 10, 2, value * 10, modifier)
elif stat == "sshield":
modify_x_bits(buffer, offset + 50, 10, 4, value * 10, modifier)
elif stat == "mshield":
modify_x_bits(buffer, offset + 52, 10, 0, value * 10, modifier)
elif stat == "lshield":
modify_x_bits(buffer, offset + 53, 10, 2, value * 10, modifier)
elif stat == "offhand":
modify_x_bits(buffer, offset + 26, 4, 4, value + 1, modifier) # 0 is unequipped
elif stat == "mainhand":
modify_x_bits(buffer, offset + 27, 4, 0, value + 1, modifier) # 0 is unequipped
else:
raise UnknownAttributeError
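# Usage sketch (unit/stat keys come from BWSDefinitions; the names below are
# hypothetical): the value argument is a (value, modifier) pair, where modifier 0
# overwrites, 1 adds, -1 subtracts and 2 multiplies, and weapon-skill values are
# stored internally multiplied by 10:
#   set_base(save_buffer, "reese", "hp", (40, 0))      # set base HP to 40
#   set_base(save_buffer, "reese", "sword", (5, 1))    # raise sword skill by 5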
def set_growth(buffer, unit, stat, value):
value, modifier = value
offsets = GrowthOffsets[LanguageUsed][0] + (UnitToIndex[unit] - 1) * 32, GrowthOffsets[LanguageUsed][1] + (UnitToIndex[unit] - 1) * 32
if stat == "bracket" and not value.isdigit():
value = {"no": 1, "loose": 2, "tight": 3}[value]
for offset in offsets:
if stat == "hp":
modify_x_bits(buffer, offset, 7, 0, value, modifier)
elif stat == "strength" or stat == "str":
modify_x_bits(buffer, offset, 7, 7, value, modifier)
elif stat == "mind" or stat == "magic" or stat == "mag":
modify_x_bits(buffer, offset + 5, 7, 0, value, modifier)
elif stat == "speed" or stat == "spe" or stat == "spd":
modify_x_bits(buffer, offset + 4, 7, 2, value, modifier)
elif stat == "defense" or stat == "def":
modify_x_bits(buffer, offset + 1, 7, 6, value, modifier)
elif stat == "knife":
modify_x_bits(buffer, offset + 8, 4, 0, value // 10, modifier)
elif stat == "sword":
modify_x_bits(buffer, offset + 8, 4, 4, value // 10, modifier)
elif stat == "spear" or stat == "lance":
modify_x_bits(buffer, offset + 9, 4, 0, value // 10, modifier)
elif stat == "axe":
modify_x_bits(buffer, offset + 9, 4, 4, value // 10, modifier)
elif stat == "bow":
modify_x_bits(buffer, offset + 10, 4, 0, value // 10, modifier)
elif stat == "crossbow":
modify_x_bits(buffer, offset + 10, 4, 4, value // 10, modifier)
elif stat == "fire":
modify_x_bits(buffer, offset + 11, 4, 0, value // 10, modifier)
elif stat == "thunder":
modify_x_bits(buffer, offset + 11, 4, 4, value // 10, modifier)
elif stat == "wind":
modify_x_bits(buffer, offset + 12, 4, 0, value // 10, modifier)
elif stat == "holy" or stat == "light":
modify_x_bits(buffer, offset + 12, 4, 4, value // 10, modifier)
elif stat == "dark":
modify_x_bits(buffer, offset + 13, 4, 0, value // 10, modifier)
elif stat == "shield" or stat == "sshield" or stat == "mshield" or stat == "lshield":
modify_x_bits(buffer, offset + 13, 4, 4, value // 10, modifier)
elif stat == "bracket":
write_x_bits(buffer, offset + 2, 2, 5, value)
else:
raise UnknownAttributeError
def set_skill(buffer, unit, skill_name, value):
index = Skills.index(skill_name)
offset = UnitOffsets[LanguageUsed][0] + UnitToOffset[unit]
write_x_bits(buffer, offset + 56 + index // 8, 1, index & 0x7, value)
offset = UnitOffsets[LanguageUsed][1] + UnitToOffset[unit]
write_x_bits(buffer, offset + 56 + index // 8, 1, index & 0x7, value)
def set_item(buffer, unit, slot, item, durability, is_locked, is_dropped):
slot, _ = slot
durability, _ = durability
offsets = UnitOffsets[LanguageUsed][0] + UnitToOffset[unit] + 0xBC + slot * 8, UnitOffsets[LanguageUsed][1] + UnitToOffset[unit] + 0xBC + slot * 8
for offset in offsets:
write_x_bits(buffer, offset, 16, 0, item)
write_x_bits(buffer, offset + 2, 8, 4, durability)
write_x_bits(buffer, offset + 4, 1, 2, int(is_locked))
write_x_bits(buffer, offset + 3, 1, 7, int(is_dropped))
def set_bag_item(buffer, unit, slot, item, durability, is_locked, is_dropped):
pass
# TODO: implement this
def set_learned(buffer, unit, slot, skill, level):
slot, _ = slot
level, modifier = level
if isinstance(skill, str):
skill = Skills2.index(skill)
offset = GrowthOffsets[LanguageUsed][0] + (UnitToIndex[unit] - 1) * 32
modify_x_bits(buffer, offset + 20 + slot, 8, 0, level, modifier)
write_x_bits(buffer, offset + 26 + slot, 8, 0, skill)
offset = GrowthOffsets[LanguageUsed][1] + (UnitToIndex[unit] - 1) * 32
modify_x_bits(buffer, offset + 20 + slot, 8, 0, level, modifier)
write_x_bits(buffer, offset + 26 + slot, 8, 0, skill)
def set_support(buffer, unit, slot, source, amount):
pass
def set_item_stat(buffer, item, stat, value):
value, modifier = value
offsets = ItemOffsets[LanguageUsed][0] + (ItemToIndex[item] - 1) * 56, ItemOffsets[LanguageUsed][1] + (ItemToIndex[item] - 1) * 56
for offset in offsets:
if stat == "might":
modify_x_bits(buffer, offset, 6, 5, value, modifier)
elif stat == "hex":
modify_x_bits(buffer, offset + 1, 4, 3, value, modifier)
elif stat == "accuracy":
modify_x_bits(buffer, offset + 1, 7, 7, value, modifier)
elif stat == "weight":
modify_x_bits(buffer, offset + 2, 5, 6, value, modifier)
elif stat == "max_range" or stat == | |
"""
DynaMake module.
"""
# pylint: disable=too-many-lines,redefined-builtin,unspecified-encoding
import argparse
import asyncio
import logging
import os
import re
import shlex
import shutil
import sys
import warnings
from argparse import ArgumentParser
from argparse import Namespace
from contextlib import asynccontextmanager
from copy import copy
from datetime import datetime
from glob import glob as glob_files
from importlib import import_module
from inspect import getsourcefile
from inspect import getsourcelines
from inspect import iscoroutinefunction
from stat import S_ISDIR
from textwrap import dedent
from threading import current_thread
from typing import Any
from typing import AsyncGenerator
from typing import Awaitable
from typing import Callable
from typing import Coroutine
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Union
from typing import overload
from typing.re import Pattern # type: ignore # pylint: disable=import-error
from urllib.parse import quote_plus
import yaml # type: ignore
from aiorwlock import RWLock
from aiorwlock import _ReaderLock
from aiorwlock import _WriterLock
from sortedcontainers import SortedDict # type: ignore
from yaml import Dumper
from yaml import Loader
from yaml import Node
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.6.2-dev.1"
_REGEXP_ERROR_POSITION = re.compile(r"(.*) at position (\d+)")
__all__ = [
# Step decorator
"step",
"above",
"outputs",
"output",
"inputs",
"input",
# Build operations
"require",
"try_require",
"sync",
"spawn",
"shell",
"can_make",
# Glob operations
"Captured",
"NonOptionalException",
"glob_capture",
"glob_paths",
"glob_extract",
"glob_fmt",
# File system operations
"Stat",
# Collections of strings
"each_string",
"flatten",
# Annotated strings
"AnnotatedStr",
"copy_annotations",
"optional",
"is_optional",
"exists",
"is_exists",
"phony",
"is_phony",
"precious",
"is_precious",
# Custom command line arguments.
"Parameter",
"str2bool",
"str2enum",
"str2float",
"str2int",
"str2choice",
"str2list",
"str2optional",
# Builtin command line arguments.
"shell_executable",
"jobs",
"log_level",
"log_skipped_actions",
"no_actions",
"rebuild_changed_actions",
"persistent_directory",
"failure_aborts_build",
"remove_stale_outputs",
"touch_success_outputs",
"remove_failed_outputs",
"remove_empty_directories",
"default_shell_prefix",
"resource_parameters",
# Logging.
"Logger",
# Main function.
"make",
# Miscellaneous utilities.
"expand",
"clean_path",
"exec_file",
# Async wrappers.
"done",
"context",
# Read/write lock wrappers.
"reading",
"writing",
"locks",
]
def no_additional_complaints() -> None:
"""
Disable all warnings when aborting execution.
"""
logging.getLogger("asyncio").setLevel("CRITICAL")
warnings.simplefilter("ignore")
def capture2re(capture: str) -> str: # pylint: disable=too-many-statements
"""
Translate a capture pattern to the equivalent ``re.Pattern``.
"""
index = 0
size = len(capture)
results: List[str] = []
def _is_next(expected: str) -> bool:
nonlocal capture, index, size
return index < size and capture[index] == expected
def _invalid(reason: str = "") -> None:
nonlocal capture, index
no_additional_complaints()
raise RuntimeError(f'Invalid capture pattern:\n{capture}\n{index * " "}^ {reason}')
def _expect_close() -> None:
if not _is_next("}"):
_invalid("missing }")
nonlocal index
index += 1
def _parse_name(terminators: str) -> str:
nonlocal capture, index, size
start_index = index
while index < size and capture[index] not in terminators:
if capture[index] != "_" and not capture[index].isalnum():
_invalid("invalid captured name character")
index += 1
if index == start_index:
_invalid("empty captured name")
return capture[start_index:index]
def _parse_regexp() -> str:
nonlocal capture, index, size
if not _is_next(":"):
return ""
index += 1
start_index = index
while index < size and capture[index] != "}":
index += 1
if index == start_index:
_invalid("empty captured regexp")
return glob2re(capture[start_index:index])
def _parse_two_stars() -> None:
name = _parse_name("}")
regexp = _parse_regexp() or ".*"
_expect_close()
nonlocal capture, index, size, results
if results and results[-1] == "/" and index < size and capture[index] == "/":
index += 1
_append_regexp(name, regexp, "(?:", "/)?")
else:
_append_regexp(name, regexp)
def _parse_one_star() -> None:
name = _parse_name(":}")
regexp = _parse_regexp() or "[^/]*"
_expect_close()
_append_regexp(name, regexp)
def _parse_no_star() -> None:
results.append("{")
results.append(_parse_name(":}"))
_expect_close()
results.append("}")
def _append_regexp(name: str, regexp: str, prefix: str = "", suffix: str = "") -> None:
nonlocal results
results.append(prefix)
results.append("(?P<")
results.append(name)
results.append(">")
results.append(regexp)
results.append(")")
results.append(suffix)
while index < size:
char = capture[index]
index += 1
if char == "}" and _is_next("}"):
results.append("}}")
index += 1
elif char == "{" and _is_next("{"):
results.append("{{")
index += 1
elif char == "{" and _is_next("*"):
index += 1
if _is_next("*"):
index += 1
_parse_two_stars()
else:
_parse_one_star()
elif char == "{":
_parse_no_star()
elif char in "{}/":
results.append(char)
else:
results.append(re.escape(char))
return "".join(results)
def capture2glob(capture: str) -> str: # pylint: disable=too-many-statements
"""
Translate a capture pattern to the equivalent ``glob`` pattern.
"""
index = 0
size = len(capture)
results: List[str] = []
def _is_next(expected: str) -> bool:
nonlocal capture, index, size
return index < size and capture[index] == expected
def _invalid(reason: str = "") -> None:
nonlocal capture, index
no_additional_complaints()
raise RuntimeError(f'Invalid capture pattern:\n{capture}\n{index * " "}^ {reason}')
def _parse_glob(glob: str, terminators: str) -> None:
nonlocal capture, index, size
while index < size and capture[index] not in terminators:
index += 1
if index < size and capture[index] == ":":
index += 1
start_index = index
while index < size and capture[index] != "}":
index += 1
glob = capture[start_index:index]
if not _is_next("}"):
_invalid("missing }")
index += 1
results.append(glob)
while index < size:
char = capture[index]
index += 1
if char == "}" and _is_next("}"):
results.append("}")
index += 1
elif char == "{" and _is_next("{"):
results.append("{")
index += 1
elif char == "{" and _is_next("*"):
index += 1
if _is_next("*"):
index += 1
_parse_glob("**", "}")
else:
_parse_glob("*", ":}")
else:
results.append(char)
return "".join(results)
def _fmt_capture(kwargs: Dict[str, Any], capture: str) -> str: # pylint: disable=too-many-statements
index = 0
size = len(capture)
results: List[str] = []
def _is_next(expected: str) -> bool:
nonlocal capture, index, size
return index < size and capture[index] == expected
def _invalid(reason: str = "") -> None:
nonlocal capture, index
no_additional_complaints()
raise RuntimeError(f'Invalid capture pattern:\n{capture}\n{index * " "}^ {reason}')
def _expect_close() -> None:
if not _is_next("}"):
_invalid("missing }")
nonlocal index
index += 1
def _parse_name(terminators: str) -> str:
nonlocal capture, index, size
start_index = index
while index < size and capture[index] not in terminators:
if capture[index] != "_" and not capture[index].isalnum():
_invalid("invalid captured name character")
index += 1
if index == start_index:
_invalid("empty captured name")
return capture[start_index:index]
def _parse_regexp(to_copy: bool) -> None:
nonlocal capture, index, size
start_index = index
while index < size and capture[index] != "}":
index += 1
if to_copy:
results.append(capture[start_index:index])
while index < size:
char = capture[index]
index += 1
if char == "}" and _is_next("}"):
results.append("}}")
index += 1
elif char == "{" and _is_next("{"):
results.append("{{")
index += 1
elif char == "{":
stars = 0
while _is_next("*"):
index += 1
stars += 1
name = _parse_name(":}")
if name in kwargs:
results.append(kwargs[name].replace("{", "{{").replace("}", "}}"))
_parse_regexp(False)
_expect_close()
else:
results.append("{")
results.append(stars * "*")
results.append(name)
_parse_regexp(True)
_expect_close()
results.append("}")
else:
results.append(char)
return "".join(results)
def glob2re(glob: str) -> str: # pylint: disable=too-many-branches
"""
Translate a ``glob`` pattern to the equivalent ``re.Pattern`` (as a string).
This is subtly different from ``fnmatch.translate`` since we use it to match the result of a successful ``glob``
rather than to actually perform the ``glob``.
"""
index = 0
size = len(glob)
results: List[str] = []
while index < size:
char = glob[index]
index += 1
if char == "*":
if index < size and glob[index] == "*":
index += 1
if results and results[-1] == "/" and index < size and glob[index] == "/":
results.append("(.*/)?")
index += 1
else:
results.append(".*")
else:
results.append("[^/]*")
elif char == "?":
results.append("[^/]")
elif char == "[":
end_index = index
while end_index < size and glob[end_index] != "]":
end_index += 1
if end_index >= size:
results.append("\\[")
else:
characters = glob[index:end_index].replace("\\", "\\\\")
index = end_index + 1
results.append("[")
if characters[0] == "!":
results.append("^/")
characters = characters[1:]
elif characters[0] == "^":
results.append("\\")
results.append(characters)
results.append("]")
elif char in "{}/":
results.append(char)
else:
results.append(re.escape(char))
return "".join(results)
def _load_glob(loader: Loader, node: Node) -> Pattern:
return re.compile(glob2re(loader.construct_scalar(node)))
yaml.add_constructor("!g", _load_glob, Loader=yaml.FullLoader)
def _load_regexp(loader: Loader, node: Node) -> Pattern:
return re.compile(loader.construct_scalar(node))
yaml.add_constructor("!r", _load_regexp, Loader=yaml.FullLoader)
#: An arbitrarily nested list of strings.
#:
#: This should really have been ``Strings = Union[None, str, List[Strings]]`` but ``mypy`` can't handle nested types.
#: Therefore, do not use this as a return type; as much as possible, return a concrete type (``str``, ``List[str]``,
#: etc.). Instead use ``Strings`` as an argument type, for functions that :py:func:`dynamake.flatten` their arguments.
#: This will allow the callers to easily nest lists without worrying about flattening themselves.
Strings = Union[
None,
str,
Sequence[str],
Sequence[Sequence[str]],
Sequence[Sequence[Sequence[str]]],
Sequence[Sequence[Sequence[Sequence[str]]]],
]
#: Same as ``Strings`` but without the actual ``str`` type, for ``overload`` specifications.
NotString = Union[
None,
Sequence[str],
Sequence[Sequence[str]],
Sequence[Sequence[Sequence[str]]],
Sequence[Sequence[Sequence[Sequence[str]]]],
]
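# For example (illustrative), an arbitrarily nested argument such as
#   ["a.txt", ["b.txt", [None, "c.txt"]]]
# is accepted wherever ``Strings`` is expected and is walked by ``each_string`` /
# ``flatten`` into the flat sequence "a.txt", "b.txt", "c.txt".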
def each_string(*args: Strings) -> Iterator[str]:
"""
Iterate on all strings in an arbitrarily nested list of strings.
"""
for strings in args:
if isinstance(strings, str):
yield strings
elif strings is not None:
yield | |
if (@{{x}}['indexOf']("'") == -1)
return "'" + @{{x}}+ "'";
if (@{{x}}['indexOf']('"') == -1)
return '"' + @{{x}}+ '"';
var s = @{{x}}['$$replace'](new RegExp('"', "g"), '\\\\"');
return '"' + s + '"';
}
""")
if hasattr(x, '__repr__'):
if callable(x):
return x.__repr__(x)
return x.__repr__()
JS("""
if (t == "function")
return "<function " + @{{x}}['toString']() + ">";
// If we get here, x is an object. See if it's a Pyjamas class.
if (!@{{hasattr}}(@{{x}}, "__init__"))
return "<" + @{{x}}['toString']() + ">";
// Handle the common Pyjamas data types.
var constructor = "UNKNOWN";
constructor = @{{get_pyjs_classtype}}(@{{x}});
//alert("repr constructor: " + constructor);
// If we get here, the class isn't one we know -> return the class name.
// Note that we replace underscores with dots so that the name will
// (hopefully!) look like the original Python name.
// (XXX this was for pyjamas 0.4 but may come back in an optimised mode)
//var s = constructor['$$replace'](new RegExp('_', "g"), '.');
return "<" + constructor + " object>";
""")
def len(object):
v = 0
JS("""
if (typeof @{{object}}== 'undefined') {
throw @{{UndefinedValueError}}("obj");
}
if (@{{object}}=== null)
return @{{v}};
else if (typeof @{{object}}['__array'] != 'undefined')
@{{v}} = @{{object}}['__array']['length'];
else if (typeof @{{object}}['__len__'] == 'function')
@{{v}} = @{{object}}['__len__']();
else if (typeof @{{object}}['length'] != 'undefined')
@{{v}} = @{{object}}['length'];
else throw @{{TypeError}}("object has no len()");
if (@{{v}}['__number__'] & 0x06) return @{{v}};
""")
return INT(v)
def isinstance(object_, classinfo):
JS("""
if (typeof @{{object_}}== 'undefined') {
return false;
}
if (@{{object_}}== null) {
if (@{{classinfo}}== null) {
return true;
}
return false;
}
switch (@{{classinfo}}['__name__']) {
case 'float':
return typeof @{{object_}}== 'number' && @{{object_}}['__number__'] == 0x01 && isFinite(@{{object_}});
case 'int':
case 'float_int':
if (@{{object_}}!== null
&& @{{object_}}['__number__']) {
if (@{{object_}}['__number__'] == 0x02) {
return true;
}
if (isFinite(@{{object_}}) &&
Math['ceil'](@{{object_}}) == @{{object_}}) {
return true;
}
}
return false;
case 'basestring':
case 'str':
return typeof @{{object_}}== 'string';
case 'bool':
return typeof @{{object_}}== 'boolean';
case 'long':
return @{{object_}}['__number__'] == 0x04;
}
if (typeof @{{object_}}!= 'object' && typeof @{{object_}}!= 'function') {
return false;
}
""")
if _isinstance(classinfo, tuple):
if _isinstance(object_, tuple):
return True
for ci in classinfo:
if isinstance(object_, ci):
return True
return False
else:
return _isinstance(object_, classinfo)
def _isinstance(object_, classinfo):
JS("""
if ( @{{object_}}['__is_instance__'] !== true
|| @{{classinfo}}['__is_instance__'] === null) {
return false;
}
var __mro__ = @{{object_}}['__mro__'];
var n = __mro__['length'];
if (@{{classinfo}}['__is_instance__'] === false) {
while (--n >= 0) {
if (@{{object_}}['__mro__'][n] === @{{classinfo}}['prototype']) {
return true;
}
}
return false;
}
while (--n >= 0) {
if (@{{object_}}['__mro__'][n] === @{{classinfo}}['__class__']) return true;
}
return false;
""")
def issubclass(class_, classinfo):
if JS(""" typeof @{{class_}} == 'undefined' || @{{class_}} === null || @{{class_}}['__is_instance__'] !== false """):
raise TypeError("arg 1 must be a class")
if isinstance(classinfo, tuple):
for ci in classinfo:
if issubclass(class_, ci):
return True
return False
else:
if JS(""" typeof @{{classinfo}} == 'undefined' || @{{classinfo}}['__is_instance__'] !== false """):
raise TypeError("arg 2 must be a class or tuple of classes")
return _issubtype(class_, classinfo)
def _issubtype(object_, classinfo):
JS("""
if ( @{{object_}}['__is_instance__'] === null
|| @{{classinfo}}['__is_instance__'] === null) {
return false;
}
var __mro__ = @{{object_}}['__mro__'];
var n = __mro__['length'];
if (@{{classinfo}}['__is_instance__'] === false) {
while (--n >= 0) {
if (@{{object_}}['__mro__'][n] === @{{classinfo}}['prototype']) {
return true;
}
}
return false;
}
while (--n >= 0) {
if (@{{object_}}['__mro__'][n] === @{{classinfo}}['__class__']) return true;
}
return false;
""")
def __getattr_check(attr, attr_left, attr_right, attrstr,
bound_methods, descriptors,
attribute_checking, source_tracking):
"""
(function(){
var $pyjs__testval;
var v, vl; /* hmm.... */
if (bound_methods || descriptors) {
$pyjs__testval = (v=(vl=attr_left)[attr_right]) == null ||
((vl.__is_instance__) &&
typeof v == 'function');
if (descriptors) {
$pyjs__testval = $pyjs__testval ||
(typeof v['__get__'] == 'function');
}
$pyjs__testval = ($pyjs__testval ?
@{{getattr}}(vl, attr_right):
attr);
} else {
$pyjs__testval = attr;
}
return (typeof $pyjs__testval=='undefined'?
(function(){throw TypeError(attrstr + " is undefined");})():
$pyjs__testval);
})();
"""
pass
def getattr(obj, name, default_value=None):
JS("""
if (@{{obj}}=== null || typeof @{{obj}}== 'undefined') {
if (arguments['length'] != 3 || typeof @{{obj}}== 'undefined') {
throw @{{AttributeError}}("'" + @{{repr}}(@{{obj}}) + "' has no attribute '" + @{{name}}+ "'");
}
return @{{default_value}};
}
var mapped_name = attrib_remap['indexOf'](@{{name}}) < 0 ? @{{name}}:
'$$'+@{{name}};
if (typeof @{{obj}}[mapped_name] == 'undefined') {
if (arguments['length'] != 3) {
if (@{{obj}}['__is_instance__'] === true &&
typeof @{{obj}}['__getattr__'] == 'function') {
return @{{obj}}['__getattr__'](@{{name}});
}
throw @{{AttributeError}}("'" + @{{repr}}(@{{obj}}) + "' has no attribute '" + @{{name}}+ "'");
}
return @{{default_value}};
}
var method = @{{obj}}[mapped_name];
if (method === null) return method;
if (typeof method['__get__'] == 'function') {
if (@{{obj}}['__is_instance__']) {
return method['__get__'](@{{obj}}, @{{obj}}['__class__']);
}
return method['__get__'](null, @{{obj}}['__class__']);
}
if ( typeof method != 'function'
|| typeof method['__is_instance__'] != 'undefined'
|| @{{obj}}['__is_instance__'] !== true
|| @{{name}}== '__class__') {
return @{{obj}}[mapped_name];
}
var fnwrap = function() {
return method['apply'](@{{obj}},$pyjs_array_slice['call'](arguments));
};
fnwrap['__name__'] = @{{name}};
fnwrap['__args__'] = @{{obj}}[mapped_name]['__args__'];
fnwrap['__class__'] = @{{obj}}['__class__'];
fnwrap['__doc__'] = method['__doc__'] || '';
fnwrap['__bind_type__'] = @{{obj}}[mapped_name]['__bind_type__'];
if (typeof @{{obj}}[mapped_name]['__is_instance__'] != 'undefined') {
fnwrap['__is_instance__'] = @{{obj}}[mapped_name]['__is_instance__'];
} else {
fnwrap['__is_instance__'] = false;
}
return fnwrap;
""")
def _del(obj):
JS("""
if (typeof @{{obj}}['__delete__'] == 'function') {
@{{obj}}['__delete__'](@{{obj}});
} else {
delete @{{obj}};
}
""")
def delattr(obj, name):
JS("""
if (typeof @{{obj}}== 'undefined') {
throw @{{UndefinedValueError}}("obj");
}
if (typeof @{{name}}!= 'string') {
throw @{{TypeError}}("attribute name must be string");
}
if (@{{obj}}['__is_instance__'] && typeof @{{obj}}['__delattr__'] == 'function') {
@{{obj}}['__delattr__'](@{{name}});
return;
}
var mapped_name = attrib_remap['indexOf'](@{{name}}) < 0 ? @{{name}}:
'$$'+@{{name}};
if ( @{{obj}}!== null
&& (typeof @{{obj}}== 'object' || typeof @{{obj}}== 'function')
&& (typeof(@{{obj}}[mapped_name]) != "undefined") ){
if (@{{obj}}['__is_instance__']
&& typeof @{{obj}}[mapped_name]['__delete__'] == 'function') {
@{{obj}}[mapped_name]['__delete__'](@{{obj}});
} else {
delete @{{obj}}[mapped_name];
}
return;
}
if (@{{obj}}=== null) {
throw @{{AttributeError}}("'NoneType' object"+
"has no attribute '"+@{{name}}+"'");
}
if (typeof @{{obj}}!= 'object' && typeof @{{obj}}!= 'function') {
throw @{{AttributeError}}("'"+typeof(@{{obj}})+
"' object has no attribute '"+@{{name}}+"'");
}
throw @{{AttributeError}}(@{{obj}}['__name__']+
" instance has no attribute '"+ @{{name}}+"'");
""")
def setattr(obj, name, value):
JS("""
if (typeof @{{obj}}== 'undefined') {
throw @{{UndefinedValueError}}("obj");
}
if (typeof @{{name}}!= 'string') {
throw @{{TypeError}}("attribute name must be string");
}
if (@{{obj}}['__is_instance__'] && typeof @{{obj}}['__setattr__'] == 'function') {
@{{obj}}['__setattr__'](@{{name}}, @{{value}})
return;
}
if (attrib_remap['indexOf'](@{{name}}) >= 0) {
@{{name}}= '$$' + @{{name}};
}
if ( typeof @{{obj}}[@{{name}}] != 'undefined'
&& @{{obj}}['__is_instance__']
&& @{{obj}}[@{{name}}] !== null
&& typeof @{{obj}}[@{{name}}]['__set__'] == 'function') {
@{{obj}}[@{{name}}]['__set__'](@{{obj}}, @{{value}});
} else {
@{{obj}}[@{{name}}] = @{{value}};
}
""")
def hasattr(obj, name):
JS("""
if (typeof @{{obj}}== 'undefined') {
throw @{{UndefinedValueError}}("obj");
}
if (typeof @{{name}} != 'string') {
throw @{{TypeError}}("attribute name must be string");
}
if (@{{obj}}=== null) return false;
if (attrib_remap['indexOf'](@{{name}}) >= 0) {
return typeof @{{obj}}['$$'+@{{name}}] != 'undefined';
}
return typeof @{{obj}}[@{{name}}] != 'undefined';
""")
def dir(obj):
JS("""
if (typeof @{{obj}}== 'undefined') {
throw @{{UndefinedValueError}}("obj");
}
var properties=@{{list}}();
for (var property in @{{obj}}) {
if (property['substring'](0,2) == '$$') {
// handle back mapping of name
properties['append'](property['substring'](2));
} else if (attrib_remap['indexOf'](property) < 0) {
properties['append'](property);
}
}
return properties;
""")
#################
#+chopin
__builtin_vars__=['__doc__', '__module__', '__main__', '__dict__', '__is_instance__', '__name__', '__number__', '__md5__', '__mro__', '__super_classes__', '__sub_classes__', '__args__']
def vars(obj):
variables=dict()
for name in dir(obj):
v=getattr(obj, name)
if name not in __builtin_vars__ and not callable(getattr(obj, name)):
variables[name]=v
return variables
__id_current__=1
def id(obj):
JS("""
if(typeof @{{obj}}=='object'){
if (@{{obj}}.__pyjs_object_id__){
return @{{obj}}.__pyjs_object_id__;
}else{
@{{obj}}.__pyjs_object_id__=@{{__id_current__}};
@{{__id_current__}}+=1;
return @{{obj}}.__pyjs_object_id__;
}
}else{
return @{{obj}}
}
""")
#
##################
def filter(obj, method, sequence=None):
# object context is LOST when a method is passed, hence object must be passed separately
# to emulate python behaviour, should generate this code inline rather than as a function call
items = []
if sequence is None:
sequence = method
method = obj
for item in sequence:
if method(item):
items.append(item)
else:
for item in sequence:
if method.call(obj, item):
items.append(item)
return items
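# Usage sketch (illustrative; Widget/candidates are hypothetical names): with two
# arguments this behaves like the usual built-in, with three the first argument
# supplies the JavaScript ``this`` context for the compiled method:
#   filter(str.isdigit, ["1", "a", "2"])        # -> ["1", "2"]
#   filter(widget, Widget.accepts, candidates)  # calls Widget.accepts.call(widget, item)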
def map(obj, method, sequence=None):
items = []
if sequence is None:
sequence = method
method = obj
for item in sequence:
items.append(method(item))
else:
for item in sequence:
items.append(method.call(obj, item))
return items
def reduce(func, iterable, initializer=JS("(function(){return;})()")):
try:
iterable = iter(iterable)
except:
raise TypeError, "reduce() arg 2 must support iteration"
empty = True
for value in iterable:
empty = False
if JS("typeof @{{initializer}}== 'undefined'"):
initializer = value
else:
initializer = func(initializer, value)
if empty:
if JS("typeof @{{initializer}}== 'undefined'"):
raise TypeError, "reduce() | |
<reponame>huaweicloud/huaweicloud-sdk-python-v3
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowRecordSetWithLineResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'name': 'str',
'description': 'str',
'zone_id': 'str',
'zone_name': 'str',
'type': 'str',
'ttl': 'int',
'records': 'list[str]',
'created_at': 'str',
'updated_at': 'str',
'status': 'str',
'default': 'bool',
'project_id': 'str',
'links': 'PageLink',
'line': 'str',
'weight': 'int',
'health_check_id': 'str',
'alias_target': 'AliasTarget'
}
attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'zone_id': 'zone_id',
'zone_name': 'zone_name',
'type': 'type',
'ttl': 'ttl',
'records': 'records',
'created_at': 'created_at',
'updated_at': 'updated_at',
'status': 'status',
'default': 'default',
'project_id': 'project_id',
'links': 'links',
'line': 'line',
'weight': 'weight',
'health_check_id': 'health_check_id',
'alias_target': 'alias_target'
}
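# Note (not part of the generated SDK source): ``openapi_types`` declares each
# attribute's type so that ``to_dict`` below can recursively expand nested models
# such as ``links`` and ``alias_target``, while ``attribute_map`` records the JSON
# field name each attribute is (de)serialized to on the wire.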
def __init__(self, id=None, name=None, description=None, zone_id=None, zone_name=None, type=None, ttl=None, records=None, created_at=None, updated_at=None, status=None, default=None, project_id=None, links=None, line=None, weight=None, health_check_id=None, alias_target=None):
"""ShowRecordSetWithLineResponse - a model defined in huaweicloud sdk"""
super(ShowRecordSetWithLineResponse, self).__init__()
self._id = None
self._name = None
self._description = None
self._zone_id = None
self._zone_name = None
self._type = None
self._ttl = None
self._records = None
self._created_at = None
self._updated_at = None
self._status = None
self._default = None
self._project_id = None
self._links = None
self._line = None
self._weight = None
self._health_check_id = None
self._alias_target = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if description is not None:
self.description = description
if zone_id is not None:
self.zone_id = zone_id
if zone_name is not None:
self.zone_name = zone_name
if type is not None:
self.type = type
if ttl is not None:
self.ttl = ttl
if records is not None:
self.records = records
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if status is not None:
self.status = status
if default is not None:
self.default = default
if project_id is not None:
self.project_id = project_id
if links is not None:
self.links = links
if line is not None:
self.line = line
if weight is not None:
self.weight = weight
if health_check_id is not None:
self.health_check_id = health_check_id
if alias_target is not None:
self.alias_target = alias_target
@property
def id(self):
"""Gets the id of this ShowRecordSetWithLineResponse.
Record Set的ID。
:return: The id of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ShowRecordSetWithLineResponse.
Record Set的ID。
:param id: The id of this ShowRecordSetWithLineResponse.
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this ShowRecordSetWithLineResponse.
Record Set的名称。
:return: The name of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ShowRecordSetWithLineResponse.
Record Set的名称。
:param name: The name of this ShowRecordSetWithLineResponse.
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this ShowRecordSetWithLineResponse.
Record Set的描述信息。
:return: The description of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ShowRecordSetWithLineResponse.
Record Set的描述信息。
:param description: The description of this ShowRecordSetWithLineResponse.
:type: str
"""
self._description = description
@property
def zone_id(self):
"""Gets the zone_id of this ShowRecordSetWithLineResponse.
托管该记录的zone_id。
:return: The zone_id of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._zone_id
@zone_id.setter
def zone_id(self, zone_id):
"""Sets the zone_id of this ShowRecordSetWithLineResponse.
托管该记录的zone_id。
:param zone_id: The zone_id of this ShowRecordSetWithLineResponse.
:type: str
"""
self._zone_id = zone_id
@property
def zone_name(self):
"""Gets the zone_name of this ShowRecordSetWithLineResponse.
托管该记录的zone_name。
:return: The zone_name of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._zone_name
@zone_name.setter
def zone_name(self, zone_name):
"""Sets the zone_name of this ShowRecordSetWithLineResponse.
托管该记录的zone_name。
:param zone_name: The zone_name of this ShowRecordSetWithLineResponse.
:type: str
"""
self._zone_name = zone_name
@property
def type(self):
"""Gets the type of this ShowRecordSetWithLineResponse.
记录类型。
:return: The type of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ShowRecordSetWithLineResponse.
记录类型。
:param type: The type of this ShowRecordSetWithLineResponse.
:type: str
"""
self._type = type
@property
def ttl(self):
"""Gets the ttl of this ShowRecordSetWithLineResponse.
解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。
:return: The ttl of this ShowRecordSetWithLineResponse.
:rtype: int
"""
return self._ttl
@ttl.setter
def ttl(self, ttl):
"""Sets the ttl of this ShowRecordSetWithLineResponse.
解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。
:param ttl: The ttl of this ShowRecordSetWithLineResponse.
:type: int
"""
self._ttl = ttl
@property
def records(self):
"""Gets the records of this ShowRecordSetWithLineResponse.
域名解析后的值。
:return: The records of this ShowRecordSetWithLineResponse.
:rtype: list[str]
"""
return self._records
@records.setter
def records(self, records):
"""Sets the records of this ShowRecordSetWithLineResponse.
域名解析后的值。
:param records: The records of this ShowRecordSetWithLineResponse.
:type: list[str]
"""
self._records = records
@property
def created_at(self):
"""Gets the created_at of this ShowRecordSetWithLineResponse.
创建时间。
:return: The created_at of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this ShowRecordSetWithLineResponse.
创建时间。
:param created_at: The created_at of this ShowRecordSetWithLineResponse.
:type: str
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this ShowRecordSetWithLineResponse.
更新时间。
:return: The updated_at of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this ShowRecordSetWithLineResponse.
更新时间。
:param updated_at: The updated_at of this ShowRecordSetWithLineResponse.
:type: str
"""
self._updated_at = updated_at
@property
def status(self):
"""Gets the status of this ShowRecordSetWithLineResponse.
资源状态。
:return: The status of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ShowRecordSetWithLineResponse.
资源状态。
:param status: The status of this ShowRecordSetWithLineResponse.
:type: str
"""
self._status = status
@property
def default(self):
"""Gets the default of this ShowRecordSetWithLineResponse.
标识是否由系统默认生成,系统默认生成的Record Set不能删除。
:return: The default of this ShowRecordSetWithLineResponse.
:rtype: bool
"""
return self._default
@default.setter
def default(self, default):
"""Sets the default of this ShowRecordSetWithLineResponse.
标识是否由系统默认生成,系统默认生成的Record Set不能删除。
:param default: The default of this ShowRecordSetWithLineResponse.
:type: bool
"""
self._default = default
@property
def project_id(self):
"""Gets the project_id of this ShowRecordSetWithLineResponse.
该Record Set所属的项目ID。
:return: The project_id of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this ShowRecordSetWithLineResponse.
该Record Set所属的项目ID。
:param project_id: The project_id of this ShowRecordSetWithLineResponse.
:type: str
"""
self._project_id = project_id
@property
def links(self):
"""Gets the links of this ShowRecordSetWithLineResponse.
:return: The links of this ShowRecordSetWithLineResponse.
:rtype: PageLink
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ShowRecordSetWithLineResponse.
:param links: The links of this ShowRecordSetWithLineResponse.
:type: PageLink
"""
self._links = links
@property
def line(self):
"""Gets the line of this ShowRecordSetWithLineResponse.
解析线路ID。
:return: The line of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._line
@line.setter
def line(self, line):
"""Sets the line of this ShowRecordSetWithLineResponse.
解析线路ID。
:param line: The line of this ShowRecordSetWithLineResponse.
:type: str
"""
self._line = line
@property
def weight(self):
"""Gets the weight of this ShowRecordSetWithLineResponse.
解析记录的权重。
:return: The weight of this ShowRecordSetWithLineResponse.
:rtype: int
"""
return self._weight
@weight.setter
def weight(self, weight):
"""Sets the weight of this ShowRecordSetWithLineResponse.
解析记录的权重。
:param weight: The weight of this ShowRecordSetWithLineResponse.
:type: int
"""
self._weight = weight
@property
def health_check_id(self):
"""Gets the health_check_id of this ShowRecordSetWithLineResponse.
健康检查ID。
:return: The health_check_id of this ShowRecordSetWithLineResponse.
:rtype: str
"""
return self._health_check_id
@health_check_id.setter
def health_check_id(self, health_check_id):
"""Sets the health_check_id of this ShowRecordSetWithLineResponse.
健康检查ID。
:param health_check_id: The health_check_id of this ShowRecordSetWithLineResponse.
:type: str
"""
self._health_check_id = health_check_id
@property
def alias_target(self):
"""Gets the alias_target of this ShowRecordSetWithLineResponse.
:return: The alias_target of this ShowRecordSetWithLineResponse.
:rtype: AliasTarget
"""
return self._alias_target
@alias_target.setter
def alias_target(self, alias_target):
"""Sets the alias_target of this ShowRecordSetWithLineResponse.
:param alias_target: The alias_target of this ShowRecordSetWithLineResponse.
:type: AliasTarget
"""
self._alias_target = alias_target
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns | |
<reponame>kanepenley/OCS-Samples
# program.py
# Copyright 2019 OSIsoft, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: this script was designed using the v1.0
# version of the OMF specification, as outlined here:
# http://omf-docs.osisoft.com/en/v1.0
# For more info, see OMF Developer Companion Guide:
# http://omf-companion-docs.osisoft.com
#*************************************************************************************
# OMF_API_Python3
# Version 1.0.0.1
# 3-20-19
# ************************************************************************
# Import necessary packages
# ************************************************************************
import configparser
import json
import time
import datetime
import platform
import socket
import gzip
import random
import requests
import traceback
# ************************************************************************
# Specify options for sending web requests to the target PI System
# ************************************************************************
# Specifies whether we are sending to PI or OCS. The main changes are in the accepted messages and the URL.
sendingToOCS = True
# Specify whether to compress OMF message before
# sending it to ingress endpoint
USE_COMPRESSION = False
# Set this to true if going against self signed certs and you don't want to see the error
VERIFY_SSL = False
# Specify the timeout, in seconds, for sending web requests
# (if it takes longer than this to send a message, an error will be thrown)
WEB_REQUEST_TIMEOUT_SECONDS = 30
# Holder for the producer token. It is set from the configuration
producerToken = ""
# Holder for the omfEndPoint if sending to PI. It is set from the configuration
omfEndPoint = ""
# Holder for the omfEndPoint base if sending to OCS. Auth and OMF endpoint are built from this. It is set from the configuration
resourceBase = ""
# The version of the OMF messages
omfVersion = "1.0"
# Holders for data message values
integer_boolean_value = 0
string_boolean_value = "True"
integer_index1 = 0
integer_index2_1 = 1
integer_index2_2 = 1
# Token information
__expiration = 0
__token = ""
# Auth information. It is set from the configuration
clientId = ""
clientSecret = ""
def getToken():
# Gets the token for the OMF endpoint
global __expiration, __token, resourceBase, clientId, clientSecret, producerToken
if(not sendingToOCS):
return producerToken
if ((__expiration - time.time()) > 5 * 60):
return __token
# we can't short-circuit it, so we must go retrieve it.
discoveryUrl = requests.get(
resourceBase + "/identity/.well-known/openid-configuration",
headers= {"Accept" : "application/json"},
verify = VERIFY_SSL)
if discoveryUrl.status_code < 200 or discoveryUrl.status_code >= 300:
discoveryUrl.close()
print("Failed to get access token endpoint from discovery URL: {status}:{reason}".
format(status=discoveryUrl.status_code, reason=discoveryUrl.text))
raise ValueError
tokenEndpoint = json.loads(discoveryUrl.content)["token_endpoint"]
tokenInformation = requests.post(
tokenEndpoint,
data = {"client_id" : clientId,
"client_secret" : clientSecret,
"grant_type" : "client_credentials"},
verify = VERIFY_SSL)
token = json.loads(tokenInformation.content)
if token is None:
raise Exception("Failed to retrieve Token")
__expiration = float(token['expires_in']) + time.time()
__token = token['access_token']
return __token
# ************************************************************************
# Helper function: REQUIRED: wrapper function for sending an HTTPS message
# ************************************************************************
# Define a helper function to allow easily sending web request messages;
# this function can later be customized to allow you to port this script to other languages.
# All it does is take in a data object and a message type, and it sends an HTTPS
# request to the target OMF endpoint
def send_omf_message_to_endpoint(message_type, message_omf_json, action = 'create'):
# Sends the request out to the preconfigured endpoint..
global producerToken, omfEndPoint, omfVersion, sendingToOCS
# Compress json omf payload, if specified
compression = 'none'
if USE_COMPRESSION:
msg_body = gzip.compress(bytes(json.dumps(message_omf_json), 'utf-8'))
compression = 'gzip'
else:
msg_body = json.dumps(message_omf_json)
# Assemble headers
msg_headers = {
"Authorization": "Bearer %s" % getToken(),
'producertoken': getToken(),
'messagetype': message_type,
'action': action,
'messageformat': 'JSON',
'omfversion': omfVersion,
'compression': compression
}
# Send the request, and collect the response
response = requests.post(
omfEndPoint,
headers = msg_headers,
data = msg_body,
verify = VERIFY_SSL,
timeout = WEB_REQUEST_TIMEOUT_SECONDS
)
# response code in 200s if the request was successful!
if response.status_code < 200 or response.status_code >= 300:
response.close()
print('Response from relay was bad. "{0}" message: {1} {2}. Message holdings: {3}'.format(message_type, response.status_code, response.text, message_omf_json))
print()
raise Exception("OMF message was unsuccessful, {message_type}. {status}:{reason}".format(message_type=message_type, status=response.status_code, reason=response.text))
else:
print('Response from relay from the initial "{0}" message: {1} {2}'.format(message_type, response.status_code, response.text))
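# Usage sketch (illustrative payloads): container and data messages all go through
# this one wrapper; only the message type and JSON body change, e.g.
#   send_omf_message_to_endpoint("container", [
#       {"id": "Container1", "typeid": "FirstDynamicType"}])
#   send_omf_message_to_endpoint("data",
#       create_data_values_for_first_dynamic_type("Container1"))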
def checkValue(url):
# Sends the request out to the preconfigured endpoint..
global producerToken
# Assemble headers
msg_headers = {
"Authorization": "Bearer %s" % getToken()
}
print(getToken())
# Send the request, and collect the response
response = requests.get(
url,
headers = msg_headers,
verify = VERIFY_SSL,
timeout = WEB_REQUEST_TIMEOUT_SECONDS
)
# response code in 200s if the request was successful!
if response.status_code < 200 or response.status_code >= 300:
response.close()
print('Response from endpoint was bad. "{0}"'.format(response.status_code))
print()
raise Exception("OMF message was unsuccessful. {status}:{reason}".format( status=response.status_code, reason=response.text))
return response.text
def getCurrentTime():
# Returns the current time
return datetime.datetime.utcnow().isoformat() + 'Z'
# Creates a JSON packet containing data values for containers
# of type FirstDynamicType defined below
def create_data_values_for_first_dynamic_type(containerid):
# Returns a JSON representation of data for the first dynamic type.
return [
{
"containerid": containerid,
"values": [
{
"timestamp": getCurrentTime(),
"IntegerProperty": int(100*random.random())
}
]
}
]
# Creates a JSON packet containing data values for containers
# of type SecondDynamicType defined below
def create_data_values_for_second_dynamic_type(containerid):
# Returns a JSON representation of data for the second dynamic type.
global string_boolean_value
if string_boolean_value == "True":
string_boolean_value = "False"
else:
string_boolean_value = "True"
return [
{
"containerid": containerid,
"values": [
{
"timestamp": getCurrentTime(),
"NumberProperty1": 100*random.random(),
"NumberProperty2": 100*random.random(),
"StringEnum": string_boolean_value
}
]
}
]
# Creates a JSON packet containing data values for containers
# of type ThirdDynamicType defined below
def create_data_values_for_third_dynamic_type(containerid):
# Returns a JSON representation of data for the third dynamic type.
global integer_boolean_value
if integer_boolean_value == 0:
integer_boolean_value = 1
else:
integer_boolean_value = 0
return [
{
"containerid": containerid,
"values": [
{
"timestamp": getCurrentTime(),
"IntegerEnum": integer_boolean_value
}
]
}
]
# Creates a JSON packet containing data values for containers
# of type NonTimeStampIndex defined below
def create_data_values_for_NonTimeStampIndexAndMultiIndex_type(NonTimeStampIndexID, MultiIndexId):
# Returns a JSON representation of data for the non-timestamp-index and multi-index types.
global integer_index1
global integer_index2_1, integer_index2_2
integer_index1 = integer_index1 + 2
if integer_index2_2 % 3 == 0:
integer_index2_2 = 1
integer_index2_1 = integer_index2_1 +1
else:
integer_index2_2 = integer_index2_2 + 1
return [
{
"containerid": NonTimeStampIndexID,
"values": [
{
"Value": random.random()*88,
"Int_Key": integer_index1
},
{
"Value": random.random()*88,
"Int_Key": integer_index1 + 1
}
]
},
{
"containerid": MultiIndexId,
"values": [
{
"Value1": random.random()*-125,
"Value2": random.random()*42,
"IntKey": integer_index2_1,
"IntKey2": integer_index2_2
}
]
}
]
def oneTimeSendMessages(action = 'create'):
# Wrapper around all of the data and container messages.
global omfVersion
# ************************************************************************
# Send the types messages to define the types of streams that will be sent.
# These types are referenced in all later messages
# ************************************************************************
# The sample divides types, and sends static and dynamic types
# separatly only for readability; you can send all the type definitions
# in one message, as far as its size is below maximum allowed - 192K
# ************************************************************************
# Step 3
# Send a JSON packet to define static types
# Note for OCS this message is currently ignored.
send_omf_message_to_endpoint("type", [
{
"id": "FirstStaticType",
"name": "First static type",
"classification": "static",
"type": "object",
"description": "First static asset type",
"properties": {
"index": {
"type": "string",
"isindex": True,
"description": "not in use"
},
"name": {
"type": "string",
"isname": True,
"description": "not in use"
},
"StringProperty": {
"type": "string",
"description": "First static asset type's configuration attribute"
}
}
},
{
"id": "SecondStaticType",
"name": "Second static type",
"classification": "static",
"type": "object",
"description": "Second static asset type",
"properties": {
"index": {
"type": "string",
"isindex": True,
"description": "not in use"
},
"name": {
"type": "string",
"isname": True,
"description": "not in use"
},
"StringProperty": {
"type": "string",
"description": "Second static asset type's configuration attribute"
}
}
}
],
action)
# Step 4
# Send a JSON packet to define dynamic types
send_omf_message_to_endpoint("type", [
{
"id": "FirstDynamicType",
"name": "First dynamic type",
"classification": "dynamic",
"type": "object",
"description": "not in use",
"properties": {
"timestamp": {
"format": "date-time",
"type": "string",
"isindex": True,
"description": "not in use"
},
"IntegerProperty": {
"type": "integer",
"description": "PI point data referenced integer attribute"
}
}
},
| |
<reponame>tchin-divergent/tacs<gh_stars>0
import numpy as np
from tacs import TACS
import unittest
from mpi4py import MPI
'''
This is a base class for static problem unit test cases.
This base class will test function evaluations and total
and partial sensitivities for the user-specified problem
that inherits from it.
When the user creates a new test based on this class, three
methods are required to be defined in the child class.
1. setup_assembler
2. setup_tacs_vecs
3. setup_funcs
See the virtual method implementations for each method
below for more details.
NOTE: The child class must NOT implement its own setUp method
for the unittest class. This is handled in the base class.
'''
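# A minimal concrete test might look like the sketch below (illustrative only; the
# mesh/element construction inside setup_assembler depends on the problem at hand):
#
#   class ProblemStaticTest(StaticTestCase.StaticTest):
#       def setup_assembler(self, comm, dtype):
#           ...  # build and return the TACS assembler for the model under test
#       def setup_tacs_vecs(self, assembler, force_vec, dv_pert_vec,
#                           ans_pert_vec, xpts_pert_vec):
#           ...  # fill the load vector and the fd/cs perturbation vectors in place
#       def setup_funcs(self, assembler):
#           ...  # return (function_list, reference_values)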
class StaticTestCase:
class StaticTest(unittest.TestCase):
def setUp(self):
self.dtype = TACS.dtype
# Default fd/cs step size and tolerances
# Can be overridden in child class
if self.dtype == complex:
self.rtol = 1e-11
self.atol = 1e-8
self.dh = 1e-50
else:
self.rtol = 1e-2
self.atol = 1e-4
self.dh = 1e-5
# Set the MPI communicator
if not hasattr(self, 'comm'):
self.comm = MPI.COMM_WORLD
# Setup user-specified assembler for this test
self.assembler = self.setup_assembler(self.comm, self.dtype)
# Get the design variable values
self.dv0 = self.assembler.createDesignVec()
self.assembler.getDesignVars(self.dv0)
# Create tacs vectors and Matrix for linear/adjoint solve
self.res0 = self.assembler.createVec()
self.ans0 = self.assembler.createVec()
self.mat = self.assembler.createSchurMat()
# Jacobian matrix factors
self.alpha = 1.0
self.beta = 0.0
self.gamma = 0.0
# Initial nodal location vector
self.xpts0 = self.assembler.createNodeVec()
self.assembler.getNodes(self.xpts0)
# Create temporary dv vec for doing fd/cs
self.dv1 = self.assembler.createDesignVec()
self.ans1 = self.assembler.createVec()
self.xpts1 = self.assembler.createNodeVec()
# Setup force and perturbation vectors used for fd/cs projections
self.f = self.assembler.createVec()
self.dv_pert = self.assembler.createDesignVec()
self.ans_pert = self.assembler.createVec()
self.xpts_pert = self.assembler.createNodeVec()
# Populate force and perturbation vectors based on user-defined method
self.setup_tacs_vecs(self.assembler, self.f, self.dv_pert, self.ans_pert, self.xpts_pert)
# Zero out any bc nodes in the state variable vec (if the user didn't already do this)
self.assembler.applyBCs(self.ans_pert)
# Create the preconditioner for the corresponding matrix
self.pc = TACS.Pc(self.mat)
# Create GMRES solver object
subspace = 100
restarts = 2
self.gmres = TACS.KSM(self.mat, self.pc, subspace, restarts)
# Create the function list
self.func_list, self.func_ref = self.setup_funcs(self.assembler)
self.dfdu_list = []
self.adjoint_list = []
self.dfddv_list = []
self.dfdx_list = []
for i in range(len(self.func_list)):
self.dfdu_list.append(self.assembler.createVec())
self.adjoint_list.append(self.assembler.createVec())
self.dfddv_list.append(self.assembler.createDesignVec())
self.dfdx_list.append(self.assembler.createNodeVec())
def setup_assembler(self, comm, dtype):
"""
Setup mesh and tacs assembler for problem we will be testing.
Must be defined in child class that inherits from this class.
"""
raise NotImplementedError("Child class must implement a 'setup_assembler' method")
return
def setup_tacs_vecs(self, assembler, force_vec, dv_pert_vec, ans_pert_vec, xpts_pert_vec):
"""
Setup user-defined vectors for analysis and fd/cs sensitivity verification.
Must be defined in child class that inherits from this class.
"""
raise NotImplementedError("Child class must implement a 'setup_tacs_vecs' method")
return
def setup_funcs(self, assembler):
"""
Create a list of functions to be tested and their reference values for the problem.
Must be defined in child class that inherits from this class.
"""
raise NotImplementedError("Child class must implement a 'setup_funcs' method")
return
def test_solve(self):
"""
Test linear solve and function evaluations
"""
# Make sure vecs are initialized to zero
self.zero_tacs_vecs()
# solve
func_vals = self.run_solve()
# Test functions values against historical values
np.testing.assert_allclose(func_vals, self.func_ref, rtol=self.rtol, atol=self.atol)
def test_partial_dv_sensitivities(self):
"""
Test partial dv sensitivity against fd/cs
"""
# Make sure vecs are initialized to zero
self.zero_tacs_vecs()
# Initial solve
func_vals = self.run_solve()
# Compute the partial derivative w.r.t. material design variables
self.assembler.addDVSens(self.func_list, self.dfddv_list, 1.0)
# Accumulate sensitivity across all procs
self.set_tacs_vec_values(self.dfddv_list)
# Compute the total derivative w.r.t. material design variables using fd/cs
self.perturb_tacs_vec(self.dv1, self.dv0, self.dv_pert)
# Set the perturbed design variables
self.assembler.setDesignVars(self.dv1)
# Compute functions w/o resolving problem
func_vals_pert = self.assembler.evalFunctions(self.func_list)
# Compute approximate sens
fdv_sens_approx = self.compute_fdcs_approx(func_vals_pert, func_vals)
# Tests cs/fd against sensitivity from partial
for i in range(len(self.func_list)):
with self.subTest(function=self.func_list[i]):
dfddv_proj_i = self.dfddv_list[i].dot(self.dv_pert)
np.testing.assert_allclose(dfddv_proj_i, fdv_sens_approx[i],
rtol=self.rtol, atol=self.atol)
def test_partial_xpt_sensitivities(self):
"""
Test partial xpt sensitivity against fd/cs
"""
# Make sure vecs are initialized to zero
self.zero_tacs_vecs()
# Initial solve
func_vals = self.run_solve()
# Compute the total derivative w.r.t. nodal xpt locations
self.assembler.addXptSens(self.func_list, self.dfdx_list, 1.0)
# Accumulate sensitivity across all procs
self.set_tacs_vec_values(self.dfdx_list)
        # Compute the partial derivative w.r.t. nodal xpt locations using fd/cs
self.perturb_tacs_vec(self.xpts1, self.xpts0, self.xpts_pert)
# Set the perturbed node locations
self.assembler.setNodes(self.xpts1)
        # Compute functions without re-solving the problem
func_vals_pert = self.assembler.evalFunctions(self.func_list)
# Compute approximate sens
f_xpt_sens_approx = self.compute_fdcs_approx(func_vals_pert, func_vals)
# Tests cs/fd against sensitivity from partial
for i in range(len(self.func_list)):
with self.subTest(function=self.func_list[i]):
dfdx_proj_i = self.dfdx_list[i].dot(self.xpts_pert)
np.testing.assert_allclose(dfdx_proj_i, f_xpt_sens_approx[i], rtol=self.rtol, atol=self.atol)
def test_partial_sv_sensitivities(self):
"""
Test partial sv sensitivity against fd/cs
"""
# Make sure vecs are initialized to zero
self.zero_tacs_vecs()
# Initial solve
func_vals = self.run_solve()
# Compute the partial derivative w.r.t. state variables
self.assembler.addSVSens(self.func_list, self.dfdu_list, self.alpha, self.beta, self.gamma)
        # Compute the partial derivative w.r.t. state variables using fd/cs
self.perturb_tacs_vec(self.ans1, self.ans0, self.ans_pert)
# Set the perturbed state variables
self.assembler.setVariables(self.ans1)
        # Compute functions without re-solving the problem
func_vals_pert = self.assembler.evalFunctions(self.func_list)
# Compute approximate sens
f_u_sens_approx = self.compute_fdcs_approx(func_vals_pert, func_vals)
# Tests cs/fd against sensitivity from partial
for i in range(len(self.func_list)):
with self.subTest(function=self.func_list[i]):
dfdu_proj_i = self.dfdu_list[i].dot(self.ans_pert)
np.testing.assert_allclose(dfdu_proj_i, f_u_sens_approx[i],
rtol=self.rtol, atol=self.atol)
def test_total_dv_sensitivities(self):
"""
Test total dv sensitivity through adjoint against fd/cs
"""
# Make sure vecs are initialized to zero
self.zero_tacs_vecs()
# Initial solve
func_vals = self.run_solve()
# Compute the total derivative w.r.t. material design variables using adjoint
self.run_adjoints()
self.assembler.addDVSens(self.func_list, self.dfddv_list, 1.0)
self.assembler.addAdjointResProducts(self.adjoint_list, self.dfddv_list, -1.0)
# Accumulate sensitivity across all procs
self.set_tacs_vec_values(self.dfddv_list)
# Compute the total derivative w.r.t. material design variables using fd/cs
self.perturb_tacs_vec(self.dv1, self.dv0, self.dv_pert)
# Run perturbed solution
func_vals_pert = self.run_solve(dv=self.dv1)
# Compute approximate sens
fdv_sens_approx = self.compute_fdcs_approx(func_vals_pert, func_vals)
# Tests cs/fd against sensitivity from adjoint
for i in range(len(self.func_list)):
with self.subTest(function=self.func_list[i]):
dfddv_proj_i = self.dfddv_list[i].dot(self.dv_pert)
np.testing.assert_allclose(dfddv_proj_i, fdv_sens_approx[i],
rtol=self.rtol, atol=self.atol)
def test_total_xpt_sensitivities(self):
"""
Test total xpt sensitivity through adjoint against fd/cs
"""
# Make sure vecs are initialized to zero
self.zero_tacs_vecs()
# Initial solve
func_vals = self.run_solve()
# Compute the total derivative w.r.t. nodal xpt locations using adjoint
self.run_adjoints()
self.assembler.addXptSens(self.func_list, self.dfdx_list, 1.0)
self.assembler.addAdjointResXptSensProducts(self.adjoint_list, self.dfdx_list, -1.0)
# Accumulate sensitivity across all procs
self.set_tacs_vec_values(self.dfdx_list)
# Compute the total derivative w.r.t. nodal xpt locations using fd/cs
self.perturb_tacs_vec(self.xpts1, self.xpts0, self.xpts_pert)
# Run perturbed solution
func_vals_pert = self.run_solve(xpts=self.xpts1)
# Compute approximate sens
f_xpt_sens_approx = self.compute_fdcs_approx(func_vals_pert, func_vals)
# Tests cs/fd against sensitivity from adjoint
for i in range(len(self.func_list)):
with self.subTest(function=self.func_list[i]):
dfdx_proj_i = self.dfdx_list[i].dot(self.xpts_pert)
np.testing.assert_allclose(dfdx_proj_i, f_xpt_sens_approx[i], rtol=self.rtol, atol=self.atol)
def run_solve(self, dv=None, xpts=None):
"""
Run a linear solve at specified design point and return functions of interest
"""
if dv is None:
dv = self.dv0
if xpts is None:
xpts = self.xpts0
# Set the design variables
self.assembler.setDesignVars(dv)
# Set node locations
self.assembler.setNodes(xpts)
# Assemble the stiffness matrix
self.assembler.zeroVariables()
self.assembler.assembleJacobian(self.alpha, self.beta, self.gamma, self.res0, self.mat)
self.pc.factor()
# zero out bc terms in force
self.assembler.applyBCs(self.f)
# add force vector to residual (R = Ku - f)
self.res0.axpy(-1.0, self.f)
# Solve the linear system
self.gmres.solve(self.res0, self.ans0)
self.ans0.scale(-1.0)
# Update state variables with solution
self.assembler.setVariables(self.ans0)
func_vals = self.assembler.evalFunctions(self.func_list)
return np.array(func_vals)
def run_adjoints(self):
"""
Run adjoint solves for each function of interest
"""
# Set the design variables
self.assembler.setDesignVars(self.dv0)
# Set node locations
self.assembler.setNodes(self.xpts0)
# Assemble the transpose stiffness matrix
self.assembler.assembleJacobian(self.alpha, self.beta, self.gamma, None, self.mat, TACS.TRANSPOSE)
self.pc.factor()
# Solve for the adjoint variables
self.assembler.addSVSens(self.func_list, self.dfdu_list, self.alpha, self.beta, self.gamma)
for i in range(len(self.func_list)):
self.gmres.solve(self.dfdu_list[i], self.adjoint_list[i])
def zero_tacs_vecs(self):
"""
Reset all vectors associated with solution and adjoint
"""
# Zero solution vector
self.ans0.zeroEntries()
# Zero residual
self.res0.zeroEntries()
# Set state vars to zero
self.assembler.zeroVariables()
# Zero dv sens for each function
for dfddv in self.dfddv_list:
dfddv.zeroEntries()
# Zero xpt sens for each function
for dfdx in self.dfdx_list:
dfdx.zeroEntries()
# Zero sv sens for each function
for dfdu in self.dfdu_list:
dfdu.zeroEntries()
def set_tacs_vec_values(self, tacs_vecs):
"""
Begin setting the values: Collective on the TACS communicator
"""
for vec in tacs_vecs:
vec.beginSetValues()
vec.endSetValues()
def perturb_tacs_vec(self, vec_out, vec_in, vec_pert):
"""
Perform fd/cs perturbation on tacs vector as follows
vec_out = vec_in + scale * vec_pert
where:
scale = dh * 1j, in complex mode
scale = dh, in real mode
"""
vec_out.copyValues(vec_in)
if self.dtype == complex:
            vec_out.axpy(self.dh * 1j, vec_pert)
        else:
            vec_out.axpy(self.dh, vec_pert)
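    def compute_fdcs_approx(self, func_vals_pert, func_vals):
        """
        NOTE: compute_fdcs_approx is referenced by the tests above but its body is
        not included in this excerpt. The sketch below is a minimal, assumed
        implementation of the standard fd/cs directional-derivative formula,
        reusing the self.dh and self.dtype conventions from perturb_tacs_vec.
        """
        if self.dtype == complex:
            # Complex-step approximation: df ~= Im(f(x + i*dh*p)) / dh
            return np.imag(np.array(func_vals_pert)) / self.dh
        # Forward finite difference: df ~= (f(x + dh*p) - f(x)) / dh
        return (np.array(func_vals_pert) - np.array(func_vals)) / self.dh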
import numpy as np
import pytest
import pygeos
from pygeos import Geometry, GEOSException
from pygeos.testing import assert_geometries_equal
from .common import (
all_types,
empty,
empty_line_string,
empty_point,
empty_polygon,
line_string,
multi_point,
point,
point_z,
)
CONSTRUCTIVE_NO_ARGS = (
pygeos.boundary,
pygeos.centroid,
pygeos.convex_hull,
pygeos.envelope,
pygeos.extract_unique_points,
pygeos.normalize,
pygeos.point_on_surface,
)
CONSTRUCTIVE_FLOAT_ARG = (
pygeos.buffer,
pygeos.offset_curve,
pygeos.delaunay_triangles,
pygeos.simplify,
pygeos.voronoi_polygons,
)
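# For illustration, each CONSTRUCTIVE_FLOAT_ARG function takes a geometry plus a
# single float parameter, e.g.:
#   pygeos.buffer(point, 1.0)          # buffer distance
#   pygeos.simplify(line_string, 0.5)  # simplification tolerance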
@pytest.mark.parametrize("geometry", all_types)
@pytest.mark.parametrize("func", CONSTRUCTIVE_NO_ARGS)
def test_no_args_array(geometry, func):
actual = func([geometry, geometry])
assert actual.shape == (2,)
assert actual[0] is None or isinstance(actual[0], Geometry)
@pytest.mark.parametrize("geometry", all_types)
@pytest.mark.parametrize("func", CONSTRUCTIVE_FLOAT_ARG)
def test_float_arg_array(geometry, func):
if func is pygeos.offset_curve and pygeos.get_type_id(geometry) not in [1, 2]:
with pytest.raises(GEOSException, match="only accept linestrings"):
func([geometry, geometry], 0.0)
return
actual = func([geometry, geometry], 0.0)
assert actual.shape == (2,)
assert isinstance(actual[0], Geometry)
@pytest.mark.parametrize("geometry", all_types)
@pytest.mark.parametrize("reference", all_types)
def test_snap_array(geometry, reference):
actual = pygeos.snap([geometry, geometry], [reference, reference], tolerance=1.0)
assert actual.shape == (2,)
assert isinstance(actual[0], Geometry)
@pytest.mark.parametrize("func", CONSTRUCTIVE_NO_ARGS)
def test_no_args_missing(func):
actual = func(None)
assert actual is None
@pytest.mark.parametrize("func", CONSTRUCTIVE_FLOAT_ARG)
def test_float_arg_missing(func):
actual = func(None, 1.0)
assert actual is None
@pytest.mark.parametrize("geometry", all_types)
@pytest.mark.parametrize("func", CONSTRUCTIVE_FLOAT_ARG)
def test_float_arg_nan(geometry, func):
actual = func(geometry, float("nan"))
assert actual is None
def test_buffer_cap_style_invalid():
with pytest.raises(ValueError, match="'invalid' is not a valid option"):
pygeos.buffer(point, 1, cap_style="invalid")
def test_buffer_join_style_invalid():
with pytest.raises(ValueError, match="'invalid' is not a valid option"):
pygeos.buffer(point, 1, join_style="invalid")
def test_snap_none():
actual = pygeos.snap(None, point, tolerance=1.0)
assert actual is None
@pytest.mark.parametrize("geometry", all_types)
def test_snap_nan_float(geometry):
actual = pygeos.snap(geometry, point, tolerance=np.nan)
assert actual is None
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
def test_build_area_none():
actual = pygeos.build_area(None)
assert actual is None
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
@pytest.mark.parametrize(
"geom,expected",
[
(point, empty), # a point has no area
(line_string, empty), # a line string has no area
# geometry collection of two polygons are combined into one
(
Geometry(
"GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), POLYGON((1 1, 1 2, 2 2, 1 1)))"
),
Geometry("POLYGON ((0 0, 0 3, 3 3, 3 0, 0 0), (1 1, 2 2, 1 2, 1 1))"),
),
(empty, empty),
([empty], [empty]),
],
)
def test_build_area(geom, expected):
actual = pygeos.build_area(geom)
assert actual is not expected
assert actual == expected
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
def test_make_valid_none():
actual = pygeos.make_valid(None)
assert actual is None
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
@pytest.mark.parametrize(
"geom,expected",
[
(point, point), # a valid geometry stays the same (but is copied)
# an L shaped polygon without area is converted to a multilinestring
(
Geometry("POLYGON((0 0, 1 1, 1 2, 1 1, 0 0))"),
Geometry("MULTILINESTRING ((1 1, 1 2), (0 0, 1 1))"),
),
# a polygon with self-intersection (bowtie) is converted into polygons
(
Geometry("POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))"),
Geometry("MULTIPOLYGON (((1 1, 2 2, 2 0, 1 1)), ((0 0, 0 2, 1 1, 0 0)))"),
),
(empty, empty),
([empty], [empty]),
],
)
def test_make_valid(geom, expected):
actual = pygeos.make_valid(geom)
assert actual is not expected
# normalize needed to handle variation in output across GEOS versions
assert pygeos.normalize(actual) == expected
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
@pytest.mark.parametrize(
"geom,expected",
[
(all_types, all_types),
# first polygon is valid, second polygon has self-intersection
(
[
Geometry("POLYGON((0 0, 2 2, 0 2, 0 0))"),
Geometry("POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))"),
],
[
Geometry("POLYGON((0 0, 2 2, 0 2, 0 0))"),
Geometry(
"MULTIPOLYGON (((1 1, 0 0, 0 2, 1 1)), ((1 1, 2 2, 2 0, 1 1)))"
),
],
),
([point, None, empty], [point, None, empty]),
],
)
def test_make_valid_1d(geom, expected):
actual = pygeos.make_valid(geom)
# normalize needed to handle variation in output across GEOS versions
assert np.all(pygeos.normalize(actual) == pygeos.normalize(expected))
@pytest.mark.parametrize(
"geom,expected",
[
(point, point), # a point is always in normalized form
# order coordinates of linestrings and parts of multi-linestring
(
Geometry("MULTILINESTRING ((1 1, 0 0), (1 1, 1 2))"),
Geometry("MULTILINESTRING ((1 1, 1 2), (0 0, 1 1))"),
),
],
)
def test_normalize(geom, expected):
actual = pygeos.normalize(geom)
assert actual == expected
def test_offset_curve_empty():
actual = pygeos.offset_curve(empty_line_string, 2.0)
assert pygeos.is_empty(actual)
def test_offset_curve_distance_array():
# check that kwargs are passed through
result = pygeos.offset_curve([line_string, line_string], [-2.0, -3.0])
assert result[0] == pygeos.offset_curve(line_string, -2.0)
assert result[1] == pygeos.offset_curve(line_string, -3.0)
def test_offset_curve_kwargs():
# check that kwargs are passed through
result1 = pygeos.offset_curve(
line_string, -2.0, quadsegs=2, join_style="mitre", mitre_limit=2.0
)
result2 = pygeos.offset_curve(line_string, -2.0)
assert result1 != result2
def test_offset_curve_non_scalar_kwargs():
msg = "only accepts scalar values"
with pytest.raises(TypeError, match=msg):
pygeos.offset_curve([line_string, line_string], 1, quadsegs=np.array([8, 9]))
with pytest.raises(TypeError, match=msg):
pygeos.offset_curve(
[line_string, line_string], 1, join_style=["round", "bevel"]
)
with pytest.raises(TypeError, match=msg):
pygeos.offset_curve([line_string, line_string], 1, mitre_limit=[5.0, 6.0])
def test_offset_curve_join_style_invalid():
with pytest.raises(ValueError, match="'invalid' is not a valid option"):
pygeos.offset_curve(line_string, 1.0, join_style="invalid")
@pytest.mark.skipif(pygeos.geos_version < (3, 7, 0), reason="GEOS < 3.7")
@pytest.mark.parametrize(
"geom,expected",
[
(
pygeos.Geometry("LINESTRING (0 0, 1 2)"),
pygeos.Geometry("LINESTRING (1 2, 0 0)"),
),
(
pygeos.Geometry("LINEARRING (0 0, 1 2, 1 3, 0 0)"),
pygeos.Geometry("LINEARRING (0 0, 1 3, 1 2, 0 0)"),
),
(
pygeos.Geometry("POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))"),
pygeos.Geometry("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))"),
),
(
pygeos.Geometry(
"POLYGON((0 0, 10 0, 10 10, 0 10, 0 0), (2 2, 2 4, 4 4, 4 2, 2 2))"
),
pygeos.Geometry(
"POLYGON((0 0, 0 10, 10 10, 10 0, 0 0), (2 2, 4 2, 4 4, 2 4, 2 2))"
),
),
pytest.param(
pygeos.Geometry("MULTILINESTRING ((0 0, 1 2), (3 3, 4 4))"),
pygeos.Geometry("MULTILINESTRING ((1 2, 0 0), (4 4, 3 3))"),
marks=pytest.mark.skipif(
pygeos.geos_version < (3, 8, 1), reason="GEOS < 3.8.1"
),
),
(
pygeos.Geometry(
"MULTIPOLYGON (((0 0, 1 0, 1 1, 0 1, 0 0)), ((2 2, 2 3, 3 3, 3 2, 2 2)))"
),
pygeos.Geometry(
"MULTIPOLYGON (((0 0, 0 1, 1 1, 1 0, 0 0)), ((2 2, 3 2, 3 3, 2 3, 2 2)))"
),
),
# points are unchanged
(point, point),
(point_z, point_z),
(multi_point, multi_point),
# empty geometries are unchanged
(empty_point, empty_point),
(empty_line_string, empty_line_string),
(empty, empty),
(empty_polygon, empty_polygon),
],
)
def test_reverse(geom, expected):
assert_geometries_equal(pygeos.reverse(geom), expected)
@pytest.mark.skipif(pygeos.geos_version < (3, 7, 0), reason="GEOS < 3.7")
def test_reverse_none():
assert pygeos.reverse(None) is None
assert pygeos.reverse([None]).tolist() == [None]
geometry = pygeos.Geometry("POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))")
expected = pygeos.Geometry("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))")
result = pygeos.reverse([None, geometry])
assert result[0] is None
assert_geometries_equal(result[1], expected)
@pytest.mark.skipif(pygeos.geos_version < (3, 7, 0), reason="GEOS < 3.7")
@pytest.mark.parametrize("geom", ["Not a geometry", 1])
def test_reverse_invalid_type(geom):
with pytest.raises(TypeError, match="One of the arguments is of incorrect type"):
pygeos.reverse(geom)
@pytest.mark.parametrize(
"geom,expected",
[
# Point outside
("POINT (0 0)", "GEOMETRYCOLLECTION EMPTY"),
# Point inside
("POINT (15 15)", "POINT (15 15)"),
# Point on boundary
("POINT (15 10)", "GEOMETRYCOLLECTION EMPTY"),
# Line outside
("LINESTRING (0 0, -5 5)", "GEOMETRYCOLLECTION EMPTY"),
# Line inside
("LINESTRING (15 15, 16 15)", "LINESTRING (15 15, 16 15)"),
# Line on boundary
("LINESTRING (10 15, 10 10, 15 10)", "GEOMETRYCOLLECTION EMPTY"),
# Line splitting rectangle
("LINESTRING (10 5, 25 20)", "LINESTRING (15 10, 20 15)"),
],
)
def test_clip_by_rect(geom, expected):
geom, expected = pygeos.Geometry(geom), pygeos.Geometry(expected)
actual = pygeos.clip_by_rect(geom, 10, 10, 20, 20)
assert_geometries_equal(actual, expected)
@pytest.mark.parametrize(
"geom, rect, expected",
[
        # Polygon hole (CCW) fully on rectangle boundary
(
"POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 20 10, 20 20, 10 20, 10 10))",
(10, 10, 20, 20),
"GEOMETRYCOLLECTION EMPTY",
),
        # Polygon hole (CW) fully on rectangle boundary
(
"POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 10 20, 20 20, 20 10, 10 10))",
(10, 10, 20, 20),
"GEOMETRYCOLLECTION EMPTY",
),
        # Polygon fully within rectangle
(
"POLYGON ((1 1, 1 30, 30 30, 30 1, 1 1), (10 10, 20 10, 20 20, 10 20, 10 10))",
(0, 0, 40, 40),
"POLYGON ((1 1, 1 30, 30 30, 30 1, 1 1), (10 10, 20 10, 20 20, 10 20, 10 10))",
),
# Polygon overlapping rectangle
(
"POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 20 10, 20 20, 10 20, 10 10))",
(5, 5, 15, 15),
"POLYGON ((5 5, 5 15, 10 15, 10 10, 15 10, 15 5, 5 5))",
),
],
)
def test_clip_by_rect_polygon(geom, rect, expected):
geom, expected = pygeos.Geometry(geom), pygeos.Geometry(expected)
actual = pygeos.clip_by_rect(geom, *rect)
assert_geometries_equal(actual, expected)
@pytest.mark.parametrize("geometry", all_types)
def test_clip_by_rect_array(geometry):
actual = pygeos.clip_by_rect([geometry, geometry], 0.0, 0.0, 1.0, 1.0)
    assert actual.shape == (2,)
import copy
import random
from dataclasses import dataclass, replace
from typing import Iterable, List, Optional, Tuple, TypeVar, Union
from bs4 import BeautifulSoup
from bs4.element import Tag
from leginorma import ArticleStatus, LegifranceArticle, LegifranceSection, LegifranceText
from envinorma.io.parse_html import extract_table
from envinorma.models import ArreteMinisteriel, EnrichedString, Link, StructuredText, Table, standardize_title_date
from envinorma.structure import split_alineas_in_sections
from envinorma.title_detection import NumberingPattern, detect_patterns_if_exists, is_mainly_upper, is_probably_title
from .numbering_exceptions import EXCEPTION_PREFIXES, MAX_PREFIX_LEN
from .text_proximity import text_proximity
@dataclass
class TableReference:
table: Table
reference: str
@dataclass
class LinkReference:
reference: str
target: str
text: str
def _clean_title(str_: EnrichedString) -> EnrichedString:
return replace(str_, text=str_.text.strip().replace('\r\n', ' ').replace('\n', ' '))
def keep_visa_string(visas: List[str]) -> List[str]:
return [visa for visa in visas if visa[:2].lower() == 'vu']
def split_in_non_empty_html_line(html: str) -> List[str]:
return [x for x in html.split('<br/>') if x]
def _extract_visa(visa_raw: str) -> List[EnrichedString]:
return [_extract_links(str_) for str_ in keep_visa_string(split_in_non_empty_html_line(visa_raw))]
def remove_empty(strs: List[str]) -> List[str]:
stripped = [str_.strip() for str_ in strs]
return [str_ for str_ in stripped if str_]
def extract_alineas(html_text: str) -> List[str]:
soup = BeautifulSoup(html_text, 'html.parser')
for tag_type in ['sup', 'sub', 'font', 'strong', 'b', 'i', 'em']:
for tag in soup.find_all(tag_type):
tag.unwrap()
return [str(sstr) for sstr in BeautifulSoup(str(soup), 'html.parser').stripped_strings]
def _extract_placeholder_positions(text: str, placeholder: str) -> Tuple[str, List[int]]:
pieces = text.split(placeholder)
lengths = [len(piece) for piece in pieces]
cumulative_lengths = cumsum(lengths)
return ''.join(pieces), cumulative_lengths[:-1]
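# For illustration: with placeholder '{{X}}',
#   _extract_placeholder_positions('a{{X}}bc{{X}}d', '{{X}}') == ('abcd', [1, 3])
# i.e. the text with placeholders removed, plus the character positions where
# each placeholder used to sit.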
def secure_zip(*lists: List) -> Iterable[Tuple]:
lengths = [len(list_) for list_ in lists]
if len(set(lengths)) != 1:
raise ValueError(f'Lists have different lengths: {lengths}')
return zip(*lists)
_BR_PLACEHOLDER = '{{BR_PLACEHOLDER}}'
def _remove_tables(text: str) -> Tuple[str, List[TableReference]]:
soup = BeautifulSoup(text, 'html.parser')
tables: List[Table] = []
references: List[str] = []
for div in soup.find_all('table'):
reference = _generate_reference()
tables.append(extract_table(str(div)))
div.replace_with(f'{_BR_PLACEHOLDER}{reference}{_BR_PLACEHOLDER}') # type: ignore
references.append(reference)
table_refs = [TableReference(table, reference) for table, reference in zip(tables, references)]
return str(soup).replace(_BR_PLACEHOLDER, '<br/>'), table_refs
def _remove_links(text: str) -> Tuple[str, List[LinkReference]]:
soup = BeautifulSoup(text, 'html.parser')
links: List[LinkReference] = []
for tag in soup.find_all('a'):
if 'href' not in tag.attrs: # type: ignore
continue
reference = _generate_reference()
links.append(LinkReference(reference, _BASE_LEGIFRANCE_URL + tag['href'], tag.text)) # type: ignore
tag.replace_with(reference) # type: ignore
return str(soup), links
def remove_empty_enriched_str(strs: List[EnrichedString]) -> List[EnrichedString]:
return [str_ for str_ in strs if str_.text or str_.table]
TP = TypeVar('TP')
def _extract_first_non_null_elt(elements: List[TP]) -> Optional[TP]:
for element in elements:
if element is not None:
return element
return None
def select_alineas_for_splitting(alineas: List[str], pattern_names: List[Optional[NumberingPattern]]) -> List[bool]:
nb_alineas = len(alineas)
if nb_alineas != len(pattern_names):
raise ValueError(f'Expecting same lengths, received {nb_alineas} and {len(pattern_names)}')
first_pattern = _extract_first_non_null_elt(pattern_names)
if first_pattern is None:
return [False] * nb_alineas
return [pattern == first_pattern for pattern in pattern_names]
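# For illustration (pattern names below are hypothetical): with
#   pattern_names == [None, ROMAN, None, ROMAN, LETTERS]
# the first non-null pattern is ROMAN, so the result is
#   [False, True, False, True, False]
# i.e. only alineas matching the first detected pattern open a new section.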
def _build_structured_text(
title: str, alineas: List[str], pattern_names: List[Optional[NumberingPattern]]
) -> StructuredText:
if len(alineas) != len(pattern_names):
raise ValueError(f'{len(pattern_names)} pattern_names != {len(alineas)} alineas')
if not any(pattern_names):
outer_alineas = alineas
grouped_alineas: List[List[str]] = []
grouped_pattern_names: List[List[Optional[NumberingPattern]]] = []
else:
selected_alineas_for_section = select_alineas_for_splitting(alineas, pattern_names)
outer_alineas, grouped_alineas = split_alineas_in_sections(alineas, selected_alineas_for_section)
_, grouped_pattern_names = split_alineas_in_sections(pattern_names, selected_alineas_for_section)
return StructuredText(
_clean_title(_extract_links(title)),
remove_empty_enriched_str([_extract_links(al) for al in outer_alineas]),
[
_build_structured_text(alinea_group[0], alinea_group[1:], pattern_name_group[1:])
for alinea_group, pattern_name_group in zip(grouped_alineas, grouped_pattern_names)
],
None,
)
def extract_pattern_names(alineas: List[str]) -> List[Optional[NumberingPattern]]:
return detect_patterns_if_exists(alineas, (MAX_PREFIX_LEN, EXCEPTION_PREFIXES))
def _structure_text(title: str, alineas: List[str]) -> StructuredText:
pattern_names = extract_pattern_names(alineas)
return _build_structured_text(title, alineas, pattern_names)
REF = TypeVar('REF', bound=Union[TableReference, LinkReference])
def _find_reference(str_: EnrichedString, references: List[REF], exact_match: bool = True) -> Optional[REF]:
for reference in references:
if reference.reference in str_.text:
if exact_match and str_.text != reference.reference:
raise ValueError(f'There is sth else than a reference in this string: {str_}')
return reference
return None
def _find_references(str_: EnrichedString, references: List[REF]) -> List[REF]:
return [reference for reference in references if reference.reference in str_.text]
def _add_table_if_any(str_: EnrichedString, tables: List[TableReference]) -> EnrichedString:
match = _find_reference(str_, tables)
if not match:
return copy.deepcopy(str_)
return EnrichedString('', [], match.table)
def _put_tables_back(text: StructuredText, tables: List[TableReference]) -> StructuredText:
clean_title = _add_table_if_any(text.title, tables)
return StructuredText(
_clean_title(clean_title),
[_add_table_if_any(alinea, tables) for alinea in text.outer_alineas],
[_put_tables_back(section, tables) for section in text.sections],
None,
)
_LINK_PLACEHOLDER = '{{LINK}}'
def _add_links_if_any(str_: EnrichedString, links: List[LinkReference]) -> EnrichedString:
matches = _find_references(str_, links)
str_copy = copy.deepcopy(str_)
for match in matches:
str_copy.text = str_copy.text.replace(match.reference, f'{_LINK_PLACEHOLDER}{match.text}{_LINK_PLACEHOLDER}')
str_copy.text, positions = _extract_placeholder_positions(str_copy.text, _LINK_PLACEHOLDER)
for match, start, end in zip(matches, positions[::2], positions[1::2]):
str_copy.links.append(Link(match.target, start, end - start))
return str_copy
def _put_links_back(text: StructuredText, links: List[LinkReference]) -> StructuredText:
clean_title = _add_links_if_any(text.title, links)
return StructuredText(
_clean_title(clean_title),
[_add_links_if_any(alinea, links) for alinea in text.outer_alineas],
[_put_links_back(section, links) for section in text.sections],
None,
)
_WEIRD_ANNEXE = 'A N N E X E'
_ROMAN_REPLACERS: List[Tuple[str, str]] = [
('I X', 'IX'),
('V I I I', 'VIII'),
('V I I', 'VII'),
('V I', 'VI'),
('I V', 'IV'),
('I I I', 'III'),
('I I', 'II'),
]
_ROMAN_ANNEXES = [(f'{_WEIRD_ANNEXE} {_BEF}', f'ANNEXE {_AF}') for _BEF, _AF in _ROMAN_REPLACERS]
_ANNEXE_REPLACERS = [(f'{_WEIRD_ANNEXE} S', 'ANNEXES'), *_ROMAN_ANNEXES] + [(_WEIRD_ANNEXE, 'ANNEXE')]
def _replace_weird_annexe_words(str_: str) -> str:
res = str_
for bef, aft in _ANNEXE_REPLACERS:
res = res.replace(bef, aft)
return res
def remove_summaries(alineas: List[str]) -> List[str]:
i = 0
found = False
for i, alinea in enumerate(alineas):
if alinea == 'SOMMAIRE' and i + 1 < len(alineas) and alineas[i + 1] == 'Annexe I.':
found = True
break
if not found:
return alineas
for j in range(i + 1, len(alineas)):
if alineas[j] == "Modalités de calcul du dimensionnement du plan d'épandage.":
return alineas[:i] + alineas[j + 1 :]
return alineas
def _html_to_structured_text(html: str, extract_structure: bool = False) -> StructuredText:
html_with_correct_annexe = _replace_weird_annexe_words(html)
html_without_tables, tables = _remove_tables(html_with_correct_annexe)
html_without_links, links = _remove_links(html_without_tables)
alineas = extract_alineas(html_without_links)
filtered_alineas = remove_summaries(alineas)
if extract_structure:
final_text = _structure_text('', filtered_alineas)
else:
final_text = _build_structured_text('', filtered_alineas, [None for _ in range(len(filtered_alineas))])
return _put_tables_back(_put_links_back(final_text, links), tables)
def print_structured_text(text: StructuredText, prefix: str = '') -> None:
print(f'{prefix}{text.title}')
new_prefix = f'\t{prefix}'
print(new_prefix + f'\n{new_prefix}'.join([alinea.text for alinea in text.outer_alineas]))
for section in text.sections:
print_structured_text(section, new_prefix)
def cumsum(ints: List[int]) -> List[int]:
if not ints:
return []
res = [ints[0]]
for int_ in ints[1:]:
res.append(res[-1] + int_)
return res
_ALPHABET = '0123456789ABCDEF'
def _generate_random_string(size: int) -> str:
return ''.join([random.choice(_ALPHABET) for _ in range(size)]) # noqa: S311
_REF_SIG_LEFT = '$$REF_L$$'
_REF_SIG_RIGHT = '$$REF_R$$'
def _generate_reference() -> str:
return f'{_REF_SIG_LEFT}{_generate_random_string(6)}{_REF_SIG_RIGHT}'
_BASE_LEGIFRANCE_URL = 'https://www.legifrance.gouv.fr'
def _replace_link(link_tag: Tag, placeholder: str, add_legifrance_prefix: bool) -> Tuple[str, int]: # side effects
link_text = link_tag.text
link_tag.replace_with(placeholder + link_text) # type: ignore
return (_BASE_LEGIFRANCE_URL if add_legifrance_prefix else '') + link_tag['href'], len(link_text) # type: ignore
def _extract_links(text: str, add_legifrance_prefix: bool = True) -> EnrichedString:
soup = BeautifulSoup(text, 'html.parser')
placeholder = '{{{LINK}}}'
raw_links = [_replace_link(tag, placeholder, add_legifrance_prefix) for tag in soup.find_all('a')] # type: ignore
final_text, positions = _extract_placeholder_positions(soup.text, placeholder)
return EnrichedString(
final_text, [Link(target, position, size) for (target, size), position in secure_zip(raw_links, positions)]
)
def _move_upper_alineas_to_title_in_annexe(alineas: List[EnrichedString]) -> Tuple[str, List[EnrichedString]]:
first_lines = []
for i in [0, 1]:
if len(alineas) > i and is_mainly_upper(alineas[i].text):
first_lines.append(alineas[i].text)
else:
break
return ' '.join(first_lines), alineas[len(first_lines) :]
def _move_upper_alineas_to_title_in_article(alineas: List[EnrichedString]) -> Tuple[str, List[EnrichedString]]:
if not alineas:
return '', []
if is_probably_title(alineas[0].text):
return alineas[0].text, alineas[1:]
return '', alineas
def _generate_article_title(
article: LegifranceArticle, outer_alineas: List[EnrichedString]
) -> Tuple[EnrichedString, List[EnrichedString]]:
if article.num and 'annexe' in article.num.lower():
title, new_outer_alineas = _move_upper_alineas_to_title_in_annexe(outer_alineas)
final_title = f'{article.num} - {title}' if title else article.num
return EnrichedString(final_title), new_outer_alineas
title, new_outer_alineas = _move_upper_alineas_to_title_in_article(outer_alineas)
title_beginning = f'Article {article.num}' if article.num is not None else 'Article'
title_end = f' - {title}' if title else ''
return EnrichedString(title_beginning + title_end), new_outer_alineas
_EXISTING_INSTALLATIONS_PATTERN = 'dispositions applicables aux installations existantes'
def _contains_lower(strs: List[str], pattern: str) -> bool:
for str_ in strs:
if pattern in str_.lower():
return True
return False
def _is_about_existing_installations(article: LegifranceArticle, ascendant_titles: List[str]) -> bool:
if _contains_lower(ascendant_titles, _EXISTING_INSTALLATIONS_PATTERN):
return True
in_annexe = _contains_lower(ascendant_titles + [article.num or ''], 'annexe')
return in_annexe and _EXISTING_INSTALLATIONS_PATTERN in article.content.lower()
def _extract_text_from_legifrance_article(article: LegifranceArticle, ascendant_titles: List[str]) -> StructuredText:
structured_text = _html_to_structured_text(
article.content, not _is_about_existing_installations(article, ascendant_titles)
)
if structured_text.title.text:
raise ValueError(f'Should not happen. Article should not have titles. Article id : {article.id}')
title, outer_alineas = _generate_article_title(article, structured_text.outer_alineas)
return StructuredText(_clean_title(title), outer_alineas, structured_text.sections, None)
def _extract_text_from_legifrance_section(section: LegifranceSection, ascendant_titles: List[str]) -> StructuredText:
return StructuredText(
_clean_title(_extract_links(section.title)),
[],
_extract_sections(section.articles, section.sections, ascendant_titles),
None,
)
def _extract_structured_text(
section_or_article: Union[LegifranceSection, LegifranceArticle], ascendant_titles: List[str]
) -> StructuredText:
if isinstance(section_or_article, LegifranceSection):
return _extract_text_from_legifrance_section(section_or_article, ascendant_titles + [section_or_article.title])
return _extract_text_from_legifrance_article(section_or_article, ascendant_titles)
def _extract_sections(
articles: List[LegifranceArticle], sections: List[LegifranceSection], ascendant_titles: List[str]
) -> List[StructuredText]:
articles_and_sections: List[Union[LegifranceArticle, LegifranceSection]] = [*articles, *sections]
return [
_extract_structured_text(article_or_section, ascendant_titles)
for article_or_section in sorted(articles_and_sections, key=lambda x: x.int_ordre)
]
def _html_to_str(html: str) -> str:
return BeautifulSoup(html, 'html.parser').text
def _are_very_similar(article_1: LegifranceArticle, article_2: LegifranceArticle) -> bool:
return text_proximity(_html_to_str(article_1.content), _html_to_str(article_2.content)) >= 0.95
def _particular_case(article_1: LegifranceArticle, article_2: LegifranceArticle) -> bool:
return article_1.num == 'Annexe I' and article_2.num == 'Annexe I (suite)'
_ArticlePair = Tuple[LegifranceArticle, LegifranceArticle]
_ArticleGroup = Union[LegifranceArticle, _ArticlePair]
def _check_number_of_articles(groups: List[_ArticleGroup], expected_nb_articles: int) -> None:
    nb_articles = sum([1 if isinstance(group, LegifranceArticle) else 2 for group in groups])
between "'s"
if "'" in string:
# Split on possessive 's
dispos = []
prev = 0
for match in self.possessive.finditer(string):
dispos.append((prev, match.start()))
prev = match.end()
if prev < len(string):
dispos.append((prev, len(string)))
else:
# Shortcut if there's no apostrophe in the string
dispos = ((0, len(string)),)
# For each run between 's
for sc, ec in dispos:
# Split on boundary characters
for part_match in self.between.finditer(string, sc, ec):
part_start = part_match.start()
part_end = part_match.end()
if splitting:
# The point to start splitting at
prev = part_start
# Find transitions (e.g. "iW" or "a0")
for bmatch in bound.finditer(string, part_start, part_end):
# The point in the middle of the transition
pivot = bmatch.start() + 1
# Yield from the previous match to the transition
yield (prev, pivot)
# Make the transition the new starting point
prev = pivot
# If there's leftover text at the end, yield it too
if prev < part_end:
yield (prev, part_end)
else:
# Not splitting on transitions, just yield the part
yield (part_start, part_end)
def _merge(self, parts):
mergewords = self.mergewords
mergenums = self.mergenums
        # Current type (1=alpha, 2=digit)
last = 0
# Where to insert a merged term in the original list
insertat = 0
# Buffer for parts to merge
buf = []
# Iterate on a copy of the parts list so we can modify the original as
# we go
def insert_item(buf, at, newpos):
newtext = "".join(item[0] for item in buf)
newsc = buf[0][2] # start char of first item in buffer
newec = buf[-1][3] # end char of last item in buffer
            parts.insert(at, (newtext, newpos, newsc, newec))
for item in list(parts):
# item = (text, pos, startchar, endchar)
text = item[0]
pos = item[1]
# Set the type of this part
            if text.isalpha():
                this = 1
            elif text.isdigit():
                this = 2
            else:
                this = None
# Is this the same type as the previous part?
if (buf and (this == last == 1 and mergewords)
or (this == last == 2 and mergenums)):
# This part is the same type as the previous. Add it to the
# buffer of parts to merge.
buf.append(item)
else:
# This part is different than the previous.
if len(buf) > 1:
# If the buffer has at least two parts in it, merge them
# and add them to the original list of parts.
insert_item(buf, insertat, pos - 1)
insertat += 1
# Reset the buffer
buf = [item]
last = this
insertat += 1
# If there are parts left in the buffer at the end, merge them and add
# them to the original list.
if len(buf) > 1:
insert_item(buf, len(parts), pos)
def __call__(self, tokens):
mergewords = self.mergewords
mergenums = self.mergenums
# This filter renumbers tokens as it expands them. New position
# counter.
newpos = None
for t in tokens:
text = t.text
# If this is the first token we've seen, use it to set the new
# position counter
if newpos is None:
if t.positions:
newpos = t.pos
else:
# Token doesn't have positions, just use 0
newpos = 0
if ((text.isalpha() and (text.islower() or text.isupper()))
or text.isdigit()):
# Short-circuit the common cases of no delimiters, no case
# transitions, only digits, etc.
t.pos = newpos
yield t
newpos += 1
else:
# Split the token text on delimiters, word and/or number
# boundaries into a list of (text, pos, startchar, endchar)
# tuples
ranges = self._split(text)
parts = [(text[sc:ec], i + newpos, sc, ec)
for i, (sc, ec) in enumerate(ranges)]
# Did the split yield more than one part?
if len(parts) > 1:
# If the options are set, merge consecutive runs of all-
# letters and/or all-numbers.
if mergewords or mergenums:
self._merge(parts)
# Yield tokens for the parts
chars = t.chars
if chars:
base = t.startchar
for text, pos, startchar, endchar in parts:
t.text = text
t.pos = pos
if t.chars:
t.startchar = base + startchar
t.endchar = base + endchar
yield t
if parts:
# Set the new position counter based on the last part
newpos = parts[-1][1] + 1
class CompoundWordFilter(Filter):
"""Given a set of words (or any object with a ``__contains__`` method),
break any tokens in the stream that are composites of words in the word set
into their individual parts.
Given the correct set of words, this filter can break apart run-together
words and trademarks (e.g. "turbosquid", "applescript"). It can also be
useful for agglutinative languages such as German.
The ``keep_compound`` argument lets you decide whether to keep the
compound word in the token stream along with the word segments.
>>> cwf = CompoundWordFilter(wordset, keep_compound=True)
>>> analyzer = RegexTokenizer(r"\S+") | cwf
    >>> [t.text for t in analyzer("I do not like greeneggs and ham")]
["I", "do", "not", "like", "greeneggs", "green", "eggs", "and", "ham"]
>>> cwf.keep_compound = False
    >>> [t.text for t in analyzer("I do not like greeneggs and ham")]
["I", "do", "not", "like", "green", "eggs", "and", "ham"]
"""
def __init__(self, wordset, keep_compound=True):
"""
:param wordset: an object with a ``__contains__`` method, such as a
set, containing strings to look for inside the tokens.
:param keep_compound: if True (the default), the original compound
token will be retained in the stream before the subwords.
"""
self.wordset = wordset
self.keep_compound = keep_compound
def subwords(self, s, memo):
if s in self.wordset:
return [s]
if s in memo:
return memo[s]
for i in xrange(1, len(s)):
prefix = s[:i]
if prefix in self.wordset:
suffix = s[i:]
suffix_subs = self.subwords(suffix, memo)
if suffix_subs:
result = [prefix] + suffix_subs
memo[s] = result
return result
return None
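    # For illustration: with wordset = {"green", "eggs"},
    #   self.subwords("greeneggs", {}) -> ["green", "eggs"]
    #   self.subwords("ham", {})       -> None  (no decomposition found)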
def __call__(self, tokens):
keep_compound = self.keep_compound
memo = {}
subwords = self.subwords
for t in tokens:
subs = subwords(t.text, memo)
if subs:
if len(subs) > 1 and keep_compound:
yield t
for subword in subs:
t.text = subword
yield t
else:
yield t
class BiWordFilter(Filter):
"""Merges adjacent tokens into "bi-word" tokens, so that for example::
"the", "sign", "of", "four"
becomes::
"the-sign", "sign-of", "of-four"
This can be used to create fields for pseudo-phrase searching, where if
all the terms match the document probably contains the phrase, but the
searching is faster than actually doing a phrase search on individual word
terms.
The ``BiWordFilter`` is much faster than using the otherwise equivalent
``ShingleFilter(2)``.
"""
def __init__(self, sep="-"):
self.sep = sep
def __call__(self, tokens):
sep = self.sep
prev_text = None
prev_startchar = None
prev_pos = None
atleastone = False
for token in tokens:
# Save the original text of this token
text = token.text
# Save the original position
positions = token.positions
if positions:
ps = token.pos
# Save the original start char
chars = token.chars
if chars:
sc = token.startchar
if prev_text is not None:
# Use the pos and startchar from the previous token
if positions:
token.pos = prev_pos
if chars:
token.startchar = prev_startchar
# Join the previous token text and the current token text to
# form the biword token
token.text = "".join((prev_text, sep, text))
yield token
atleastone = True
# Save the originals and the new "previous" values
prev_text = text
if chars:
prev_startchar = sc
if positions:
prev_pos = ps
# If no bi-words were emitted, that is, the token stream only had
# a single token, then emit that single token.
if not atleastone:
yield token
class ShingleFilter(Filter):
"""Merges a certain number of adjacent tokens into multi-word tokens, so
that for example::
"better", "a", "witty", "fool", "than", "a", "foolish", "wit"
with ``ShingleFilter(3, ' ')`` becomes::
'better a witty', 'a witty fool', 'witty fool than', 'fool than a',
'than a foolish', 'a foolish wit'
This can be used to create fields for pseudo-phrase searching, where if
all the terms match the document probably contains the phrase, but the
searching is faster than actually doing a phrase search on individual word
terms.
    If you're using two-word shingles, you should use the functionally
    equivalent ``BiWordFilter`` instead because it's faster.
    """
: list
list of Obs, e.g. [obs1, obs2, obs3].
all_configs : bool
if True, the reweighted observables are normalized by the average of
the reweighting factor on all configurations in weight.idl and not
on the configurations in obs[i].idl.
"""
result = []
for i in range(len(obs)):
if len(obs[i].cov_names):
raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
if not set(obs[i].names).issubset(weight.names):
raise Exception('Error: Ensembles do not fit')
for name in obs[i].names:
if not set(obs[i].idl[name]).issubset(weight.idl[name]):
raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
new_samples = []
w_deltas = {}
for name in sorted(obs[i].names):
w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
if kwargs.get('all_configs'):
new_weight = weight
else:
new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
result.append(derived_observable(lambda x, **kwargs: x[0] / x[1], [tmp_obs, new_weight], **kwargs))
result[-1].reweighted = True
result[-1].is_merged = obs[i].is_merged
return result
def correlate(obs_a, obs_b):
"""Correlate two observables.
Parameters
----------
obs_a : Obs
First observable
obs_b : Obs
Second observable
Keep in mind to only correlate primary observables which have not been reweighted
yet. The reweighting has to be applied after correlating the observables.
Currently only works if ensembles are identical. This is not really necessary.
"""
if sorted(obs_a.names) != sorted(obs_b.names):
raise Exception('Ensembles do not fit')
if len(obs_a.cov_names) or len(obs_b.cov_names):
raise Exception('Error: Not possible to correlate Obs that contain covobs!')
for name in obs_a.names:
if obs_a.shape[name] != obs_b.shape[name]:
raise Exception('Shapes of ensemble', name, 'do not fit')
if obs_a.idl[name] != obs_b.idl[name]:
raise Exception('idl of ensemble', name, 'do not fit')
if obs_a.reweighted is True:
warnings.warn("The first observable is already reweighted.", RuntimeWarning)
if obs_b.reweighted is True:
warnings.warn("The second observable is already reweighted.", RuntimeWarning)
new_samples = []
new_idl = []
for name in sorted(obs_a.names):
new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
new_idl.append(obs_a.idl[name])
o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names}
o.reweighted = obs_a.reweighted or obs_b.reweighted
return o
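# Illustrative usage (ensemble name is an example only); both Obs must live on
# the same ensembles and configurations:
#   o1 = pseudo_Obs(1.0, 0.1, 'ens1')
#   o2 = pseudo_Obs(2.0, 0.2, 'ens1')
#   o12 = correlate(o1, o2)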
def covariance(obs1, obs2, correlation=False, **kwargs):
"""Calculates the covariance of two observables.
covariance(obs, obs) is equal to obs.dvalue ** 2
The gamma method has to be applied first to both observables.
If abs(covariance(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance
is constrained to the maximum value.
Keyword arguments
-----------------
correlation -- if true the correlation instead of the covariance is
returned (default False)
"""
def expand_deltas(deltas, idx, shape, new_idx):
"""Expand deltas defined on idx to a contiguous range [new_idx[0], new_idx[-1]].
        New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
common divisor of the step sizes is used as new step size.
Parameters
----------
deltas -- List of fluctuations
idx -- List or range of configs on which the deltas are defined.
Has to be a subset of new_idx.
shape -- Number of configs in idx.
new_idx -- List of configs that defines the new range.
"""
if type(idx) is range and type(new_idx) is range:
if idx == new_idx:
return deltas
ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
for i in range(shape):
ret[idx[i] - new_idx[0]] = deltas[i]
return ret
def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx, w_max):
gamma = np.zeros(w_max)
deltas1 = expand_deltas(deltas1, idx1, len(idx1), new_idx)
deltas2 = expand_deltas(deltas2, idx2, len(idx2), new_idx)
new_shape = len(deltas1)
max_gamma = min(new_shape, w_max)
# The padding for the fft has to be even
padding = new_shape + max_gamma + (new_shape + max_gamma) % 2
gamma[:max_gamma] += (np.fft.irfft(np.fft.rfft(deltas1, padding) * np.conjugate(np.fft.rfft(deltas2, padding)))[:max_gamma] + np.fft.irfft(np.fft.rfft(deltas2, padding) * np.conjugate(np.fft.rfft(deltas1, padding)))[:max_gamma]) / 2.0
return gamma
if set(obs1.names).isdisjoint(set(obs2.names)):
return 0.
if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'):
raise Exception('The gamma method has to be applied to both Obs first.')
dvalue = 0
e_gamma = {}
e_dvalue = {}
e_n_tauint = {}
e_rho = {}
for e_name in obs1.mc_names:
if e_name not in obs2.mc_names:
continue
idl_d = {}
r_length = []
for r_name in obs1.e_content[e_name]:
if r_name not in obs2.e_content[e_name]:
continue
idl_d[r_name] = _merge_idx([obs1.idl[r_name], obs2.idl[r_name]])
if isinstance(idl_d[r_name], range):
r_length.append(len(idl_d[r_name]))
else:
r_length.append((idl_d[r_name][-1] - idl_d[r_name][0] + 1))
if not r_length:
return 0.
w_max = max(r_length) // 2
e_gamma[e_name] = np.zeros(w_max)
for r_name in obs1.e_content[e_name]:
if r_name not in obs2.e_content[e_name]:
continue
e_gamma[e_name] += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name], w_max)
if np.all(e_gamma[e_name] == 0.0):
continue
e_shapes = []
for r_name in obs1.e_content[e_name]:
e_shapes.append(obs1.shape[r_name])
gamma_div = np.zeros(w_max)
e_N = 0
for r_name in obs1.e_content[e_name]:
if r_name not in obs2.e_content[e_name]:
continue
gamma_div += calc_gamma(np.ones(obs1.shape[r_name]), np.ones(obs2.shape[r_name]), obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name], w_max)
e_N += np.sum(np.ones_like(idl_d[r_name]))
e_gamma[e_name] /= gamma_div[:w_max]
e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], e_rho[e_name][1:])))
# Make sure no entry of tauint is smaller than 0.5
e_n_tauint[e_name][e_n_tauint[e_name] < 0.5] = 0.500000000001
window = min(obs1.e_windowsize[e_name], obs2.e_windowsize[e_name])
# Bias correction hep-lat/0306017 eq. (49)
e_dvalue[e_name] = 2 * (e_n_tauint[e_name][window] + obs1.tau_exp[e_name] * np.abs(e_rho[e_name][window + 1])) * (1 + (2 * window + 1) / e_N) * e_gamma[e_name][0] / e_N
dvalue += e_dvalue[e_name]
for e_name in obs1.cov_names:
if e_name not in obs2.cov_names:
continue
dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad)))
if np.abs(dvalue / obs1.dvalue / obs2.dvalue) > 1.0:
dvalue = np.sign(dvalue) * obs1.dvalue * obs2.dvalue
if correlation:
dvalue = dvalue / obs1.dvalue / obs2.dvalue
return dvalue
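# Illustrative usage (the gamma method has to be applied to both Obs first):
#   o1.gamma_method()
#   o2.gamma_method()
#   cov = covariance(o1, o2)
#   corr = covariance(o1, o2, correlation=True)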
def pseudo_Obs(value, dvalue, name, samples=1000):
"""Generate a pseudo Obs with given value, dvalue and name
Parameters
----------
value : float
central value of the Obs to be generated.
dvalue : float
error of the Obs to be generated.
name : str
name of the ensemble for which the Obs is to be generated.
samples: int
number of samples for the Obs (default 1000).
"""
if dvalue <= 0.0:
return Obs([np.zeros(samples) + value], [name])
else:
for _ in range(100):
deltas = [np.random.normal(0.0, dvalue * np.sqrt(samples), samples)]
deltas -= np.mean(deltas)
deltas *= dvalue / np.sqrt((np.var(deltas) / samples)) / np.sqrt(1 + 3 / samples)
deltas += value
res = Obs(deltas, [name])
res.gamma_method(S=2, tau_exp=0)
if abs(res.dvalue - dvalue) < 1e-10 * dvalue:
break
res._value = float(value)
return res
def import_jackknife(jacks, name, idl=None):
"""Imports jackknife samples and returns an Obs
Parameters
----------
jacks : numpy.ndarray
numpy array containing the mean value as zeroth entry and
the N jackknife samples as first to Nth entry.
name : str
name of the ensemble the samples are defined on.
"""
length = len(jacks) - 1
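    # The projection below undoes the jackknife averaging: assuming jacks[1:]
    # are the usual leave-one-out means m_j = (S - x_j) / (N - 1) with total
    # sum S, each original sample is recovered as
    #   x_k = sum_j m_j - (N - 1) * m_k,
    # which is exactly jacks[1:] @ (ones((N, N)) - (N - 1) * identity(N)).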
prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
samples = jacks[1:] @ prj
new_obs = Obs([samples], [name], idl=idl)
new_obs._value = jacks[0]
return new_obs
def merge_obs(list_of_obs):
"""Combine all observables in list_of_obs into one new observable
Parameters
----------
list_of_obs : list
list of the Obs object to be combined
It is not possible to combine obs which are based on the same replicum
"""
replist = [item for obs in list_of_obs for item in obs.names]
if (len(replist) == len(set(replist))) is False:
raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
if any([len(o.cov_names) for o in list_of_obs]):
raise Exception('Not possible to merge data that contains covobs!')
new_dict = {}
idl_dict = {}
for o in list_of_obs:
new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
for key in set(o.deltas) | set(o.r_values)})
idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
names = sorted(new_dict.keys())
o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names}
o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
return o
def cov_Obs(means, cov, name, grad=None):
"""Create an Obs based on mean(s) and a covariance matrix
Parameters
----------
mean : list of floats or float
N mean value(s) of the new Obs
cov : list or array
2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
name : str
identifier for the covariance matrix
grad : list or array
Gradient of the Covobs wrt. the means belonging to cov.
"""
def covobs_to_obs(co):
"""Make an Obs out of a Covobs
Parameters
----------
co : Covobs
Covobs to be embedded into the Obs
"""
o = Obs([], [])
        o._value = co.value
[0] + [item[2] for item in query_result] + [1],
)
auc = 0
for i in range(len(recall) - 1):
if recall[i + 1] - recall[i] != 0.0:
a = (precision[i + 1] - precision[i]) / (recall[i + 1] - recall[i])
b = precision[i + 1] - a * recall[i + 1]
auc = (
auc
+ a * (recall[i + 1] * recall[i + 1] - recall[i] * recall[i]) / 2
+ b * (recall[i + 1] - recall[i])
)
auc = -auc
if auc_prc:
return auc
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(8, 6)
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
param = {"color": color_dict(style_kwds, 0)}
ax.plot(recall, precision, **updated_dict(param, style_kwds))
ax.fill_between(
recall,
[0 for item in recall],
precision,
facecolor=color_dict(style_kwds, 0),
alpha=0.1,
)
ax.set_ylim(0, 1)
ax.set_xlim(0, 1)
ax.set_title("PRC Curve")
ax.text(
0.995,
0,
"AUC = " + str(round(auc, 4) * 100) + "%",
verticalalignment="bottom",
horizontalalignment="right",
fontsize=11.5,
)
ax.set_axisbelow(True)
ax.grid()
return tablesample(
values={"threshold": threshold, "recall": recall, "precision": precision},
)
# ---#
def randomized_features_search_cv(
estimator,
input_relation: Union[str, vDataFrame],
X: list,
y: str,
metric: str = "auto",
cv: int = 3,
pos_label: Union[int, float, str] = None,
cutoff: float = -1,
training_score: bool = True,
comb_limit: int = 100,
skip_error: bool = True,
print_info: bool = True,
**kwargs,
):
"""
---------------------------------------------------------------------------
Computes the k-fold grid search of an estimator using different features
combinations. It can be used to find the parameters which will optimize
the model.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str, optional
Metric used to do the model evaluation.
auto: logloss for classification & rmse for regression.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
training_score: bool, optional
If set to True, the training score will be computed with the validation score.
comb_limit: int, optional
Maximum number of features combinations used to train the model.
skip_error: bool, optional
If set to True and an error occurs, it will be displayed and not raised.
print_info: bool, optional
If set to True, prints the model information at each step.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if isinstance(X, str):
X = [X]
check_types(
[
("metric", metric, [str]),
("training_score", training_score, [bool]),
("skip_error", skip_error, [bool, str,]),
("print_info", print_info, [bool,]),
("comb_limit", comb_limit, [int,]),
]
)
if category_from_model_type(estimator.type)[0] == "regressor" and metric == "auto":
metric = "rmse"
elif metric == "auto":
metric = "logloss"
if len(X) < 20:
all_configuration = all_comb(X)
if len(all_configuration) > comb_limit and comb_limit > 0:
all_configuration = random.sample(all_configuration, comb_limit)
else:
all_configuration = []
for k in range(max(comb_limit, 1)):
config = sorted(random.sample(X, random.randint(1, len(X))))
if config not in all_configuration:
all_configuration += [config]
if verticapy.options["tqdm"] and ("tqdm" not in kwargs or ("tqdm" in kwargs and kwargs["tqdm"])) and print_info:
from tqdm.auto import tqdm
loop = tqdm(all_configuration)
else:
loop = all_configuration
data = []
for config in loop:
if config:
config = list(config)
try:
current_cv = cross_validate(
estimator,
input_relation,
config,
y,
metric,
cv,
pos_label,
cutoff,
True,
training_score,
tqdm=False,
)
if training_score:
keys = [elem for elem in current_cv[0].values]
data += [
(
config,
current_cv[0][keys[1]][cv],
current_cv[1][keys[1]][cv],
current_cv[0][keys[2]][cv],
current_cv[0][keys[1]][cv + 1],
current_cv[1][keys[1]][cv + 1],
)
]
if print_info:
print(f"Model: {str(estimator.__class__).split('.')[-1][:-2]}; Features: {config}; \033[91mTest_score: {current_cv[0][keys[1]][cv]}\033[0m; \033[92mTrain_score: {current_cv[1][keys[1]][cv]}\033[0m; \033[94mTime: {current_cv[0][keys[2]][cv]}\033[0m;")
else:
keys = [elem for elem in current_cv.values]
data += [
(
config,
current_cv[keys[1]][cv],
current_cv[keys[2]][cv],
current_cv[keys[1]][cv + 1],
)
]
if print_info:
print(f"Model: {str(estimator.__class__).split('.')[-1][:-2]}; Features: {config}; \033[91mTest_score: {current_cv[keys[1]][cv]}\033[0m; \033[94mTime:{current_cv[keys[2]][cv]}\033[0m;")
except Exception as e:
if skip_error and skip_error != "no_print":
print(e)
elif not(skip_error):
raise (e)
if not(data):
if training_score:
return tablesample(
{
"parameters": [],
"avg_score": [],
"avg_train_score": [],
"avg_time": [],
"score_std": [],
"score_train_std": [],
}
)
else:
return tablesample(
{
"parameters": [],
"avg_score": [],
"avg_time": [],
"score_std": [],
}
)
reverse = reverse_score(metric)
data.sort(key=lambda tup: tup[1], reverse=reverse)
if training_score:
result = tablesample(
{
"features": [elem[0] for elem in data],
"avg_score": [elem[1] for elem in data],
"avg_train_score": [elem[2] for elem in data],
"avg_time": [elem[3] for elem in data],
"score_std": [elem[4] for elem in data],
"score_train_std": [elem[5] for elem in data],
}
)
if print_info and ("final_print" not in kwargs or kwargs["final_print"] != "no_print"):
print("\033[1mRandomized Features Search Selected Model\033[0m")
print(f"{str(estimator.__class__).split('.')[-1][:-2]}; Features: {result['features'][0]}; \033[91mTest_score: {result['avg_score'][0]}\033[0m; \033[92mTrain_score: {result['avg_train_score'][0]}\033[0m; \033[94mTime: {result['avg_time'][0]}\033[0m;")
else:
result = tablesample(
{
"features": [elem[0] for elem in data],
"avg_score": [elem[1] for elem in data],
"avg_time": [elem[2] for elem in data],
"score_std": [elem[3] for elem in data],
}
)
if print_info and ("final_print" not in kwargs or kwargs["final_print"] != "no_print"):
print("\033[1mRandomized Features Search Selected Model\033[0m")
print(f"{str(estimator.__class__).split('.')[-1][:-2]}; Features: {result['features'][0]}; \033[91mTest_score: {result['avg_score'][0]}\033[0m; \033[94mTime: {result['avg_time'][0]}\033[0m;")
return result
# ---#
def randomized_search_cv(
estimator,
input_relation: Union[str, vDataFrame],
X: list,
y: str,
metric: str = "auto",
cv: int = 3,
pos_label: Union[int, float, str] = None,
cutoff: float = -1,
nbins: int = 1000,
lmax: int = 4,
optimized_grid: int = 1,
print_info: bool = True,
):
"""
---------------------------------------------------------------------------
Computes the K-Fold randomized search of an estimator.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str, optional
Metric used to do the model evaluation.
auto: logloss for classification & rmse for regression.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
nbins: int, optional
        Number of bins used to compute the different parameter categories.
lmax: int, optional
Maximum length of each parameter list.
optimized_grid: int, optional
If set to 0, the randomness is based on the input parameters.
If set to 1, the randomness is limited to some parameters while others
are picked based on a default grid.
If set to 2, there is no randomness and a default grid is returned.
print_info: bool, optional
If set to True, prints the model information at each step.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
param_grid = gen_params_grid(estimator, nbins, len(X), lmax, optimized_grid)
return grid_search_cv(
estimator,
param_grid,
input_relation,
X,
y,
metric,
cv,
pos_label,
cutoff,
True,
"no_print",
print_info,
)
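# A minimal usage sketch (not part of the original module): the estimator class,
# import path, model name, relation, and column names below are placeholders and
# assume an open VerticaPy connection.
def _randomized_search_example(vdf):
    from verticapy.learn.linear_model import LogisticRegression  # assumed import path

    model = LogisticRegression("public.lr_random_search")  # hypothetical model name
    result = randomized_search_cv(
        model,
        vdf,
        ["age", "fare", "pclass"],
        "survived",
        metric="auto",
        cv=3,
        lmax=4,
        optimized_grid=1,
        print_info=False,
    )
    # The returned tablesample is expected to be sorted by score, with the best
    # parameter combination in its first row.
    return result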
# ---#
def roc_curve(
y_true: str,
y_score: | |
# !/usr/bin/env python3
# -*-coding:utf-8-*-
# @file: bilateral_filter_np.py
# @brief:
# @author: <NAME>, <EMAIL>, <EMAIL>
# @version: 0.0.1
# @creation date: 26-01-2020
# @last modified: Sun 26 Jan 2020 03:06:15 AM EST
# > see: http://jamesgregson.ca/bilateral-filtering-in-python.html
import numpy as np
import src.pfmutil as pfm
import sys
import math
import torch
import torch.nn as nn
# NOTE: This file is not really used in the project; it only exists to verify the PyTorch version.
def filter_bilateral( img_in, sigma_s, sigma_v, reg_constant=1e-8 ):
"""Simple bilateral filtering of an input image
Performs standard bilateral filtering of an input image. If padding is desired,
img_in should be padded prior to calling
Args:
img_in (ndarray) monochrome input image
sigma_s (float) spatial gaussian std. dev.
sigma_v (float) value gaussian std. dev.
        reg_constant (float) optional regularization constant for pathological cases
Returns:
result (ndarray) output bilateral-filtered image
Raises:
ValueError whenever img_in is not a 2D float32 valued np.ndarray
"""
# check the input
if not isinstance( img_in, np.ndarray ) or img_in.dtype != 'float32' or img_in.ndim != 2:
raise ValueError('Expected a 2D np.ndarray with float32 elements')
# make a simple Gaussian function taking the squared radius
gaussian = lambda r2, sigma: (np.exp( -0.5*r2/sigma**2 )*3).astype(int)*1.0/3.0
    # define the window width to be 3 times the spatial std. dev. to
# be sure that most of the spatial kernel is actually captured
win_width = int( 3*sigma_s+1 )
# initialize the results and sum of weights to very small values for
# numerical stability. not strictly necessary but helpful to avoid
# wild values with pathological choices of parameters
wgt_sum = np.ones( img_in.shape )*reg_constant
result = img_in*reg_constant
# accumulate the result by circularly shifting the image across the
# window in the horizontal and vertical directions. within the inner
# loop, calculate the two weights and accumulate the weight sum and
# the unnormalized result image
for shft_x in range(-win_width,win_width+1):
for shft_y in range(-win_width,win_width+1):
# compute the spatial weight
w = gaussian( shft_x**2+shft_y**2, sigma_s )
# shift by the offsets
off = np.roll(img_in, [shft_y, shft_x], axis=[0,1] )
# compute the value weight
tw = w*gaussian( (off-img_in)**2, sigma_v )
# accumulate the results
result += off*tw
wgt_sum += tw
# normalize the result and return
return result/wgt_sum
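# A minimal self-check sketch (not in the original file): filter a noisy synthetic
# float32 ramp image and confirm the output has the same shape as the input.
def _filter_bilateral_example():
    rng = np.random.RandomState(0)
    img = np.linspace(0.0, 1.0, 64 * 64).reshape(64, 64)
    img = (img + 0.05 * rng.randn(64, 64)).astype(np.float32)
    out = filter_bilateral(img, sigma_s=3.0, sigma_v=0.1)
    assert out.shape == img.shape
    return out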
def gaussian_weights(sigma):
kr = math.ceil(sigma*3)
ks = int(kr*2+1)
k = np.zeros((ks,ks))
for i in range(0,ks):
for j in range(0,ks):
y = i-kr
x = j-kr
k[i,j] = math.exp( - (x*x+y*y)/ (2*sigma*sigma) )
return k.astype(np.float32)
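# Quick check (sketch): gaussian_weights(1.0) returns an unnormalized 7x7 float32
# kernel (kr = ceil(3*1.0) = 3, ks = 2*3 + 1 = 7) whose centre value k[3, 3] is 1.0.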
def filter_bilateral_with_embeding( x_in, embed_in, sigma_s = 10.0, sigma_v = 0.1, reg_constant=1e-20,
hard_lambda = 1.0):
"""Simple bilateral filtering of an input image
Performs standard bilateral filtering of an input image. If padding is desired,
embed_in should be padded prior to calling
Args:
        x_in (ndarray) input array of size [H,W,C]; could be any input, e.g. an RGB image, a gray image, a cost volume, or a cost volume slice
        embed_in (ndarray) embedding feature used for generating the mask, of size [H,W,F] (same as PyTorch)
sigma_s (float) spatial gaussian std. dev.
sigma_v (float) value gaussian std. dev.
        reg_constant (float) optional regularization constant for pathological cases
        hard_lambda (float) hardness of the mask based on embed_in; see parameter lambda in Eq. (3) of <NAME>'s paper "Segmentation-Aware Convolutional Networks Using Local Attention Masks (ICCV'17)"
Returns:
result (ndarray) output bilateral-filtered image
Raises:
ValueError whenever embed_in is not a 3D float32 valued np.ndarray
"""
# check the input
if not isinstance( embed_in, np.ndarray ) or embed_in.dtype != 'float32' or embed_in.ndim != 3:
raise ValueError('Expected embed_in a 3D np.ndarray with float32 elements')
if not isinstance( x_in, np.ndarray ) or x_in.dtype != 'float32' or x_in.ndim != 3:
raise ValueError('Expected x_in a 3D np.ndarray with float32 elements')
H, W, F = embed_in.shape[:]
_,_, C = x_in.shape[:]
#print ('embed_in shape = ', embed_in.shape)
#print ('x_in shape = ', x_in.shape)
# make a simple Gaussian function taking the squared radius
#gaussian = lambda r2, sigma, c: (np.exp( c*-0.5*r2/sigma**2 )*3).astype(int)*1.0/3.0
gaussian = lambda r2, sigma, c: np.exp( c*-0.5*r2/sigma**2)
    # define the window width to be 3 times the spatial std. dev. to
# be sure that most of the spatial kernel is actually captured
win_width = int( 3*sigma_s+1 )
#print ('win_size = {} x {}'.format(2*win_width+1, 2*win_width + 1))
# initialize the results and sum of weights to very small values for
# numerical stability. not strictly necessary but helpful to avoid
# wild values with pathological choices of parameters
wgt_sum = np.ones([H,W])*reg_constant
#print ("showing initial wgt_sum ")
#pfm.show(wgt_sum)
result = x_in * reg_constant
    L2_dist_square = lambda x, y: np.sum((x - y)**2, axis=-1)
# accumulate the result by circularly shifting the image across the
# window in the horizontal and vertical directions. within the inner
# loop, calculate the two weights and accumulate the weight sum and
# the unnormalized result image
""" generate gaussian filter for space weight """
sw_filter = np.zeros(( 2*win_width + 1, 2*win_width + 1))
for shft_y in range(-win_width,win_width+1):
for shft_x in range(-win_width,win_width+1):
# compute the spatial weight
y_idx = shft_y + win_width
x_idx = shft_x + win_width
#print (type(x_idx), (type(y_idx)))
sw = gaussian(shft_x**2+shft_y**2, sigma_s, hard_lambda) # scalar sw
#print ("sw_filter[%d, %d] = %f" %(y_idx, x_idx, sw))
sw_filter[y_idx, x_idx] = sw
#print ("sw_filter = ", sw_filter)
# verify the sw_filter
#sw_filter2 = gaussian_weights(sigma_s)
#print (sw_filter2)
#sys.exit()
rw_accum = []
for shft_y in range(-win_width,win_width+1):
for shft_x in range(-win_width,win_width+1):
# compute the spatial weight
#sw = gaussian( shft_x**2+shft_y**2, sigma_s, hard_lambda) # scalar sw
sw = sw_filter[shft_y + win_width, shft_x + win_width]
print ("sw[%d][%d] = %f" %(shft_y + win_width, shft_x + win_width, sw))
# shift by the offsets
off_embed = np.roll(embed_in, [shft_y, shft_x, 0], axis=[0,1,2])
off_x = np.roll(x_in, [shft_y, shft_x, 0], axis=[0,1,2])
# range weight (rw) : [H, W]
            rw = gaussian(L2_dist_square(off_embed, embed_in), sigma_v, hard_lambda)
#print ('rw shape = ', rw.shape)
rw_accum.append(rw)
#NOTE: debugging
#sw = 1.0
tw =sw*rw # in shape [H, W]
# accumulate the results
#NOTE:
# off_x in shape [H,W,C] (note: here C could be ndisp if x_in = cost volume)
result += off_x * np.expand_dims(tw, axis = -1) # [H,W,C]
wgt_sum += tw
if 0:
rw_all = np.stack(rw_accum, axis=-1)
print ('rw_all shape = ', rw_all.shape)
for i in range(0, rw_all.shape[2]):
shft_x = i % (2*win_width+1) - win_width
            shft_y = i // (2*win_width+1) - win_width
print("show shift [%d][%d] " %( shft_y, shft_x))
pfm.show(rw_all[:,:,i])
# normalize the result and return
return result/ np.expand_dims(wgt_sum, axis = -1), wgt_sum # in shape [H,W,C]
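# A minimal usage sketch (not in the original file): filter a random cost-volume
# slice of shape [H, W, C] guided by a random embedding of shape [H, W, F].
# Note that the function prints one line per window offset while it runs.
def _filter_with_embedding_example():
    rng = np.random.RandomState(0)
    x_in = rng.rand(32, 32, 4).astype(np.float32)
    embed_in = rng.rand(32, 32, 8).astype(np.float32)
    out, wgt_sum = filter_bilateral_with_embeding(x_in, embed_in, sigma_s=2.0, sigma_v=0.5)
    assert out.shape == x_in.shape and wgt_sum.shape == x_in.shape[:2]
    return out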
""" im2col operation """
#> see https://stackoverflow.com/questions/30109068/implement-matlabs-im2col-sliding-in-python;
from skimage.util import view_as_windows
def im2col(x, k = 3):
""" args:
x: input, in shape [H,W,C]
k: kernel size, k x k
return:
        y: in shape [H, W, k*k, C]
"""
H,W,C = x.shape[:]
    pad_h = k // 2  # integer division: np.pad requires int pad widths
    pad_w = k // 2
""" padding """
x = np.pad(x, ((pad_h, pad_h), (pad_w, pad_w), (0,0)),
mode = 'constant',
#mode= 'edge'
)
window_shape = (k, k, C)
x = np.squeeze(view_as_windows(x, window_shape, step = 1)) #in shape (H, W, k, k, C)
x = np.reshape(x, [H, W, k*k, C]) #in shape (H, W, k*k, C)
return x
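# A minimal usage sketch (not in the original file): a 5x5 single-channel input
# with a 3x3 window yields patches of shape [5, 5, 9, 1].
def _im2col_example():
    x = np.arange(25, dtype=np.float32).reshape(5, 5, 1)
    cols = im2col(x, k=3)
    assert cols.shape == (5, 5, 9, 1)
    return cols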
def get_gaussian_filter_width_from_sigma(sigma_s):
return int(3*sigma_s + 1)
def filter_bilateral_with_embeding_im2col(
x_in, embed_in, sigma_s = 10.0, sigma_v = 0.1,
reg_constant=1e-20,
hard_lambda = 1.0):
""" using im2col to change the filtering to matrix multiplication
Performs standard bilateral filtering of an input image. If padding is desired,
embed_in should be padded prior to calling
Args:
        x_in (ndarray) input array of size [H,W,C]; could be any input, e.g. an RGB image, a gray image, a cost volume, or a cost volume slice
        embed_in (ndarray) embedding feature used for generating the mask, of size [H,W,F] (same as PyTorch)
sigma_s (float) spatial gaussian std. dev.
sigma_v (float) value gaussian std. dev.
        reg_constant (float) optional regularization constant for pathological cases
        hard_lambda (float) hardness of the mask based on embed_in; see parameter lambda in Eq. (3) of <NAME>'s paper "Segmentation-Aware Convolutional Networks Using Local Attention Masks (ICCV'17)"
Returns:
result (ndarray) output bilateral-filtered image
Raises:
ValueError whenever embed_in is not a 3D float32 valued np.ndarray
"""
# check the input
if not isinstance( embed_in, | |
opiate opiates opine
opined opines opining opportunism opportunistic oppressively
opprobrious opprobrium optically optimistically optometry opulence
oracular orally orangeade orangeades orate orated orates orating
oratorical oratorio oratorios orb orbs orderings orderliness ordinal
ordinals ordinariness ordnance ordure oregano organdy organelle
organelles organically orgasmic orgasms orgiastic orientals orifices
origami origination oriole orioles ormolu ornamentation ornateness
ornerier orneriest ornery orotund orthodontia orthodontic orthodontics
orthodoxies orthodoxy orthographic orthographies oscillator
oscillators oscilloscopes osier osiers osmotic osprey ospreys
ossification ossified ossifies ossify ossifying ostentatiously
osteopath osteopaths osteopathy osteoporosis ostracism otherworldly
otiose ottoman ottomans outage outages outback outbacks outbalance
outbalanced outbalances outbalancing outbid outbidding outbids
outbounds outbuilding outbuildings outcrop outcropped outcropping
outcroppings outcrops outfielder outfielders outfitter outfitters
outflank outflanked outflanking outflanks outfox outfoxed outfoxes
outfoxing outgo outgoes outlandishly outperform outperformed
outperforming outperforms outplacement outplay outplayed outplaying
outplays outpouring outpourings outrank outranked outranking outranks
outre outreach outreached outreaches outreaching outrider outriders
outrigger outriggers outsell outselling outsells outsize outsizes
outsold outsource outsourced outsources outsourcing outspokenly
outspokenness outspread outspreading outspreads outstay outstayed
outstaying outstays outstretch outstretched outstretches outstretching
outtake outtakes outvote outvoted outvotes outvoting outwear
outwearing outwears outwore outworn ovarian overabundance overabundant
overachieve overachieved overachiever overachievers overachieves
overachieving overact overacted overacting overactive overacts overage
overages overambitious overanxious overawe overawed overawes overawing
overbalance overbalanced overbalances overbalancing overbite overbites
overbook overbooked overbooking overbooks overcautious overcompensate
overcompensated overcompensates overcompensating overcompensation
overconfident overcook overcooked overcooking overcooks overdrafts
overdress overdressed overdresses overdressing overdrive overeager
overenthusiastic overexpose overexposed overexposes overexposing
overexposure overextend overextended overextending overextends
overfull overgenerous overgrowth overindulge overindulged
overindulgence overindulges overindulging overjoy overjoyed overjoying
overjoys overkilled overkilling overkills overlord overlords overmuch
overmuches overpaid overpay overpaying overpays overplay overplayed
overplaying overplays overpopulate overpopulated overpopulates
overpopulating overproduce overproduced overproduces overproducing
overproduction overprotective overqualified overreach overreached
overreaches overreaching overreaction overreactions overripe oversell
overselling oversells oversensitive oversexed overshoe overshoes
oversimplifications oversimplified oversimplifies oversimplify
oversimplifying oversize oversizes oversizing oversold overspend
overspending overspends overspent overspill overspread overspreading
overspreads overstatement overstatements overstay overstayed
overstaying overstays overstock overstocked overstocking overstocks
overstuffed oversupplied oversupplies oversupply oversupplying overtax
overtaxed overtaxes overtaxing overviews overweening overzealous
oviduct oviducts oviparous ovoid ovoids ovulate ovulated ovulates
ovulating ovulation ovule ovules ow owlet owlets owlish oxbow oxbows
oxford oxfords oxyacetylene oxygenate oxygenated oxygenates
oxygenating oxygenation oxymora oxymoron p pH pacesetter pacesetters
pachyderm pachyderms pacifically pacification padre padres paean
paeans paediatrics paganism pagers paginate paginated paginates
paginating pailful pailfuls painkiller painkillers painstakingly
paintbrush paintbrushes painters paintwork pairwise paisley paisleys
palatal palatals palaver palavered palavering palavers paleface
palefaces paleness palimony palimpsest palimpsests palindrome
palindromes palindromic palings palisade palisades palladium pallet
pallets palliate palliated palliates palliating palliation palliative
palliatives palmetto palmettos palmier palmiest palmist palmistry
palmists palmy palpate palpated palpates palpating palpation palpitate
palpitated palpitates palpitating palpitation palpitations palsied
palsies palsy palsying paltriness pampas pamphleteer pamphleteers
panache panchromatic pandemic pandemics panderer panderers panegyric
panegyrics panelist panelists pannier panniers panoplies panoply
pantaloons pantheism pantheist pantheistic pantheists pantheon
pantheons pantsuit pantsuits pantyhose papaw papaws paperboy paperboys
papergirl papergirls paperhanger paperhangers papery papilla papillae
papoose papooses papped papping paps parabola parabolas parabolic
parachutist parachutists paradigmatic paradigms paralegal paralegals
parallax parallaxes parallelism parallelisms parallelogram
parallelograms paramecia paramecium paramedic paramedical paramedicals
paramedics paramilitaries paramilitary paramour paramours paranormal
parapet parapets paraplegia paraprofessional paraprofessionals
parapsychology paratroops parboil parboiled parboiling parboils
parenthetic parenthetically parfait parfaits pariah pariahs parings
parlance parlay parlayed parlaying parlays parley parleyed parleying
parleys parliamentarian parliamentarians parochialism parolee parolees
paroxysm paroxysms parquet parqueted parqueting parquetry parquets
parricide parricides parried parries parry parrying parsimonious
parsimony partaker partakers parterre parterres parthenogenesis
participator participators participatory participial particularities
particularity particulate particulates partisanship parturition
partway parvenu parvenus paschal pasha pashas passably passel passels
passerby passersby passionless passivity passkey passkeys pasteboard
pastern pasterns pastiches pastorate pastorates pastrami pasturage
patchier patchiest patchiness patella patellae patellas paternalistic
paternally pathogen pathogenic pathogens pathologically patina patinas
patois patriarchies patriarchy patrician patricians patricide
patricides patrimonial patriotically patrolman patrolmen patrolwoman
patrolwomen patronymic patronymics patsies patsy pauperism pavings
pawl pawls pawnshop pawnshops paycheck paychecks payday paydays payee
payees payloads paymaster paymasters peaceably peacefulness
peacekeeping peacetime peafowl peafowls peahen peahens pearlier
pearliest pearly peasantry pebblier pebbliest pebbly peccadillo
peccadilloes peccaries peccary pectin pectoral pectorals pecuniary
pedagogic pedagogical pedagogics pedagogue pedagogued pedagogues
pedagoguing pedantically pederast pederasts pederasty pedicure
pedicured pedicures pedicuring pedigreed pediment pediments pedometer
pedometers pee peed peeing peekaboo peeper peepers peephole peepholes
peerage peerages pees peevishly peevishness peewee peewees pejorative
pejoratives pekoe pelagic pellagra pellucid penchants pendent pendents
pendulous penetrable penetrative penile peninsular penitential
penitently penlight penlights pennon pennons pennyweight pennyweights
penologist penologists penology pensiveness pent pentameter
pentameters pentathlon pentathlons pents penultimates penurious penury
peonage peppercorn peppercorns pepperoni pepperonis peppery peppier
peppiest peppy pepsin peptic peptics perambulate perambulated
perambulates perambulating perambulator perambulators percale percales
perceivable percentile percentiles perceptibly perceptively
perceptiveness perceptual percussionist percussionists perdition
peregrination peregrinations peremptorily perennially perfectible
perfectionism perfidies perfidious perfidy perforce perfumeries
perfumery pericardia pericardium perigee perigees perihelia perihelion
periodicity periodontal peripatetic peripatetics periphrases
periphrasis peritoneum peritoneums peritonitis periwig periwigged
periwigging periwigs periwinkle periwinkles perjurer perjurers
perkiness perm permafrost permeability permeable permed perming
permissibly permissively permissiveness perms permute permuted
permutes permuting perniciously peroration perorations perpetration
perpetuation perpetuity perquisite perquisites persiflage persimmon
persimmons persnickety personae personage personages perspicacious
perspicacity perspicuity perspicuous persuasiveness pertinacious
pertinacity pertinence pertly pertness perturbation perturbations
perversely perverseness perversity peseta pesetas peso pesos
pessimistically pestilent pestle pestled pestles pestling petard
petards petiole petioles petitioner petitioners petrel petrels
petrifaction petrochemical petrochemicals petrolatum pettifog
pettifogged pettifogger pettifoggers pettifogging pettifogs pettily
petulance petulantly pewee pewees peyote phalanges phalanx phalanxes
phalli phallic phallus phantasm phantasmagoria phantasmagorias
phantasms pharaoh pharaohs pharmacologist pharmacologists pharmacology
pharmacopoeia pharmacopoeias pharyngeal pharynges pharynx
phenobarbital phenotype pheromone pheromones phial phialled phialling
phials philander philandered philanderer philanderers philandering
philanders philanthropically philatelic philatelist philatelists
philately philharmonic philharmonics philippic philippics philistine
philistines philodendron philodendrons philological philologist
philologists philology philosophic philosophically philter philters
phlebitis phlegmatically phloem phlox phobic phobics phoebe phoebes
phoenixes phoneme phonemes phonemic phonemics phonetically phonetician
phoneticians phonic phonically phoniness phonological phonologist
phonologists phonology phooey phooeys phosphate phosphates phosphoric
phosphors photoelectric photographically photojournalism
photojournalist photojournalists photosensitive phototypesetting
phrasal phrasings phrenology phyla phylae phylum physicked physicking
physiognomies physiognomy physiologist physiologists physiotherapist
physiotherapists physiotherapy pianissimo pianissimos pianoforte
pianofortes piazza piazzas pica picaresque picaresques picayune
piccalilli picker pickerel pickerels pickers pickings picnicker
picnickers pictograph pictographs pictorially pidgin pidgins piebald
piebalds pied pieing piercingly piercings piffle piggier piggies
piggiest piggishness piggy piglet piglets pigmentation pigskin
pigskins pigsties pigsty piing piker pikers pilaf pilafs pilaster
pilasters pilchard pilchards pileup pileups pilferer pilferers pilings
pillbox pillboxes pillion pillioned pillioning pillions pilloried
pillories pillory pillorying pilothouse pilothouses pimento pimentos
pimiento pimientos pimp pimped pimpernel pimpernels pimping pimps
pinafore pinafores pinball pincer pincers pinfeather pinfeathers ping
pinged pinging pings pinhead pinheads pinhole pinholes pinkeye pinkie
pinkies pinkish pinnate pinochle pinprick pinpricked pinpricking
pinpricks pinstripe pinstriped pinstripes pinto pintos pinup pinups
pinwheel pinwheeled pinwheeling pinwheels piously pip piper pipers
pipit pipits pipped pippin pipping pippins pips pipsqueak pipsqueaks
piquancy piquant piratical piscatorial piss pissed pisses pissing
pistil pistillate pistils pita pitchblende pitchman pitchmen pith
pithily pitiable pitiably pitilessly piton pitons pituitaries
pituitary pixel pixels pizazz pizzeria pizzerias pizzicati pizzicato
placation placebo placebos placeholder placements placental placentals
placer placers placidity placket plackets plainclothes plainclothesman
plainclothesmen plainness plaint plaintively plaints plait plaited
plaiting plaits plangent plannings plantings plasterboard plasterer
plasterers plasticity plateful platefuls platelet platelets platen
platens platitudinous platonic platypus platypuses plaudit plaudits
playact playacted playacting playacts playbacks playbill playbills
playboy playboys playgoer playgoers playoff playoffs playroom
playrooms pleader pleaders pleasantness pleasingly pleasurably
plebeian plebeians plebiscite plebiscites plectra plectrum plectrums
plenaries plenary plenipotentiaries plenipotentiary plenitude
plenitudes plenteous pleurisy plexus plexuses pliability pliancy
plinth plinths plodder plodders ploddings plottered plottering plover
plovers pluckier pluckiest pluckiness plumpness plunderer plunderers
plunk plunked plunking plunks pluperfect pluperfects pluralism
pluralistic pluralities plushier plushiest plushy plutocracies
plutocracy plutocrat plutocratic plutocrats pneumatically pock pocked
pocketful pocketfuls pocketknife pocketknives pocking pocks podiatrist
podiatrists podiatry poesied poesies poesy poesying poetess poetesses
poetically pogrom pogromed pogroming pogroms poi poignantly pointier
pointiest pointillism pointillist pointillists pointlessness pointy
poisoner poisoners poisonings poisonously pol polarities polecat
polecats polemical polestar polestars policyholder policyholders
poliomyelitis polisher polishers politesse politic politicked
politicking politico politicos polities polity polliwog polliwogs
polluter polluters polonaise polonaises polonium pols poltergeist
poltergeists poltroon poltroons polyester polyesters polyethylene
polygamist polygamists polyglot polyglots polygonal polygraph
polygraphed polygraphing polygraphs polyhedron polyhedrons polymath
polymaths polymer polymeric polymers polymorphic polyphonic polyphony
polystyrene polysyllabic polysyllable polysyllables polytechnics
polytheism polytheist polytheistic polytheists polythene
polyunsaturated pomade pomaded pomades pomading pommel pommels
pompadour pompadoured pompadours pompom pompoms pomposity pompously
pompousness ponderously pone pones poniard poniards pontiff pontiffs
pontifical pontificate pontificated pontificates pontificating
ponytail ponytails pooch pooched pooches pooching pooh poohed poohing
poohs poorhouse poorhouses popes popgun popguns popinjay popinjays
poplin popover popovers poppa poppas poppycock populism populist
populists porcine porn porno pornographer pornographers porosity
porphyry porringer porringers portage portaged portages portaging
portcullis portcullises portentous portentously porterhouse
porterhouses portliness portmanteau portmanteaus portraitist
portraitists portraiture poser posers poseur poseurs posh poshed
posher poshes poshest poshing posit posited positing positron
positrons posits posse posses possessively possessiveness postcodes
postdate postdated postdates postdating postdoc postdocs postdoctoral
posthaste postlude postludes postmistress postmistresses postmodern
postmortem postmortems postnatal postoperative postpaid postpartum
postwar potable potables potash potbellied potbellies potbelly
potboiler potboilers potentate potentates potentialities potentiality
potentials potful potfuls potholder potholders pothook pothooks
potluck potlucks potpie potpies potpourri potpourris potsherd
potsherds potshot potshots pottage pottier potties pottiest potty
poultice poulticed | |
#!/usr/bin/env python
"""
Isosurface rendering results in a black image and the warning
OSPRAY STATUS: ospray::Isosurfaces deprecated parameter use. Isosurfaces will begin taking an OSPVolume directly, with appearance set through the GeometricModel instead.
OSPRAY STATUS: ospray::Isosurfaces created: #primitives=1
even though the scene setup appears to be correct.
"""
# E.g.
# ./samples/volrender.py -s 1,1,2 -d 256,256,84 -v 0,255 -t ~/models/brain/carnival.trn ~/models/brain/brain256_256_84_8.raw
# ./samples/volrender.py -i 128 -s 1,1,2 -d 256,256,84 -v 0,255 -t ~/models/brain/carnival.trn ~/models/brain/brain256_256_84_8.raw
# ./samples/volrender.py -c 'z<0.035' -i 4 -D u -s 0.009,0.009,0.005 u_00659265.h5
# ./samples/volrender.py -b 0,0,0 -c 'z<0.035' -D u -t tf2.trn -s 0.009,0.009,0.005 u_00659265.h5
import sys, getopt, os, time
scriptdir = os.path.split(__file__)[0]
sys.path.insert(0, os.path.join(scriptdir, '..'))
import numpy
from PIL import Image
import ospray
t0 = time.time()
W = 960
H = 540
RENDERER = 'scivis'
argv = ospray.init(sys.argv)
try:
    # Needs to go after ospInit apparently, as we get a lockup during exit otherwise
import h5py
have_h5py = True
except ImportError:
have_h5py = False
try:
import vtk
have_vtk = True
except ImportError:
have_vtk = False
# Enable logging output
def error_callback(error, details):
print('OSPRAY ERROR: %d (%s)' % (error, details))
def status_callback(message):
print('OSPRAY STATUS: %s' % message)
ospray.set_error_callback(error_callback)
ospray.set_status_callback(status_callback)
device = ospray.get_current_device()
device.set_param('logLevel', 1)
#device.set_param('logOutput', 'cerr')
#device.set_param('errorOutput', 'cerr')
device.commit()
# Parse arguments
def usage():
print('%s [options] file.raw|file.h5|file.hdf5' % sys.argv[0])
print()
print('Options:')
print(' -a anisotropy Volume anisotropy')
print(' -b r,g,b[,a] Background color')
print(' -c [xyz][<>]<value> Set clip plane')
print(' -d xdim,ydim,zdim .raw file dimensions')
print(' -D dataset_name HDF5 dataset name')
print(' -f axis,minidx,maxidx,value Fill part of the volume with a specific value')
print(' -t <default>|<linear>|<file.trn> Set transfer function')
print(' -i isovalue Render as isosurface instead of volume')
print(' -I width,height Image resolution')
print(' -o output-image Output file (default: volume.png)')
print(' -p Use pathtracer (default: use scivis renderer)')
print(' -s xs,ys,zs Grid spacing')
print(' -S samples Samples per pixel')
print(' -v minval,maxval Volume value range')
print(' -x Display image after rendering (uses tkinter)')
print()
print('When reading a .raw file 8-bit unsigned integers are assumed')
print()
anisotropy = 0.0
bgcolor = (1.0, 1.0, 1.0, 1.0)
clipping_gmodel = None
dimensions = None
dataset_name = None
tf_mode = 'default'
tf_file = None
image_file = 'volume.png'
isovalue = None
grid_spacing = numpy.ones(3, dtype=numpy.float32)
samples = 4
set_value = None
value_range = None
display_result = False
try:
optlist, args = getopt.getopt(argv[1:], 'a:b:c:d:D:f:i:I:o:ps:S:t:v:x')
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
for o, a in optlist:
if o == '-a':
anisotropy = float(a)
elif o == '-b':
bgcolor = list(map(float, a.split(',')))
assert len(bgcolor) in [3,4]
if len(bgcolor) == 3:
bgcolor.append(1.0)
bgcolor = tuple(bgcolor)
elif o == '-c':
assert a[0] in ['x', 'y', 'z']
axis = a[0]
a = a[1:]
if a[0] == '<':
value = float(a[1:])
invert_clip_plane_normal = True
elif a[0] == '>':
value = float(a[1:])
invert_clip_plane_normal = False
else:
            assert False, 'Invalid clip plane format'
if axis == 'x':
clip_plane_coefficients = (-1, 0, 0, -value)
elif axis == 'y':
clip_plane_coefficients = (0, -1, 0, -value)
else:
clip_plane_coefficients = (0, 0, -1, -value)
c = numpy.array([clip_plane_coefficients], 'float32')
g = ospray.Geometry('plane')
g.set_param('plane.coefficients', ospray.copied_data_constructor_vec(c))
g.commit()
clipping_gmodel = ospray.GeometricModel(g)
clipping_gmodel.set_param('invertNormals', invert_clip_plane_normal)
clipping_gmodel.commit()
elif o == '-d':
dimensions = tuple(map(int, a.split(',')))
assert len(dimensions) == 3
elif o == '-D':
dataset_name = a
elif o == '-f':
pp = a.split(',')
assert len(pp) == 4
set_value = (int(pp[0]), int(pp[1]), int(pp[2]), float(pp[3]))
elif o == '-i':
isovalue = float(a)
elif o == '-I':
W, H = map(int, a.split(','))
elif o == '-o':
image_file = a
elif o == '-p':
RENDERER = 'pathtracer'
elif o == '-s':
grid_spacing = tuple(map(float, a.split(',')))
assert len(grid_spacing) == 3
grid_spacing = numpy.array(grid_spacing, dtype=numpy.float32)
elif o == '-S':
samples = int(a)
elif o == '-t':
if os.path.isfile(a):
tf_mode = 'file'
tf_file = a
else:
assert a in ['default', 'linear']
tf_mode = a
elif o == '-v':
value_range = tuple(map(float, a.split(',')))
elif o == '-x':
display_result = True
if len(args) == 0:
usage()
sys.exit(2)
volfile = args[0]
# Read file
extent = numpy.zeros((2,3), 'float32')
ext = os.path.splitext(volfile)[-1]
if ext == '.raw':
    assert dimensions is not None, 'Set dimensions with -d x,y,z'
data = numpy.fromfile(volfile, dtype=numpy.uint8)
data = data.reshape(dimensions)
extent[1] = dimensions * grid_spacing
elif ext in ['.h5', '.hdf5']:
    assert have_h5py, 'h5py module could not be loaded!'
    assert dataset_name, 'Set hdf5 dataset name with -D name'
f = h5py.File(volfile, 'r')
dset = f[dataset_name]
data = numpy.array(dset[:])
f.close()
# KJI -> IJK
data = numpy.swapaxes(data, 0, 2)
dimensions = data.shape
if value_range is None:
value_range = tuple(map(float, (numpy.min(data), numpy.max(data))))
extent[1] = dimensions * grid_spacing
elif ext in ['.vtk', '.vti']:
    assert have_vtk, 'vtk module could not be loaded!'
from vtk.numpy_interface import dataset_adapter
if ext == '.vtk':
dr = vtk.vtkDataSetReader()
dr.SetFileName(volfile)
dr.Update()
sp = dr.GetStructuredPointsOutput()
else:
dr = vtk.vtkXMLImageDataReader()
dr.SetFileName(volfile)
dr.Update()
sp = dr.GetOutput()
assert sp is not None
print(sp)
dimensions = sp.GetDimensions()
extent = numpy.array(sp.GetExtent(), 'float32').reshape((3,2)).swapaxes(0,1)
grid_spacing = numpy.array(sp.GetSpacing(), 'float32')
scalar_type = sp.GetScalarTypeAsString()
print('scalar_type', scalar_type)
volume_do = dataset_adapter.WrapDataObject(sp)
assert len(volume_do.PointData.keys()) > 0
scalar_name = volume_do.PointData.keys()[0]
data = volume_do.PointData[scalar_name]
print('Point scalar data "%s"' % scalar_name, data)
assert len(data.shape) == 1
data = data.reshape(dimensions)
assert len(data.shape) == 3
value_range = tuple(map(float, (numpy.min(data), numpy.max(data))))
else:
raise ValueError('Unknown file extension "%s"' % ext)
print('volume data', data.shape, data.dtype)
print('dimensions', dimensions)
print('value range', value_range)
print('spacing', grid_spacing)
print('extent', extent)
assert value_range is not None, 'Set value range with -v min,max'
if set_value is not None:
axis, minidx, maxidx, value = set_value
assert axis in [0,1,2]
if axis == 0:
data[minidx:maxidx+1] = value
elif axis == 1:
data[:,minidx:maxidx+1] = value
else:
data[:,:,minidx:maxidx+1] = value
# Keep a reference to the numpy array around, as it will get
# deallocated otherwise
saved_data = data
data = ospray.shared_data_constructor(data)
volume = ospray.Volume('structuredRegular')
volume.set_param('gridSpacing', tuple(grid_spacing.tolist()))
volume.set_param('data', data)
volume.commit()
# TF
def generate_tf(values, colors, opacities, T=16):
"""
The values, colors and opacities arrays together sparsely define
the transfer function.
"""
assert value_range is not None
#print(values)
#print(colors)
#print(opacities)
P = values.shape[0]
assert colors.shape[0] == P and colors.shape[1] == 3
assert opacities.shape[0] == P
# Generate equidistant TF from sparse specification
tfcolors = numpy.zeros((T,3), dtype=numpy.float32)
tfopacities = numpy.zeros(T, dtype=numpy.float32)
idx = 0
pos = 0.0
pos_step = value_range[-1]/(T-1)
while idx < T:
valueidx = numpy.interp(pos, values, numpy.arange(P))
lowidx = int(valueidx)
highidx = min(lowidx + 1, P-1)
factor = valueidx - lowidx
#print(value, valueidx, lowidx, highidx, factor)
tfcolors[idx] = (1-factor) * colors[lowidx] + factor * colors[highidx]
tfopacities[idx] = (1-factor) * opacities[lowidx] + factor * opacities[highidx]
idx += 1
pos += pos_step
return tfcolors, tfopacities
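# Worked example (sketch): with value_range = (0, 255), values = [0, 255],
# colors = [[0, 0, 0], [1, 1, 1]], opacities = [0, 1] and T = 3, the loop samples
# positions 0, 127.5 and 255, giving tfcolors rows [0, 0, 0], [0.5, 0.5, 0.5],
# [1, 1, 1] and tfopacities [0, 0.5, 1].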
if isovalue is not None:
# Fixed color and opacity
tfcolors = numpy.array([[0.8, 0.8, 0.8]], dtype=numpy.float32)
tfopacities = numpy.array([1], dtype=numpy.float32)
elif tf_mode == 'file':
# Read a .trn file
# <minval> <maxval>
# <value> <r> <g> <b> <a>
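    # For example (illustrative only), a two-point grayscale ramp over [0, 255]
    # could be written as:
    #   0 255
    #   0 0 0 0 0
    #   255 1 1 1 1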
lines = [l.strip() for l in open(tf_file, 'rt').readlines() if l.strip() != '' and l[0] != '#']
if value_range is None:
value_range = list(map(float, lines[0].split(' ')))
lines = lines[1:]
N = len(lines)
assert N >= 2
values = numpy.zeros(N, dtype=numpy.float32)
colors = numpy.zeros((N,3), dtype=numpy.float32)
opacities = numpy.zeros(N, dtype=numpy.float32)
for idx, line in enumerate(lines):
pp = list(map(float, line.split()))
assert len(pp) == 5
values[idx] = pp[0]
colors[idx] = (pp[1], pp[2], pp[3])
opacities[idx] = pp[4]
tfcolors, tfopacities = generate_tf(values, colors, opacities)
elif tf_mode == 'linear':
# Simple linear TF
tfcolors = numpy.array([[0, 0, 0], [0, 0, 1]], dtype=numpy.float32)
tfopacities = numpy.array([0, 1], dtype=numpy.float32)
elif tf_mode == 'default':
values = numpy.array([
0, 0.318, 0.462, 0.546, 1
], dtype=numpy.float32)
colors = numpy.array([
[0, 0, 1],
[0, 1, 0],
[0.013, 0, 0.5],
[0.229, 0, 0.5],
[0.229, 0, 0.5],
], dtype=numpy.float32)
opacities = numpy.array([
1, 1, 1, 0, 0
], dtype=numpy.float32)
tfcolors, tfopacities = generate_tf(values, colors, opacities)
#print('tfcolors', tfcolors.shape)
#print('tfopacities', tfopacities.shape)
#print('TF:')
#print(value_range)
#print(tfcolors)
#print(tfopacities)
if isovalue is not None:
# Isosurface rendered
isovalues = numpy.array([isovalue], dtype=numpy.float32)
isosurface = ospray.Geometry('isosurface')
isosurface.set_param('isovalue', isovalues)
isosurface.set_param('volume', volume)
isosurface.commit()
material = ospray.Material(RENDERER, 'obj')
material.set_param('kd', (0.5, 0.5, 1.0))
material.set_param('d', 1.0)
material.commit()
gmodel = ospray.GeometricModel(isosurface)
#gmodel.set_param('material', material)
gmodel.commit()
ggroup = ospray.Group()
ggroup.set_param('geometry', [gmodel])
if clipping_gmodel is not None:
ggroup.set_param('clippingGeometry', [clipping_gmodel])
ggroup.commit()
ginstance = ospray.Instance(ggroup)
ginstance.set_param('transform', ospray.mat4.identity())
#ginstance.set_param('transform', ospray.mat4.translate(dimensions[0], 0, 0))
ginstance.commit()
instances = [ginstance]
else:
# Volume rendered
transfer_function = ospray.TransferFunction('piecewiseLinear')
transfer_function.set_param('color', | |
": " + str(reaction.check_mass_balance()))
#h_HAOe <-> h_HAOc
reaction = Reaction('HAO_H_import')
reaction.name = 'H+ import'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h_HAOe: -1.0,
h_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h_HAOc <-> h_HAOe
reaction = Reaction('HAO_H_export')
reaction.name = 'H+ export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h_HAOc: -1.0,
h_HAOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#ATP for Transport
HAO_ATP_Transport = Metabolite('HAO_ATP_Transport', formula='', name='', compartment='e')
reaction = Reaction('HAO_Transport_ATP')
reaction.name = 'Transport ATP'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({HAO_ATP_Transport: -1})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Formate_Transport_ATP
#atp_HAOc + h2o_HAOc <-> adp_HAOc + pi_HAOc + h_HAOc
reaction = Reaction('HAO_Formate_Transport_ATP')
reaction.name = 'Formate Transport ATP'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HAOc: -1.0,
h2o_HAOc: -1.0,
adp_HAOc: 1.0,
pi_HAOc: 1.0,
h_HAOc: 1.0,
ATP_TRANS_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Acetate_Transport_ATP
#atp_HAOc + h2o_HAOc <-> adp_HAOc + pi_HAOc + h_HAOc
reaction = Reaction('HAO_Acetate_Transport_ATP')
reaction.name = 'Acetate Transport ATP Hydrolysis'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HAOc: -1.0,
h2o_HAOc: -1.0,
adp_HAOc: 1.0,
pi_HAOc: 1.0,
h_HAOc: 1.0,
ATP_TRANS_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Proton_Transport_ATP
#atp_HAOc + h2o_HAOc <-> adp_HAOc + pi_HAOc + h_HAOc
reaction = Reaction('HAO_Proton_Transport_ATP')
reaction.name = 'ATP Transport'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HAOc: -1.0,
h2o_HAOc: -1.0,
adp_HAOc: 1.0,
pi_HAOc: 1.0,
h_HAOc: 1.0,
ATP_TRANS_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
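# The transport and ATP-hydrolysis blocks above all follow the same pattern:
# create a Reaction, set its name/subsystem/bounds, add the metabolites, add it
# to the model, and print the mass balance. A helper such as the sketch below
# (not part of the original script; name and signature are only illustrative)
# could factor that pattern out:
def _add_reaction_sketch(model, rxn_id, name, subsystem, metabolites,
                         lower_bound=0., upper_bound=1000.):
    reaction = Reaction(rxn_id)
    reaction.name = name
    reaction.subsystem = subsystem
    reaction.lower_bound = lower_bound
    reaction.upper_bound = upper_bound
    reaction.add_metabolites(metabolites)
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))
    return reaction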
##Pentose Phosphate Pathway
# ru5p__D_HAOc <-> xu5p__D_HAOc
ru5p__D_HAOc = Metabolite('ru5p__D_HAOc', formula='C5H9O8P', name='D-Ribulose 5-phosphate', compartment='HAOc',
charge=-2)
xu5p__D_HAOc = Metabolite('xu5p__D_HAOc', formula='C5H9O8P', name='D-xylulose 5-phosphate', compartment='HAOc',
charge=-2)
reaction = Reaction('HAO_RPE')
reaction.name = 'Ribulose 5-phosphate 3-epimerase'
reaction.subsystem = 'Pentose Utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({ru5p__D_HAOc: -1.0,
xu5p__D_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# r5p_HAOc <-> ru5p__D_HAOc
r5p_HAOc = Metabolite('r5p_HAOc', formula='C5H9O8P', name='Alpha-D-Ribose 5-phosphate', compartment='HAOc',
charge=-2)
reaction = Reaction('HAO_RPI')
reaction.name = 'Ribose-5-phosphate isomerase'
reaction.subsystem = 'Pentose Utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({r5p_HAOc: -1.0,
ru5p__D_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# r5p_HAOc + xu5p__D_HAOc <-> g3p_HAOc + s7p_HAOc
s7p_HAOc = Metabolite('s7p_HAOc', formula='C7H13O10P', name='Sedoheptulose 7-phosphate', compartment='HAOc',
charge=-2)
g3p_HAOc = Metabolite('g3p_HAOc', formula='C3H5O6P', name='Glyceraldehyde 3-phosphate', compartment='HAOc',
charge=-2)
reaction = Reaction('HAO_TKT1')
reaction.name = 'Transketolase 1'
reaction.subsystem = 'Pentose Utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({r5p_HAOc: -1.0,
xu5p__D_HAOc: -1.0,
g3p_HAOc: 1.0,
s7p_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# g3p_HAOc + s7p_HAOc <-> e4p_HAOc + f6p_HAOc
f6p_HAOc = Metabolite('f6p_HAOc', formula='C6H11O9P', name='D-Fructose 6-phosphate', compartment='HAOc', charge=-2)
e4p_HAOc = Metabolite('e4p_HAOc', formula='C4H7O7P', name='D-Erythrose 4-phosphate', compartment='HAOc', charge=-2)
reaction = Reaction('HAO_TALA')
reaction.name = 'Transaldolase'
reaction.subsystem = 'Pentose Utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({g3p_HAOc: -1.0,
s7p_HAOc: -1.0,
e4p_HAOc: 1.0,
f6p_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# e4p_HAOc + xu5p__D_HAOc <-> f6p_HAOc + g3p_HAOc
reaction = Reaction('HAO_TKT2')
reaction.name = 'Transketolase 2'
reaction.subsystem = 'Pentose Utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({e4p_HAOc: -1.0,
xu5p__D_HAOc: -1.0,
f6p_HAOc: 1.0,
g3p_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# g6p_HAOc <-> f6p_HAOc
g6p_HAOc = Metabolite('g6p_HAOc', formula='C6H11O9P', name='D-Glucose 6-phosphate', compartment='HAOc', charge=-2)
reaction = Reaction('HAO_PGI')
reaction.name = 'Glucose-6-phosphate isomerase'
reaction.subsystem = 'Hexose Utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({g6p_HAOc: -1.0,
f6p_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Upper Glycolysis
# atp_HAOc + f6p_HAOc <-> adp_HAOc + fdp_HAOc + h_HAOc
fdp_HAOc = Metabolite('fdp_HAOc', formula='C6H10O12P2', name='D-Fructose 1,6-bisphosphate', compartment='HAOc',
charge=-4)
reaction = Reaction('HAO_PFK')
reaction.name = 'Phosphofructokinase'
reaction.subsystem = 'Upper Glycolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HAOc: -1.0,
f6p_HAOc: -1.0,
adp_HAOc: 1.0,
fdp_HAOc: 1.0,
h_HAOc: 1.0,
ATP_SLP_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# fdp_HAOc <-> dhap_HAOc + g3p_HAOc
dhap_HAOc = Metabolite('dhap_HAOc', formula='C3H5O6P', name='Dihydroxyacetone phosphate', compartment='HAOc',
charge=-2)
reaction = Reaction('HAO_FBA')
reaction.name = 'Fructose-bisphosphate aldolase'
reaction.subsystem = 'Upper Glycolysis'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({fdp_HAOc: -1.0,
dhap_HAOc: 1.0,
g3p_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# dhap_HAOc <-> g3p_HAOc
dhap_HAOc = Metabolite('dhap_HAOc', formula='C3H5O6P', name='Dihydroxyacetone phosphate', compartment='HAOc',
charge=-2)
reaction = Reaction('HAO_TPI')
reaction.name = 'Triose-phosphate isomerase'
reaction.subsystem = 'Upper Glycolysis'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({dhap_HAOc: -1.0,
g3p_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Lower Glycolysis
# g3p_HAOc + nad_HAOc + pi_HAOc <-> 13dpg_HAOc + h_HAOc + nadh_HAOc
_13dpg_HAOc = Metabolite('_13dpg_HAOc', formula='C3H4O10P2', name='3-Phospho-D-glyceroyl phosphate',
compartment='HAOc', charge=-4)
reaction = Reaction('HAO_GAPD')
reaction.name = 'Glyceraldehyde-3-phosphate dehydrogenase'
reaction.subsystem = 'Lower Glycolysis'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({g3p_HAOc: -1.0,
nad_HAOc: -1.0,
pi_HAOc: -1.0,
_13dpg_HAOc: 1.0,
h_HAOc: 1.0,
nadh_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# 3pg_HAOc + atp_HAOc <-> 13dpg_HAOc + adp_HAOc
_3pg_HAOc = Metabolite('_3pg_HAOc', formula='C3H4O7P', name='3-Phospho-D-glycerate', compartment='HAOc', charge=-3)
reaction = Reaction('HAO_PGK')
reaction.name = 'Phosphoglycerate kinase'
reaction.subsystem = 'Lower Glycolysis'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({_3pg_HAOc: -1.0,
atp_HAOc: -1.0,
_13dpg_HAOc: 1.0,
adp_HAOc: 1.0,
ATP_SLP_HAO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# 2pg_HAOc <-> 3pg_HAOc
_2pg_HAOc = Metabolite('_2pg_HAOc', formula='C3H4O7P', name='2-Phospho-D-glycerate', compartment='HAOc', charge=-3)
reaction = Reaction('HAO_PGM')
reaction.name = 'Phosphoglycerate mutase'
reaction.subsystem = 'Lower Glycolysis'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({_2pg_HAOc: -1.0,
_3pg_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# 2pg_HAOc <-> h2o_HAOc + pep_HAOc
pep_HAOc = Metabolite('pep_HAOc', formula='C3H2O6P', name='Phosphoenolpyruvate', compartment='HAOc', charge=-3)
reaction = Reaction('HAO_ENO')
reaction.name = 'Enolase'
reaction.subsystem = 'Lower Glycolysis'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({_2pg_HAOc: -1.0,
h2o_HAOc: 1.0,
pep_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# adp_HAOc + h_HAOc + pep_HAOc <-> atp_HAOc + pyr_HAOc
pyr_HAOc = Metabolite('pyr_HAOc', formula='C3H3O3', name='Pyruvate', compartment='HAOc', charge=-1)
reaction = Reaction('HAO_PYK')
reaction.name = 'Pyruvate kinase'
reaction.subsystem = 'Lower Glycolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({adp_HAOc: -1.0,
h_HAOc: -1.0,
pep_HAOc: -1.0,
atp_HAOc: 1.0,
pyr_HAOc: 1.0,
ATP_SLP_HAO: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# Formate Transport
# for_HAOe <-> for_e
for_HAOc = Metabolite('for_HAOc', formula='C1H1O2', name='Formate', compartment='HAOc', charge=-1)
for_HAOe = Metabolite('for_HAOe', formula='CHO2', name='Formate', compartment='HAOe', charge=-1)
reaction = Reaction('HAO_EX_for')
reaction.name = 'HAO for exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({for_e: HAO_Abnd,
for_HAOe: -1})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# for_HAOe + h_HAOe <-> for_HAOc + h_HAOc
reaction = Reaction('HAO_Formate_import')
reaction.name = 'Formate import'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({for_HAOe: -1.0,
h_HAOe: -1.0,
for_HAOc: 1.0,
h_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# for_HAOc + h_HAOc <-> for_HAOe + h_HAOe
reaction = Reaction('HAO_Formate_export')
reaction.name = 'Formate_export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({for_HAOc: -1.0,
h_HAOc: -1.0,
for_HAOe: 1.0,
h_HAOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# Formate_Transport_ATP
# atp_HAOc + h2o_HAOc <-> adp_HAOc + pi_HAOc + h_HAOc
reaction = Reaction('HAO_Formate_Transport_ATP')
reaction.name = 'Formate Transport ATP'
reaction.subsystem | |
<filename>tofu/mag/equimap.py
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Also if needed: retab
'''
EQUIMAP tools module, functions ...
'''
# Standard python modules
from __future__ import (unicode_literals, absolute_import, \
print_function, division)
import numpy as np
import os
import re
import scipy.interpolate as interpolate
import warnings
#import sys
# Local modules
import imas
try:
import imas_west
except ImportError as err:
pass
# print(err)
try:
import pywed as pw
except ImportError as err:
pass
# print(err)
# Project modules
try:
import mag_ripple as mr
except ImportError as err:
pass
# print(err)
__all__ = ['get']
# Parameters
min_Ip = 100000
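# Usage sketch (not from the original module; the shot number and coordinates are
# placeholders, and an IMAS/WEST environment is assumed):
#
#   import equimap
#   b = equimap.get(shot=55564, time=[32.0, 33.0],
#                   R=[2.5], Phi=[0.0], Z=[0.0],
#                   quantity='b_field_norm', no_ripple=True)
#   # b is expected to have shape (2, 1): one row per requested time, one column per point.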
def get(shot, time, R, Phi, Z, quantity, no_ripple=False, \
run=0, occ=0, user='imas_public', machine='west'):
'''
    Interpolation of the requested quantity for the list of times at
    input coordinates R, Phi, Z
WARNING: Ripple is only taken into account for 'b_field_norm', 'b_field_r',
'b_field_z' and b_field_tor' quantities for the moment
Coordinate convention used (COCOS 11, see Sauter, Medvedev, Comp.Phys.Com. 184, 2013)
-------------------------------------------------------------------------------------
Cylindrical | Poloidal | Phi from top | theta (pol ang) from front | psi
------------------------------------------------------------------------------------------
(R, Phi, Z) | (rho, theta, Phi) | cnt-clockwise | clockwise | increasing
Parameters
----------
shot : int
shot number
time : list of floats [s]
times where to perform interpolation (ABSOLUTE TIME, without t_ignitron)
R : list of floats [m]
big radius where to perform interpolation
Phi : list of floats, same length as R [rad]
toroidal angle where to perform interpolation
Z : list of floats, same length as R [m]
vertical coordinate where to perform interpolation
quantity : string
for which quantity perform interpolation. One of
``rho_pol_norm``
poloidal flux coordinate (normalized)
``rho_tor_norm``
toroidal flux coordinate (normalized)
``rho_tor``
toroidal flux coordinate [m]
``psi``
poloidal flux [Wb]
``phi``
toroidal flux [Wb]
``theta``
poloidal angle [rad] in the range [0, 2*pi[
``j_tor``
toroidal current density [A.m^-2]
``j_parallel``
parallel current density [A.m^-2]
``b_field_r``
big radius (R) component of the poloidal magnetic field [T]
``b_field_z``
vertical (Z) component of the poloidal magnetic field [T]
``b_field_tor``
toroidal component of the magnetic field [T]
``b_field_norm``
total magnetic field norm [T]
no_ripple : boolean, optional (default=False)
do not calculate magnetic ripple
run : run number, optional (default=0)
occ : occurrence number, optional (default=0)
user : user name, optional (default=imas_public)
machine : machine name, optional (default=west)
Returns
-------
out : ndarray, shape (time, points)
interpolated quantity for list time at points with coordinates R, Phi, Z
'''
#print('time =', time)
#print('R =', R)
#print('Phi =', Phi)
#print('Z =', Z)
print('quantity =', quantity)
# Check if shot exists
run_number = '{:04d}'.format(run)
shot_file = os.path.expanduser('~' + user + '/public/imasdb/' + machine + \
'/3/0/' + 'ids_' + str(shot) + run_number + \
'.datafile')
if (not os.path.isfile(shot_file)):
raise FileNotFoundError('IMAS file does not exist')
# Get equilibrium fast
idd = imas.ids(shot, run)
idd.open_env(user, machine, '3')
idd.equilibrium.get()
equi = idd.equilibrium
# Check code.output_flag for data validity
if (np.any(np.isnan(equi.code.output_flag))):
mask = np.full(len(equi.time), True, dtype=bool)
else:
mask = np.asarray(equi.code.output_flag) >= 0
bool_b_field = False
if isinstance(quantity, list):
ar_bool = np.full(len(quantity), False)
for ii in range(len(quantity)):
if (re.match('b_field_*', quantity[ii])):
ar_bool[ii] = True
if (np.any(ar_bool)):
bool_b_field = True
else:
if (re.match('b_field_*', quantity)):
bool_b_field = True
if (bool_b_field):
# Get Itor (current of toroidal coils, coils that produce the toroidal field)
itor, t_itor = pw.tsbase(shot, 'gmag_itor', nargout=2)
t_ignitron = pw.tsmat(shot, 'IGNITRON|1')
t_itor += t_ignitron[0]
ismag, tsmag = pw.tsbase(shot, 'SMAG_IP', nargout=2)
t_smag_filter = tsmag[(abs(ismag[:, 0]*1000) > min_Ip), 0]
t_mid = 0.5*(t_smag_filter[-1] - t_smag_filter[0]) \
+ t_ignitron[0]
ind_mid = np.abs(equi.time[mask] - t_mid).argmin()
else:
itor = None
t_itor = None
t_ignitron = None
ind_mid = None
ar_time = np.atleast_1d(np.squeeze(np.asarray([time])))
ar_R = np.atleast_1d(np.squeeze(np.asarray([R])))
ar_Phi = np.atleast_1d(np.squeeze(np.asarray([Phi])))
ar_Z = np.atleast_1d(np.squeeze(np.asarray([Z])))
if (ar_time.size > 1):
mask_time_tmp = (equi.time[mask] >= ar_time.min()) \
& (equi.time[mask] <= ar_time.max())
indMin = np.abs(equi.time[mask] \
- equi.time[mask][mask_time_tmp][0]).argmin()
indMax = np.abs(equi.time[mask] \
- equi.time[mask][mask_time_tmp][-1]).argmin()
if (indMin == 0):
indMinApply = indMin
else:
indMinApply = indMin - 1
if (indMax == (equi.time[mask].size-1)):
indMaxApply = indMax
else:
indMaxApply = indMax + 1
mask_time = (equi.time[mask] >= equi.time[mask][indMinApply]) \
& (equi.time[mask] <= equi.time[mask][indMaxApply])
time_points = equi.time[mask][mask_time]
#firstSpaceInterp = True # For test
if (ar_time.size > time_points.size):
print('__________> First perform space interpolation')
            firstSpaceInterp = True # In this case it is faster to interpolate spatially first
else:
firstSpaceInterp = False
else:
firstSpaceInterp = False
mask_time = None
time_points = None
equiDict = {}
# Declaration of arrays 2d plots
equi_grid = idd.equilibrium.grids_ggd[0].grid[0]
NbrPoints = len(equi_grid.space[0].objects_per_dimension[0].object)
equiDict['r'] = np.full(NbrPoints, np.nan)
equiDict['z'] = np.full(NbrPoints, np.nan)
for ii in range(NbrPoints):
equiDict['r'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[0]
equiDict['z'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[1]
ind_valid = np.argmax(mask)
NbrProf = len(equi.time_slice[ind_valid].profiles_1d.psi)
equiDict['psi'] = np.full((len(equi.time), NbrPoints), np.nan)
equiDict['phi'] = np.full((len(equi.time), NbrPoints), np.nan)
equiDict['theta'] = np.full((len(equi.time), NbrPoints), np.nan)
equiDict['j_tor'] = np.full((len(equi.time), NbrPoints), np.nan)
equiDict['j_parallel'] = np.full((len(equi.time), NbrPoints), np.nan)
equiDict['b_field_r'] = np.full((len(equi.time), NbrPoints), np.nan)
equiDict['b_field_z'] = np.full((len(equi.time), NbrPoints), np.nan)
equiDict['b_field_tor'] = np.full((len(equi.time), NbrPoints), np.nan)
equiDict['prof_1d_psi'] = np.full((len(equi.time), NbrProf), np.nan)
equiDict['prof_1d_rho_tor'] = np.full((len(equi.time), NbrProf), np.nan)
equiDict['mag_axis_r'] = np.full(len(equi.time), np.nan)
equiDict['mag_axis_z'] = np.full(len(equi.time), np.nan)
for ii in range(len(equi.time)):
equi_slice = equi.time_slice[ii]
equi_space = equi.time_slice[ii].ggd[0]
if (equi_space.psi):
equiDict['psi'][ii] = equi_space.psi[0].values
if (equi_space.phi):
equiDict['phi'][ii] = equi_space.phi[0].values
if (equi_space.theta):
equiDict['theta'][ii] = equi_space.theta[0].values
if (equi_space.j_tor):
equiDict['j_tor'][ii] = equi_space.j_tor[0].values
if (equi_space.j_parallel):
equiDict['j_parallel'][ii] = equi_space.j_parallel[0].values
if (equi_space.b_field_r):
equiDict['b_field_r'][ii] = equi_space.b_field_r[0].values
if (equi_space.b_field_z):
equiDict['b_field_z'][ii] = equi_space.b_field_z[0].values
if (equi_space.b_field_tor):
equiDict['b_field_tor'][ii] = equi_space.b_field_tor[0].values
equiDict['prof_1d_psi'][ii] = equi_slice.profiles_1d.psi
equiDict['prof_1d_rho_tor'][ii] = equi_slice.profiles_1d.rho_tor
equiDict['mag_axis_r'][ii] = equi_slice.global_quantities.magnetic_axis.r
equiDict['mag_axis_z'][ii] = equi_slice.global_quantities.magnetic_axis.z
points = np.vstack((equiDict['r'], equiDict['z'])).transpose()
interp_points = np.vstack((ar_R, ar_Z)).transpose()
if isinstance(quantity, list):
out = {}
for iquant in quantity:
out[iquant] = \
interp_quantity(iquant, interp_points, points, time_points, equi, \
ar_time, ar_R, ar_Phi, ar_Z, mask, mask_time, \
firstSpaceInterp, itor, t_itor, t_ignitron, \
no_ripple, ind_mid, equiDict)
else:
out = interp_quantity(quantity, interp_points, points, time_points, equi, \
ar_time, ar_R, ar_Phi, ar_Z, mask, mask_time, \
firstSpaceInterp, itor, t_itor, t_ignitron, \
no_ripple, ind_mid, equiDict)
return out
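# --- Illustrative sketch, not part of the original routine ---
# Minimal, self-contained example of the "space first, then time" interpolation
# order chosen above when the requested times outnumber the stored equilibrium
# time slices: one spatial interpolation per stored slice, then a single 1-D
# interpolation in time. The toy argument names below are hypothetical; the
# function reuses the numpy/scipy modules already imported by this file as
# `np` and `interpolate`.
def _sketch_space_then_time_interp(grid_pts, field_on_grid, t_slices,
                                   query_pts, query_t):
    """grid_pts: (N, 2) R/Z nodes; field_on_grid: (T, N); t_slices: (T,).
    Returns an array of shape (len(query_t), len(query_pts))."""
    # One LinearNDInterpolator per stored slice (cheap when T is small)...
    space_interp = np.vstack([
        interpolate.LinearNDInterpolator(grid_pts, field_on_grid[ii])(query_pts)
        for ii in range(t_slices.size)])
    # ...then a single 1-D interpolation along the time axis.
    f_t = interpolate.interp1d(t_slices, space_interp, axis=0,
                               bounds_error=False)
    return f_t(query_t)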
def interp_quantity(quantity, interp_points, points, time_points, equi, \
ar_time, ar_R, ar_Phi, ar_Z, mask, mask_time, \
firstSpaceInterp, itor, t_itor, t_ignitron, \
no_ripple, ind_mid, equiDict):
value_interpolated = np.full((ar_time.size, ar_R.size), np.nan)
if (firstSpaceInterp):
value_interpSpace = np.full((time_points.size, ar_R.size), np.nan)
# Computation of requested quantities
if (quantity == 'b_field_norm'):
if (no_ripple):
b_field_norm = np.sqrt(equiDict['b_field_r']**2. \
+ equiDict['b_field_z']**2. \
+ equiDict['b_field_tor']**2.)
if (firstSpaceInterp):
# Space interpolation
for ii in range(time_points.size):
lin_intp = interpolate.LinearNDInterpolator(points, \
b_field_norm[mask, :][mask_time][ii, :])
value_interpSpace[ii, :] = lin_intp.__call__(interp_points)
else:
# Time interpolation
f_intp = interpolate.interp1d(equi.time[mask], \
b_field_norm[mask, :], axis=0, \
bounds_error=False)
else: # B_norm with ripple calculation
# Declaration arrays
br_intp = np.full((ar_time.size, ar_R.size), np.nan)
bt_intp = np.full((ar_time.size, ar_R.size), np.nan)
bz_intp = np.full((ar_time.size, ar_R.size), np.nan)
if (firstSpaceInterp):
br_Sintp = np.full((time_points.size, ar_R.size), np.nan)
bt_Sintp = np.full((time_points.size, ar_R.size), np.nan)
bz_Sintp = np.full((time_points.size, ar_R.size), np.nan)
# Space interpolation
for ii in range(time_points.size):
lin_intp = interpolate.LinearNDInterpolator(points, \
equiDict['b_field_r'][mask, :][mask_time][ii, :])
br_Sintp[ii, :] = lin_intp.__call__(interp_points)
lin_intp = interpolate.LinearNDInterpolator(points, \
equiDict['b_field_tor'][mask, :][mask_time][ii, :])
bt_Sintp[ii, :] = lin_intp.__call__(interp_points)
lin_intp = interpolate.LinearNDInterpolator(points, \
equiDict['b_field_z'][mask, :][mask_time][ii, :])
bz_Sintp[ii, :] = lin_intp.__call__(interp_points)
# Time interpolation
f_intp = interpolate.interp1d(time_points, \
br_Sintp, axis=0, \
bounds_error=False)
br_intp = np.atleast_2d(np.squeeze(f_intp(ar_time)))
f_intp = interpolate.interp1d(time_points, \
bt_Sintp, axis=0, \
bounds_error=False)
bt_intp = np.atleast_2d(np.squeeze(f_intp(ar_time)))
f_intp = interpolate.interp1d(time_points, \
bz_Sintp, axis=0, \
bounds_error=False)
bz_intp = np.atleast_2d(np.squeeze(f_intp(ar_time)))
else:
# Time interpolation
f_intp_br = interpolate.interp1d(equi.time[mask], \
equiDict['b_field_r'][mask, :], axis=0, \
bounds_error=False)
f_intp_bt = interpolate.interp1d(equi.time[mask], \
equiDict['b_field_tor'][mask, :], axis=0, \
bounds_error=False)
f_intp_bz = interpolate.interp1d(equi.time[mask], \
equiDict['b_field_z'][mask, :], axis=0, \
bounds_error=False)
br_intp_t = np.atleast_2d(np.squeeze(f_intp_br(ar_time)))
bt_intp_t = np.atleast_2d(np.squeeze(f_intp_bt(ar_time)))
bz_intp_t = np.atleast_2d(np.squeeze(f_intp_bz(ar_time)))
# Space interpolation
for ii in range(ar_time.size):
lin_intp = interpolate.LinearNDInterpolator(points, br_intp_t[ii])
br_intp[ii, :] = lin_intp.__call__(interp_points)
lin_intp = interpolate.LinearNDInterpolator(points, bt_intp_t[ii])
bt_intp[ii, :] = lin_intp.__call__(interp_points)
lin_intp = interpolate.LinearNDInterpolator(points, bz_intp_t[ii])
bz_intp[ii, :] = lin_intp.__call__(interp_points)
# Interpolate current
itor_intp_t = np.interp(ar_time, t_itor[:, 0], itor[:, 0])
b0_intp_t = np.interp(ar_time, equi.time[mask], \
equi.vacuum_toroidal_field.b0[mask])
# Compute reference vacuum magnetic field
bt_vac = equi.vacuum_toroidal_field.r0*b0_intp_t[:, np.newaxis] \
/ ar_R[np.newaxis, :]
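# (vacuum toroidal field falls off as 1/R: bt_vac = b0 * r0 / R, where b0 is the reference field at major radius r0)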
# Compute magnetic field for given Phi
br_ripple, bt_ripple, bz_ripple = mr.mag_ripple(ar_R, ar_Phi, \
ar_Z, itor_intp_t)
# Check and correct Br if needed
r_ax = equi.time_slice[ind_mid].global_quantities.magnetic_axis.r
z_mid_ax = \
equi.time_slice[ind_mid].global_quantities.magnetic_axis.z \
+ 0.5*equi.time_slice[ind_mid].boundary.minor_radius
ind_p = np.abs((equiDict['r'] - r_ax)**2. \
+ (equiDict['z'] - z_mid_ax)**2).argmin()
if (equiDict['b_field_r'][ind_mid, ind_p] > 0):
br_intp *= -1
warnings.warn('Correcting sign of b_field_r in b_field_norm for negative toroidal current (COCOS 11)')
# Check and correct Bz if needed
r_mid_ax = \
equi.time_slice[ind_mid].global_quantities.magnetic_axis.r | |
return AutomaticallyRetrievedGraph(
"APO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def CLO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2019-02-10", **kwargs
) -> Graph:
"""Return CLO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2019-02-10"
Version to retrieve
The available versions are:
- 2022-03-20
- 2019-02-10
"""
return AutomaticallyRetrievedGraph(
"CLO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
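# --- Illustrative usage sketch, not part of the generated module ---
# The retrievers in this file share one signature; a call only needs the
# keyword arguments that differ from the defaults, e.g. a specific ontology
# version or a custom cache directory (paths below are placeholders):
#
#     clo_graph = CLO(version="2022-03-20", cache_path="/tmp/graphs")
#     cmo_graph = CMO()  # uses the default version "2019-02-19"
#
# The returned object is the `Graph` type annotated above; its methods are not
# shown here because they depend on the installed library version.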
def CMO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2019-02-19", **kwargs
) -> Graph:
"""Return CMO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2019-02-19"
Version to retrieve
The available versions are:
- 2019-02-19
"""
return AutomaticallyRetrievedGraph(
"CMO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def OHMI(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2019-09-17", **kwargs
) -> Graph:
"""Return OHMI graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2019-09-17"
Version to retrieve
The available versions are:
- 2019-09-17
"""
return AutomaticallyRetrievedGraph(
"OHMI", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def HSO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-11-28", **kwargs
) -> Graph:
"""Return HSO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2020-11-28"
Version to retrieve
The available versions are:
- 2021-12-13
- 2020-11-28
"""
return AutomaticallyRetrievedGraph(
"HSO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def FBBI(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-11-06", **kwargs
) -> Graph:
"""Return FBBI graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2020-11-06"
Version to retrieve
The available versions are:
- 2020-11-06
"""
return AutomaticallyRetrievedGraph(
"FBBI", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def OBI(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-08-18", **kwargs
) -> Graph:
"""Return OBI graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2021-08-18"
Version to retrieve
The available versions are:
- 2022-01-03
- 2021-08-18
"""
return AutomaticallyRetrievedGraph(
"OBI", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def CDAO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2019-06-26", **kwargs
) -> Graph:
"""Return CDAO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2019-06-26"
Version to retrieve
The available versions are:
- 2019-06-26
"""
return AutomaticallyRetrievedGraph(
"CDAO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def MFMO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2013-11-16", **kwargs
) -> Graph:
"""Return MFMO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2013-11-16"
Version to retrieve
The available versions are:
- 2013-11-16
"""
return AutomaticallyRetrievedGraph(
"MFMO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def CRO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2019-12-11", **kwargs
) -> Graph:
"""Return CRO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2019-12-11"
Version to retrieve
The available versions are:
- 2019-12-11
"""
return AutomaticallyRetrievedGraph(
"CRO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def CHEMINF(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2.0", **kwargs
) -> Graph:
"""Return CHEMINF graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in | |
hard_label_fc.size(0)
selected_num = int(percent * num_unl)
if self.args.filter_type == 'fc':
scores_for_prediction = soft_label_uniform_fc
scores, hard_label_prediction = torch.max(scores_for_prediction, dim=1)
elif self.args.filter_type == 'cluster':
scores_for_prediction = soft_label_uniform_kmean
scores, hard_label_prediction = torch.max(scores_for_prediction, dim=1)
elif self.args.filter_type == 'lp':
scores_for_prediction = soft_label_uniform_lp
scores, hard_label_prediction = torch.max(scores_for_prediction, dim=1)
elif self.args.filter_type == 'all':
scores_for_prediction = (soft_label_uniform_fc + soft_label_uniform_kmean + soft_label_uniform_lp) / 3
scores, hard_label_prediction = torch.max(scores_for_prediction, dim=1)
if self.args.entropy_filter: #### use the entropy of each sample as its score
scores = Categorical(probs=scores_for_prediction).entropy() ### N dimension vector.
if self.args.category_rank: ### select samples of each category with TOP percent scores
raise NotImplementedError ### this code path must be verified before use
index_category = torch.BoolTensor(num_unl).fill_(False)
for i in range(self.args.num_class):
category_per_index = hard_label_prediction == i
scores_of_per_category = scores[category_per_index]
num_in_category = scores_of_per_category.size(0)
selected_num_in_category = int(percent * num_in_category)
if selected_num_in_category == 0:
print('no samples have been selected for category %d' % (i))
break
value, _ = torch.topk(scores_of_per_category, selected_num_in_category)
thr = value[-1] - 1e-8
idx_in_category = (scores > thr) & category_per_index
index_category = index_category | idx_in_category
idx = target_u_index[index_category]
else: ### select samples with TOP percent scores
if selected_num == 0:
index_all = torch.BoolTensor(num_unl).fill_(False)
idx = target_u_index[index_all]
acc_pseudo_selected = 0
else:
if self.args.entropy_filter: #### use the entropy of each sample as its score
value, _ = torch.topk(scores, selected_num, largest=False)
threshold_manully = (1 - self.args.thr) * math.log(self.args.num_class)
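# note: math.log(num_class) is the maximum possible entropy (that of a uniform distribution over num_class classes), so (1 - thr) above maps the confidence threshold onto the entropy scale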
thr = min(value[-1], threshold_manully)
idx = target_u_index[(scores <= thr)]
acc_pseudo_selected = accuracy(scores_for_prediction[(scores <= thr)],
target_u_gt_label_for_visual[(scores <= thr)])
else:
value, _ = torch.topk(scores, selected_num)
threshold_manully = self.args.thr
thr = max(value[-1], threshold_manully) #### filter samples with low prediction.
print('the threshold is: %3f' % (thr))
idx = target_u_index[(scores >= thr)]
if len(idx) == 0:
acc_pseudo_selected = 0
else:
acc_pseudo_selected = accuracy(scores_for_prediction[(scores >= thr)].cuda(),
target_u_gt_label_for_visual[(scores >= thr)])
## build a reverse index that maps each global sample index back to its position in the collected order
reverse_index = torch.LongTensor(len(target_u_index)).fill_(0)
for normal_index in range(len(target_u_index)):
reverse_index[target_u_index[normal_index]] = normal_index
self.all_hard_pseudo_label = hard_label_prediction[reverse_index].cuda()
self.all_soft_pseudo_label = scores_for_prediction[reverse_index].cuda()
else:
raise NotImplementedError
num_unl = hard_label_fc.size(0)
if self.args.filter_type == 'fc':
idx = target_u_index[(scores_fc.cpu() > self.args.thr)]
elif self.args.filter_type == 'cluster':
idx = target_u_index[
(scores_fc.cpu() > self.args.thr) & (hard_label_uniform_fc.cpu() == hard_label_uniform_kmean)]
elif self.args.filter_type == 'ssl':
idx = target_u_index[
(scores_fc.cpu() > self.args.thr) & (hard_label_uniform_fc.cpu() == hard_label_uniform_lp)]
elif self.args.filter_type == 'both':
idx = target_u_index[
(scores_fc.cpu() > self.args.thr) & (hard_label_uniform_fc.cpu() == hard_label_uniform_lp) & (
hard_label_uniform_fc.cpu() == hard_label_uniform_kmean)]
# elif self.args.filter_type == 'either':
# idx = target_u_index[
# ((scores_fc.cpu() > self.args.thr) & (hard_label_uniform_fc.cpu() == hard_label_uniform_lp)) | (
# (scores_fc.cpu() > self.args.thr) & (
# hard_label_uniform_fc.cpu() == hard_label_uniform_kmean))]
if len(idx) == 0:
acc_pseudo_selected = 0
else:
acc_pseudo_selected = accuracy(soft_label_uniform_fc[(scores_fc >= self.args.thr)].cuda(),
target_u_gt_label_for_visual[(scores_fc >= self.args.thr)])
## build a reverse index that maps each global sample index back to its position in the collected order
reverse_index = torch.LongTensor(len(target_u_index)).fill_(0)
for normal_index in range(len(target_u_index)):
reverse_index[target_u_index[normal_index]] = normal_index
self.all_hard_pseudo_label = hard_label_uniform_fc[reverse_index].cuda()
self.all_soft_pseudo_label = soft_label_uniform_fc[reverse_index].cuda()
acc_pseudo_label = accuracy(self.all_soft_pseudo_label, target_u_gt_label_for_visual[reverse_index])
target_u_gt_label_for_visual = target_u_gt_label_for_visual[reverse_index]
print('Select number: [%d/%d], acc: %3f, acc_selected: %3f ' % (idx.size(0), num_unl, acc_pseudo_label, acc_pseudo_selected))
log = open(os.path.join(self.args.save_dir, 'log.txt'), 'a')
log.write("\n")
log.write('Select number: [%d/%d], acc: %3f, acc_selected: %3f ' % (idx.size(0), num_unl, acc_pseudo_label, acc_pseudo_selected))
log.close()
#### calculate the target category centers using self.all_hard_pseudo_label
target_u_feature_matrix = target_u_feature_matrix[reverse_index] #### re-order the features to match self.all_hard_pseudo_label
target_feature_category_list = []
for i in range(self.args.num_class):
target_feature_category_list.append([])
for i in range(target_u_feature_matrix.size(0)):
pseudo_label = self.all_hard_pseudo_label[i]
target_feature_category_list[pseudo_label].append(target_u_feature_matrix[i].view(1, target_u_feature_matrix.size(1)))
for i in range(self.args.num_class):
if len(target_feature_category_list[i]) >0:
target_feature_category_list[i] = torch.cat(target_feature_category_list[i], dim=0)
target_feature_category_list[i] = target_feature_category_list[i].mean(0).view(1, target_u_feature_matrix.size(1)) #### get the mean of each target category
else:
## if no target samples are selected for category i
target_feature_category_list[i] = self.previous_target_feature_category_matrix[i].view(1, target_u_feature_matrix.size(1))
target_feature_category_matrix = torch.cat(target_feature_category_list, dim=0)
self.previous_target_feature_category_matrix = target_feature_category_matrix
## predict source categories with the target category means, and remove source samples with low scores (they would make the prototypical classifier confusing)
## 1. re-order the source data to the default index (0,1,2,3....)
## build a reverse index that maps each global sample index back to its position in the collected order
reverse_index_source = torch.LongTensor(len(source_index)).fill_(0)
for normal_index in range(len(source_index)):
reverse_index_source[source_index[normal_index]] = normal_index
source_feature_matrix = source_feature_matrix[reverse_index_source]
hard_label_s = hard_label_s[reverse_index_source]
target_feature_category_matrix_unsq = torch.unsqueeze(target_feature_category_matrix, 0)
source_feature_matrix_unsq = torch.unsqueeze(source_feature_matrix, 1)
L2_dis = ((source_feature_matrix_unsq.cpu() - target_feature_category_matrix_unsq.cpu()) ** 2).mean(2)
soft_label_target_prototypical = torch.softmax(1 + 1.0 / (L2_dis + 1e-8), dim=1).cuda()
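# inverse L2 distances to the target class prototypes, passed through a softmax: the nearer the prototype, the larger the soft score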
### get the score on the source GT categories, and use the score to filter source samples with lower probability.
instance_index = torch.arange(soft_label_target_prototypical.size(0))
score_source = soft_label_target_prototypical[instance_index, hard_label_s] #### score for each source sample.
## calculate the source soft weight following SRDC
target_center_mul_source_feature = torch.matmul(source_feature_matrix, target_feature_category_matrix.transpose(0,1))
target_center_mul_source_feature = target_center_mul_source_feature[instance_index, hard_label_s]
target_feature_category_matrix_expand_for_source = target_feature_category_matrix[hard_label_s]
target_center_norm_mul_source_feature_norm = torch.norm(source_feature_matrix, dim=1) * torch.norm(target_feature_category_matrix_expand_for_source, dim=1)
source_soft_weight = (target_center_mul_source_feature / target_center_norm_mul_source_feature_norm + 1) / 2.0
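# i.e. the cosine similarity between each source feature and the target prototype of its class, rescaled from [-1, 1] to [0, 1]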
self.source_soft_weight = source_soft_weight
source_percent = 1 - percent
num_source = soft_label_target_prototypical.size(0)
selected_num_source = int(source_percent * num_source)
if selected_num_source == 0:
index_source_selected = torch.BoolTensor(num_source).fill_(False)
idx_source = source_index[index_source_selected]
else:
value, _ = torch.topk(score_source, selected_num_source)
threshold_manully = self.args.thr
thr = max(value[-1], threshold_manully) #### filter samples with low prediction.
print('the threshold is: %3f' % (thr))
idx_source = source_index[(score_source >= thr)]
print("source selection: %3f, [%d/%d]" % (source_percent, idx_source.size(0), num_source))
## construct a prototypical classifier, where the prototypes are computed as the mean of the labeled source data and the pseudo-labeled unlabeled target data.
for i in range(self.args.num_class):
source_feature_category = source_feature_matrix[hard_label_s == i]
target_u_feature_category = target_u_feature_matrix[self.all_hard_pseudo_label == i]
all_feature_category = torch.cat((source_feature_category, target_u_feature_category), dim=0)
self.proto.data[i] = all_feature_category.mean(0).data.clone()
self.previous_selected_index = self.selected_index
self.previsoud_selected_index_source = self.selected_index_source
self.hard_label_s = hard_label_s.cuda()
self.hard_label_t = target_u_gt_label_for_visual.cuda()
if self.args.target_selection == 'ours':
self.selected_index = idx.numpy().tolist() ### the threshold pseudo label
self.selected_index_source = idx_source.numpy().tolist() ### the threshold pseudo label
# elif self.args.target_selection == 'gt':
# self.selected_index = list(range(selected_num))
elif self.args.target_selection == 'rand':
all_list = list(range(num_unl))
self.selected_index = random.sample(all_list, selected_num)
all_list = list(range(num_source))
self.selected_index_source = random.sample(all_list, selected_num_source) ### the threshold pseudo label
else:
raise NotImplementedError
##### the sample distribution in different intermediate domains.
# if self.epoch != 0:
# self.calculate_wasserstein_infinity_dis_of_consecutive_domains(source_feature_matrix, target_u_feature_matrix)
# self.calculate_a_dis_of_consecutive_domains(source_feature_matrix, target_u_feature_matrix)
# self.selected_index = idx.numpy().tolist() ### the threshold pseudo label
# self.selected_index_source = idx_source.numpy().tolist() ### the threshold pseudo label
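# --- Illustrative sketch, not part of the original trainer ---
# Minimal form of the "top-percent plus manual threshold" confidence filtering
# used above (tensor names are hypothetical; probs is an (N, C) softmax output):
#
#     scores, _ = torch.max(probs, dim=1)          # confidence per sample
#     selected_num = int(percent * scores.size(0))
#     value, _ = torch.topk(scores, selected_num)  # sorted in descending order
#     thr = max(value[-1].item(), manual_thr)      # never drop below the manual threshold
#     keep_mask = scores >= thr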
def calculate_a_dis_of_consecutive_domains(self, source_feature_matrix, target_u_feature_matrix):
num_for_dis = 2500
num_source = source_feature_matrix.size(0)
num_target = target_u_feature_matrix.size(0)
indices_source = torch.randperm(num_source)
indices_target = torch.randperm(num_target)
source_feature_matrix_sampled = source_feature_matrix[indices_source][:num_for_dis]
target_u_feature_matrix_sampled = target_u_feature_matrix[indices_target][:num_for_dis]
a_dis_st = proxy_a_distance(source_feature_matrix_sampled.cpu().numpy(), target_u_feature_matrix_sampled.cpu().numpy(),
verbose=True)
previous_domain = torch.cat((source_feature_matrix[self.previsoud_selected_index_source], target_u_feature_matrix[self.previous_selected_index]), dim=0)
current_domain = torch.cat(
(source_feature_matrix[self.selected_index_source], target_u_feature_matrix[self.selected_index]),
dim=0)
num_previous = previous_domain.size(0)
num_current = current_domain.size(0)
indices_previous = torch.randperm(num_previous)
indices_current = torch.randperm(num_current)
previous_feature_matrix_sampled = previous_domain[indices_previous][:num_for_dis]
current_feature_matrix_sampled = current_domain[indices_current][:num_for_dis]
a_dis_pc = proxy_a_distance(previous_feature_matrix_sampled.cpu().numpy(), current_feature_matrix_sampled.cpu().numpy(),
verbose=True)
log = open(os.path.join(self.args.save_dir, 'log.txt'), 'a')
log.write("\n")
log.write('A-distance ST: %3f, consecutive: %3f' % (a_dis_st, a_dis_pc))
log.close()
def calculate_wasserstein_infinity_dis_of_consecutive_domains(self, source_feature_matrix, target_u_feature_matrix):
# self.hard_label_s = hard_label_s
# self.hard_label_t = target_u_gt_label_for_visual
# self.previsoud_selected_index_source
# self.previous_selected_index
wasser_dis_st = []
wasser_dis_pc = []
wasser_dis_same_domain_s = []
wasser_dis_same_domain_t = []
for i in range(self.args.num_class):
s_cate_i = source_feature_matrix[self.hard_label_s == i]
t_cate_i = target_u_feature_matrix[self.hard_label_t == i]
num_source = s_cate_i.size(0)
num_target = t_cate_i.size(0)
indices_previous = torch.randperm(num_source)
indices_current = torch.randperm(num_target)
s_cate_i = s_cate_i[indices_previous]
t_cate_i = t_cate_i[indices_current]
t_cate_i_half = int(num_target /2.0)
s_cate_i_half = int(num_source / 2.0)
wasserstein_infinity = wasserstein_infinity_calculation(s_cate_i.cpu().numpy(), t_cate_i.cpu().numpy())
wasserstein_infinity_same_t = wasserstein_infinity_calculation(t_cate_i[:t_cate_i_half].cpu().numpy(), t_cate_i[t_cate_i_half:].cpu().numpy())
wasser_dis_same_domain_t.append(wasserstein_infinity_same_t)
wasserstein_infinity_same_s = wasserstein_infinity_calculation(s_cate_i[:s_cate_i_half].cpu().numpy(), s_cate_i[s_cate_i_half:].cpu().numpy())
wasser_dis_same_domain_s.append(wasserstein_infinity_same_s)
wasser_dis_st.append(wasserstein_infinity)
wasser_dis_st = max(wasser_dis_st)
wasser_dis_same_domain_t = max(wasser_dis_same_domain_t)
wasser_dis_same_domain_s = max(wasser_dis_same_domain_s)
########
previous_domain = torch.cat((source_feature_matrix[self.previsoud_selected_index_source], target_u_feature_matrix[self.previous_selected_index]), dim=0)
current_domain = torch.cat(
(source_feature_matrix[self.selected_index_source], target_u_feature_matrix[self.selected_index]), dim=0)
label_for_previous = torch.cat((self.hard_label_s[self.previsoud_selected_index_source], self.hard_label_t[self.previous_selected_index]), dim=0)
label_for_current = torch.cat((self.hard_label_s[self.selected_index_source], self.hard_label_t[self.selected_index]), dim=0)
for i in range(self.args.num_class):
s_cate_i = previous_domain[label_for_previous == i]
t_cate_i = current_domain[label_for_current == i]
num_source = s_cate_i.size(0)
num_target = t_cate_i.size(0)
indices_previous = torch.randperm(num_source)
indices_current = torch.randperm(num_target)
s_cate_i = s_cate_i[indices_previous]
t_cate_i = t_cate_i[indices_current]
wasserstein_infinity = wasserstein_infinity_calculation(s_cate_i.cpu().numpy(), t_cate_i.cpu().numpy())
wasser_dis_pc.append(wasserstein_infinity)
wasser_dis_pc = max(wasser_dis_pc)
log = open(os.path.join(self.args.save_dir, 'log.txt'), 'a')
log.write("\n")
log.write('wasser-distance ST: %3f, consecutive: %3f, intra_s: %3f, intra_t: %3f' % (wasser_dis_st, wasser_dis_pc, wasser_dis_same_domain_s, wasser_dis_same_domain_t))
log.close()
def get_pseudo_labels(self, index_batch):
index_list = index_batch.numpy().tolist()
valid_u = []
for index, element in enumerate(index_list):
if element in self.selected_index:
valid_u.append(True)
else:
valid_u.append(False)
selected_index_in_all_data = index_batch[valid_u]
label_u_hard = self.all_hard_pseudo_label[selected_index_in_all_data.numpy().tolist()]
label_u_soft = self.all_soft_pseudo_label[selected_index_in_all_data.numpy().tolist()]
return label_u_hard.cuda(), label_u_soft.cuda(), valid_u
def get_source_index(self, index_source):
index_source = index_source.numpy().tolist()
valid_u = []
for index, element in enumerate(index_source):
if element in self.selected_index_source:
valid_u.append(True)
else:
valid_u.append(False)
return valid_u
def get_source_soft_weight(self, index_source):
return self.source_soft_weight[index_source]
def solve(self):
stop = False
counter=0
best_prec1_val = 0
self.pre_train_classifier()
self.iters = 0 ### initial the training iteration
if self.args.initial_only:
self.Get_pseudo_labels_with_classifiers_consistency()
return 0
while not stop:
#### adopted in the overall weight setting.
self.weight_source = self.iters / (self.args.max_iters * 0.9)
if self.weight_source >= 1:
self.weight_source | |
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to GCE VM networking.
The Firewall class provides a way of opening VM ports. The Network class allows
VMs to communicate via internal ips and isolates PerfKitBenchmarker VMs from
others in the
same project. See https://developers.google.com/compute/docs/networking for
more information about GCE VM networking.
"""
import json
import logging
import threading
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from absl import flags
from perfkitbenchmarker import context
from perfkitbenchmarker import errors
from perfkitbenchmarker import network
from perfkitbenchmarker import placement_group
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import vpn_service
from perfkitbenchmarker.providers import gcp
from perfkitbenchmarker.providers.gcp import gce_placement_group
from perfkitbenchmarker.providers.gcp import util
import six
FLAGS = flags.FLAGS
NETWORK_RANGE = '10.0.0.0/8'
ALLOW_ALL = 'tcp:1-65535,udp:1-65535,icmp'
class GceVpnGateway(network.BaseVpnGateway):
"""Object representing a GCE VPN Gateway."""
CLOUD = gcp.CLOUD
def __init__(self, name: str, network_name: str, region: str, cidr: str,
project: str):
super(GceVpnGateway, self).__init__()
self.forwarding_rules: Dict[str, GceForwardingRule] = {}
self.forwarding_rules_lock = threading.Lock()
self.tunnels: Dict[str, GceStaticTunnel] = {}
self.routes: Dict[str, GceRoute] = {}
self.ip_resource = None
self.vpn_gateway_resource = GceVpnGatewayResource(
name, network_name, region, cidr, project)
self.vpn_gateway_resource_lock = threading.Lock()
self.name = name
self.network_name = network_name
self.region = region
self.cidr = cidr
self.project = project
self.ip_address = None
self.ip_address_lock = threading.Lock()
self.created = False
self.require_target_to_init = False
self.routing = None
self.psk = None
# Add gateway to benchmark spec at init().
benchmark_spec = context.GetThreadBenchmarkSpec()
if benchmark_spec is None:
raise errors.Error('GetNetwork called in a thread without a '
'BenchmarkSpec.')
key = self.name
with benchmark_spec.vpn_gateways_lock:
if key not in benchmark_spec.vpn_gateways:
benchmark_spec.vpn_gateways[key] = self
def ConfigureTunnel(self, tunnel_config: vpn_service.TunnelConfig):
"""Updates tunnel config with new information.
Args:
tunnel_config: The tunnel configuration for this VPN.
"""
logging.debug('Configuring Tunnel with params:')
logging.debug(tunnel_config)
# update tunnel_config if needed
if self.name not in tunnel_config.endpoints:
logging.debug('tunnel_config: This endpoint is not registered yet... %s',
self.name)
tunnel_config.endpoints[self.name] = {
'is_configured': False,
'cidr': self.cidr,
'project': self.project,
'network_name': self.network_name,
'region': self.region,
'require_target_to_init': self.require_target_to_init,
}
# attach a public IP to this gateway if one does not exist yet
# and update tunnel_config if needed
# requires project, region, name
with self.ip_address_lock:
if not self.ip_address:
if not self.ip_resource:
self.ip_resource = GceIPAddress(self.project, self.region, self.name)
self.ip_resource.Create()
self.ip_address = self.ip_resource.ip_address
if 'ip_address' not in tunnel_config.endpoints[self.name]:
logging.debug('tunnel_config: Configuring IP for %s', self.name)
tunnel_config.endpoints[self.name]['ip_address'] = self.ip_address
# configure forwarding
# requires: -
with self.forwarding_rules_lock:
if len(self.forwarding_rules) == 3:
logging.debug('tunnel_config: Forwarding already configured, skipping')
else:
logging.debug('tunnel_config: Setting up forwarding')
self._SetupForwarding(tunnel_config)
# Abort if we don't have a target info configured yet
if len(tunnel_config.endpoints) < 2:
logging.debug('tunnel_config: Only found %d endpoints... '
'waiting for target to configure',
len(tunnel_config.endpoints))
return
# Get target endpoint config key
target_endpoint = [k for k in tunnel_config.endpoints.keys()
if k not in self.name][0]
# configure tunnel resources
# requires: target_ip_address, IKE version (default 1),
if 'ip_address' not in tunnel_config.endpoints[target_endpoint]:
logging.debug('tunnel_config: Target IP needed... '
'waiting for target to configure')
return
if not hasattr(tunnel_config, 'psk'):
logging.debug('tunnel_config: PSK not provided... setting to runid')
tunnel_config.psk = 'key' + FLAGS.run_uri
self._SetupTunnel(tunnel_config)
# configure routing
# requires: next_hop_tunnel_id, target_cidr,
# TODO(dlott) Should be Optional[str], but that requires making endpoints a
# proper class rather than a dictionary of string and bool. See TunnelConfig
dest_cidr: Optional[Any] = tunnel_config.endpoints[target_endpoint].get(
'cidr')
if not dest_cidr or not dest_cidr.strip():
logging.debug('tunnel_config: destination CIDR needed... '
'waiting for target to configure')
return
self._SetupRouting(
tunnel_config.suffix,
tunnel_config.endpoints[self.name]['tunnel_id'],
tunnel_config.endpoints[target_endpoint]['cidr'])
tunnel_config.endpoints[self.name]['is_configured'] = True
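# --- Illustrative sketch, not part of the original module ---
# The resource classes below issue roughly this gcloud sequence per endpoint
# (angle-bracket values are placeholders; the flags mirror the cmd.flags set in
# the corresponding _Create methods):
#
#     gcloud compute addresses create <gateway-name> --region <region>
#     gcloud compute forwarding-rules create fr-udp500-<suffix>-<run_uri> ...   # UDP 500
#     gcloud compute forwarding-rules create fr-udp4500-<suffix>-<run_uri> ...  # UDP 4500
#     gcloud compute forwarding-rules create fr-esp-<suffix>-<run_uri> ...      # ESP
#     gcloud compute vpn-tunnels create tun-<gateway-name>-<suffix> \
#         --peer-address <target-ip> --target-vpn-gateway <gateway-name> \
#         --ike-version <1|2> --shared-secret <psk> --region <region>
#     gcloud compute routes create route-<gateway-name>-<suffix> ...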
def IsTunnelReady(self, tunnel_id: str) -> bool:
"""Returns True if the tunnel is up and ready for traffic.
Args:
tunnel_id: The id of the tunnel to check.
Returns:
boolean.
"""
return self.tunnels[tunnel_id].IsReady()
def _SetupTunnel(self, tunnel_config: vpn_service.TunnelConfig):
"""Register a new GCE VPN tunnel for this endpoint.
Args:
tunnel_config: VPN tunnel configuration.
"""
target_endpoint = [k for k in tunnel_config.endpoints.keys()
if k not in self.name][0]
project = tunnel_config.endpoints[self.name]['project']
region = tunnel_config.endpoints[self.name]['region']
vpn_gateway_id = self.name
target_ip = tunnel_config.endpoints[target_endpoint]['ip_address']
psk = tunnel_config.psk
ike_version = tunnel_config.ike_version
suffix = tunnel_config.suffix
name = 'tun-' + self.name + '-' + suffix
if name not in self.tunnels:
self.tunnels[name] = GceStaticTunnel(
project, region, name, vpn_gateway_id, target_ip, ike_version, psk)
self.tunnels[name].Create()
tunnel_config.endpoints[self.name]['tunnel_id'] = name
def _SetupForwarding(self, tunnel_config: vpn_service.TunnelConfig):
"""Create IPSec forwarding rules.
Forwards ESP protocol, and UDP 500/4500 for tunnel setup.
Args:
tunnel_config: The tunnel configuration for this VPN.
"""
if len(self.forwarding_rules) == 3:
return # backout if already set
suffix = tunnel_config.suffix
# GCP resource names must be lowercase.
fr_UDP500_name = ('fr-udp500-%s-%s' %
(suffix, FLAGS.run_uri))
fr_UDP4500_name = ('fr-udp4500-%s-%s' %
(suffix, FLAGS.run_uri))
fr_ESP_name = ('fr-esp-%s-%s' %
(suffix, FLAGS.run_uri))
if fr_UDP500_name not in self.forwarding_rules:
fr_UDP500 = GceForwardingRule(
fr_UDP500_name, 'UDP', self, 500)
self.forwarding_rules[fr_UDP500_name] = fr_UDP500
fr_UDP500.Create()
if fr_UDP4500_name not in self.forwarding_rules:
fr_UDP4500 = GceForwardingRule(
fr_UDP4500_name, 'UDP', self, 4500)
self.forwarding_rules[fr_UDP4500_name] = fr_UDP4500
fr_UDP4500.Create()
if fr_ESP_name not in self.forwarding_rules:
fr_ESP = GceForwardingRule(
fr_ESP_name, 'ESP', self)
self.forwarding_rules[fr_ESP_name] = fr_ESP
fr_ESP.Create()
def _SetupRouting(self, suffix: str, next_hop_tun: str, dest_cidr: str):
"""Create IPSec routing rules between the source and target gateways."""
route_name = 'route-' + self.name + '-' + suffix
if route_name not in self.routes:
self.routes[route_name] = GceRoute(
route_name, dest_cidr, self.network_name, next_hop_tun,
self.region, self.project)
self.routes[route_name].Create()
def Create(self):
"""Creates the actual VpnGateway."""
benchmark_spec = context.GetThreadBenchmarkSpec()
if benchmark_spec is None:
raise errors.Error('GetNetwork called in a thread without a '
'BenchmarkSpec.')
if self.created:
return
self.vpn_gateway_resource.Create()
self.created = True
def Delete(self):
"""Deletes the actual VpnGateway."""
if self.ip_resource:
self.ip_resource.Delete()
if self.tunnels:
vm_util.RunThreaded(lambda tun: self.tunnels[tun].Delete(),
list(self.tunnels.keys()))
if self.forwarding_rules:
vm_util.RunThreaded(lambda fr: self.forwarding_rules[fr].Delete(),
list(self.forwarding_rules.keys()))
if self.routes:
vm_util.RunThreaded(lambda route: self.routes[route].Delete(),
list(self.routes.keys()))
if self.vpn_gateway_resource:
self.vpn_gateway_resource.Delete()
self.created = False
class GceVpnGatewayResource(resource.BaseResource):
"""Object representing a GCE VPN Gateway Resource."""
def __init__(self, name: str, network_name: str, region: str, cidr: str,
project: str):
super(GceVpnGatewayResource, self).__init__()
self.name = name
self.network_name = network_name
self.region = region
self.cidr = cidr
self.project = project
def _Create(self):
cmd = util.GcloudCommand(self, 'compute', 'target-vpn-gateways', 'create',
self.name)
cmd.flags['network'] = self.network_name
cmd.flags['region'] = self.region
cmd.Issue()
def _Exists(self):
cmd = util.GcloudCommand(self, 'compute', 'target-vpn-gateways', 'describe',
self.name)
cmd.flags['region'] = self.region
_, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)
return not retcode
def _Delete(self):
cmd = util.GcloudCommand(self, 'compute', 'target-vpn-gateways', 'delete',
self.name)
cmd.flags['region'] = self.region
cmd.Issue(raise_on_failure=False)
class GceIPAddress(resource.BaseResource):
"""Object representing a GCE IP address."""
def __init__(self, project: str, region: str, name: str):
super(GceIPAddress, self).__init__()
self.project = project
self.region = region
self.name = name
self.ip_address = None
def _Create(self):
"""Allocates a public IP for the VPN gateway."""
cmd = util.GcloudCommand(self, 'compute', 'addresses', 'create', self.name)
cmd.flags['region'] = self.region
cmd.Issue()
def _PostCreate(self):
cmd = util.GcloudCommand(self, 'compute', 'addresses', 'describe',
self.name)
cmd.flags['region'] = self.region
cmd.flags['format'] = 'value(address)'
stdout, _, _ = cmd.Issue()
self.ip_address = stdout.rstrip()
def _Delete(self):
"""Deletes a public IP for the VPN gateway."""
cmd = util.GcloudCommand(self, 'compute', 'addresses', 'delete', self.name)
cmd.flags['region'] = self.region
cmd.Issue(raise_on_failure=False)
def _Exists(self) -> bool:
"""Returns True if the IP address exists."""
cmd = util.GcloudCommand(self, 'compute', 'addresses', 'describe',
self.name)
cmd.flags['region'] = self.region
_, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)
return not retcode
class GceStaticTunnel(resource.BaseResource):
"""An object representing a GCE Tunnel."""
def __init__(self, project: str, region: str, name: str, vpn_gateway_id: str,
target_ip: str, ike_version: str, psk: str):
super(GceStaticTunnel, self).__init__()
self.project = project
self.region = region
self.name = name
self.vpn_gateway_id = vpn_gateway_id
self.target_ip = target_ip
self.ike_version = ike_version
self.psk = psk
def _Create(self):
"""Creates the Tunnel."""
cmd = util.GcloudCommand(self, 'compute', 'vpn-tunnels', 'create',
self.name)
cmd.flags['peer-address'] = self.target_ip
cmd.flags['target-vpn-gateway'] = self.vpn_gateway_id
cmd.flags['ike-version'] = self.ike_version
cmd.flags['local-traffic-selector'] = '0.0.0.0/0'
cmd.flags['remote-traffic-selector'] = '0.0.0.0/0'
cmd.flags['shared-secret'] = self.psk
cmd.flags['region'] = self.region
cmd.Issue()
def _Delete(self):
"""Delete IPSec tunnel."""
cmd = util.GcloudCommand(self, 'compute', 'vpn-tunnels', 'delete',
self.name)
cmd.flags['region'] = self.region
cmd.Issue(raise_on_failure=False)
def _Exists(self) -> bool:
"""Returns True if the tunnel exists."""
cmd = util.GcloudCommand(self, 'compute', 'vpn-tunnels', 'describe',
self.name)
cmd.flags['region'] = self.region
_, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)
return not retcode
def IsReady(self) -> bool:
cmd = util.GcloudCommand(self, 'compute', 'vpn-tunnels', 'describe',
self.name)
cmd.flags['region'] = self.region
response = cmd.Issue(suppress_warning=True)
return 'established' in str(response).lower()
class GceRoute(resource.BaseResource):
"""An object representing a GCE Route."""
def | |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable
import pytest
from cirq.line.placement.greedy import (
GreedySequenceSearch,
GreedySequenceSearchStrategy,
MinimalConnectivityGreedySequenceSearch,
LargestAreaGreedySequenceSearch
)
from cirq.line.placement.sequence import GridQubitLineTuple
from cirq.devices import GridQubit
from cirq.google import XmonDevice
from cirq.testing.mock import mock
from cirq.value import Duration
def _create_device(qubits: Iterable[GridQubit]):
return XmonDevice(Duration(nanos=0), Duration(nanos=0), Duration(nanos=0),
qubits)
def test_greedy_sequence_search_fails_on_wrong_start_qubit():
q00 = GridQubit(0, 0)
q01 = GridQubit(0, 1)
with pytest.raises(ValueError):
GreedySequenceSearch(_create_device([q00]), q01)
def test_get_or_search_calls_find_sequence_once():
q00 = GridQubit(0, 0)
q01 = GridQubit(0, 1)
search = GreedySequenceSearch(_create_device([q00, q01]), q00)
with mock.patch.object(search, '_find_sequence') as find_sequence:
sequence = [q00, q01]
find_sequence.return_value = sequence
assert search.get_or_search() == sequence
find_sequence.assert_called_once_with()
assert search.get_or_search() == sequence
find_sequence.assert_called_once_with()
def test_find_sequence_assembles_head_and_tail():
q00 = GridQubit(0, 0)
q01 = GridQubit(0, 1)
q02 = GridQubit(0, 2)
qubits = [q00, q01, q02]
start = q01
search = GreedySequenceSearch(_create_device(qubits), start)
with mock.patch.object(search, '_sequence_search') as sequence_search:
head = [q01, q00]
tail = [q01, q02]
sequence_search.side_effect = [tail, head]
assert search._find_sequence() == qubits
sequence_search.assert_has_calls(
[mock.call(start, []), mock.call(start, tail)])
def test_find_sequence_calls_expand_sequence():
q00 = GridQubit(0, 0)
q01 = GridQubit(0, 1)
q02 = GridQubit(0, 2)
qubits = [q00, q01, q02]
start = q01
search = GreedySequenceSearch(_create_device(qubits), start)
with mock.patch.object(
search, '_sequence_search') as sequence_search, mock.patch.object(
search, '_expand_sequence') as expand_sequence:
head = [q01, q00]
tail = [q01, q02]
sequence_search.side_effect = [tail, head]
search._find_sequence()
expand_sequence.assert_called_once_with(qubits)
def test_search_sequence_calls_choose_next_qubit():
q00 = GridQubit(0, 0)
q01 = GridQubit(0, 1)
q02 = GridQubit(0, 2)
qubits = [q00, q01, q02]
search = GreedySequenceSearch(_create_device(qubits), q01)
with mock.patch.object(search, '_choose_next_qubit') as choose_next_qubit:
choose_next_qubit.return_value = None
search._sequence_search(q01, [])
choose_next_qubit.assert_called_once_with(q01, {q01})
with mock.patch.object(search, '_choose_next_qubit') as choose_next_qubit:
choose_next_qubit.return_value = None
search._sequence_search(q01, [q00])
choose_next_qubit.assert_called_once_with(q01, {q00, q01})
def test_search_sequence_assembles_sequence():
q00 = GridQubit(0, 0)
q01 = GridQubit(0, 1)
q02 = GridQubit(0, 2)
qubits = [q00, q01, q02]
search = GreedySequenceSearch(_create_device(qubits), q01)
with mock.patch.object(search, '_choose_next_qubit') as choose_next_qubit:
choose_next_qubit.side_effect = [q01, q02, None]
assert search._sequence_search(q00, []) == [q00, q01, q02]
def test_find_path_between_finds_path():
q00 = GridQubit(0, 0)
q01 = GridQubit(0, 1)
q02 = GridQubit(0, 2)
q10 = GridQubit(1, 0)
q11 = GridQubit(1, 1)
q12 = GridQubit(1, 2)
q20 = GridQubit(2, 0)
q21 = GridQubit(2, 1)
q22 = GridQubit(2, 2)
qubits = [q00, q01, q10, q11]
start = q00
search = GreedySequenceSearch(_create_device(qubits), start)
assert search._find_path_between(q00, q01, {q00, q01}) == [q10, q11]
# path1: + + + path2: +-+-+
# | |
# + + + +
# | |
# + + + +-+-+
qubits = [q00, q01, q02, q10, q20, q21, q22, q12]
path_1 = [q00, q01, q02]
path_2 = [q00, q10, q20, q21, q22, q12, q02]
start = q00
search = GreedySequenceSearch(_create_device(qubits), start)
assert search._find_path_between(q00, q02, set(path_1)) == path_2[1:-1]
assert search._find_path_between(q02, q00, set(path_1)) == path_2[-2:0:-1]
assert search._find_path_between(q00, q02, set(path_2)) == path_1[1:-1]
assert search._find_path_between(q02, q00, set(path_2)) == path_1[-2:0:-1]
def test_find_path_between_does_not_find_path():
q00 = GridQubit(0, 0)
q01 = GridQubit(0, 1)
q02 = GridQubit(0, 2)
q10 = GridQubit(1, 0)
q20 = GridQubit(2, 0)
q22 = GridQubit(2, 2)
q12 = GridQubit(1, 2)
qubits = [q00, q01]
start = q00
search = GreedySequenceSearch(_create_device(qubits), start)
assert search._find_path_between(q00, q01, {q00, q01}) is None
qubits = [q00, q01, q10]
start = q00
search = GreedySequenceSearch(_create_device(qubits), start)
assert search._find_path_between(q00, q01, {q00, q01}) is None
# + + +
# |
# +
# |
# + + +
qubits = [q00, q01, q02, q10, q20, q22, q12]
path_1 = [q00, q01, q02]
start = q00
search = GreedySequenceSearch(_create_device(qubits), start)
assert search._find_path_between(q00, q02, set(path_1)) is None
def test_expand_sequence_expands_sequence():
q00 = GridQubit(0, 0)
q01 = GridQubit(0, 1)
q02 = GridQubit(0, 2)
q03 = GridQubit(0, 3)
q04 = GridQubit(0, 4)
q10 = GridQubit(1, 0)
q11 = GridQubit(1, 1)
q12 = GridQubit(1, 2)
q13 = GridQubit(1, 3)
q14 = GridQubit(1, 4)
# + + -> +-+
# | |
# + + +-+
qubits = [q00, q01, q10, q11]
start = q00
search = GreedySequenceSearch(_create_device(qubits), start)
assert search._expand_sequence([q00, q01]) == [q00, q10, q11, q01]
# + + -> +-+
# | |
# + + +-+
# | |
# + +
qubits = [q00, q01, q02, q10, q11]
start = q00
search = GreedySequenceSearch(_create_device(qubits), start)
assert search._expand_sequence([q00, q01, q02]) == [q00, q10, q11, q01,
q02]
# + -> +
# | |
# + + +-+
# | |
# + + +-+
qubits = [q00, q01, q02, q11, q12]
start = q00
search = GreedySequenceSearch(_create_device(qubits), start)
assert search._expand_sequence([q00, q01, q02]) == [q00, q01, q11, q12,
q02]
# + -> +
# | |
# + + +-+
# | |
# + + +-+
# | |
# + +
qubits = [q00, q01, q02, q03, q11, q12]
start = q00
search = GreedySequenceSearch(_create_device(qubits), start)
assert search._expand_sequence([q00, q01, q02, q03]) == [q00, q01, q11,
q12,
q02, q03]
# + + -> +-+
# | |
# + + +-+
# | |
# + +
# | |
# + + +-+
# | |
# + + +-+
qubits = [q00, q01, q02, q03, q04, q10, q11, q13, q14]
start = q00
search = GreedySequenceSearch(_create_device(qubits), start)
assert search._expand_sequence([q00, q01, q02, q03, q04]) == [q00, q10,
q11,
q01, q02,
q03,
q13, q14,
q04]
def test_minimal_sequence_search_chooses_minimal():
q00 = GridQubit(0, 0)
q10 = GridQubit(1, 0)
q20 = GridQubit(2, 0)
q21 = GridQubit(2, 1)
qubits = [q00, q10, q20, q21]
search = MinimalConnectivityGreedySequenceSearch(_create_device(qubits),
q10)
# + *-+
# |
# +
assert search._choose_next_qubit(q10, {q10}) == q20
assert search._choose_next_qubit(q20, {q10, q20}) == q21
def test_minimal_sequence_search_does_not_use_used():
q00 = GridQubit(0, 0)
q10 = GridQubit(1, 0)
q20 = GridQubit(2, 0)
q21 = GridQubit(2, 1)
qubits = [q00, q10, q20, q21]
search = MinimalConnectivityGreedySequenceSearch(_create_device(qubits),
q10)
# + *-+
#
# +
assert search._choose_next_qubit(q10, {q00, q10}) == q20
def test_minimal_sequence_search_returns_none_for_single_node():
q00 = GridQubit(0, 0)
qubits = [q00]
search = MinimalConnectivityGreedySequenceSearch(_create_device(qubits),
q00)
assert search._choose_next_qubit(q00, {q00}) is None
def test_minimal_sequence_search_returns_none_when_blocked():
q00 = GridQubit(0, 0)
q10 = GridQubit(1, 0)
qubits = [q00, q10]
search = MinimalConnectivityGreedySequenceSearch(_create_device(qubits),
q10)
assert search._choose_next_qubit(q10, {q00, q10}) is None
def test_minimal_sequence_search_traverses_grid():
q00 = GridQubit(0, 0)
q10 = GridQubit(1, 0)
q11 = GridQubit(1, 1)
q20 = GridQubit(2, 0)
q30 = GridQubit(3, 0)
q40 = GridQubit(4, 0)
q41 = GridQubit(4, 1)
q42 = GridQubit(4, 2)
q50 = GridQubit(5, 0)
qubits = [q00, q10, q11, q20, q30, q40, q50, q41, q42]
search = MinimalConnectivityGreedySequenceSearch(_create_device(qubits),
q20)
# + + *-+-+-+
# |
# + +
# |
# +
assert search._choose_next_qubit(q20, {q20}) == q30
assert search._choose_next_qubit(q30, {q20, q30}) == q40
assert search._choose_next_qubit(q40, {q20, q30, q40}) == q41
assert search._choose_next_qubit(q41, {q20, q30, q40, q41}) == q42
assert search._choose_next_qubit(q42, {q20, q30, q40, q41, q42}) is None
# +-+-+-+-+ +
# |
# + +
# |
# *
assert search._choose_next_qubit(q42, {q42}) == q41
assert search._choose_next_qubit(q41, {q42, q41}) == q40
assert search._choose_next_qubit(q40, {q42, q41, q40}) == q30
assert search._choose_next_qubit(q30, {q42, q41, q40, q30}) == q20
assert search._choose_next_qubit(q20, {q42, q41, q40, q30, q20}) == q10
assert search._choose_next_qubit(q10,
{q42, q41, q40, q30, q20, q10}) == q11
assert search._choose_next_qubit(q11, {q42, q41, q40, q30, q20, q10,
q11}) is None
def test_largest_sequence_search_chooses_largest():
q00 = GridQubit(0, 0)
q10 = GridQubit(1, 0)
q20 = GridQubit(2, 0)
q21 = GridQubit(2, 1)
qubits = [q00, q10, q20, q21]
search = LargestAreaGreedySequenceSearch(_create_device(qubits), q10)
# + *-+
#
# +
assert search._choose_next_qubit(q10, {q10}) == q20
def test_largest_sequence_search_does_not_use_used():
q00 = GridQubit(0, 0)
q10 = GridQubit(1, 0)
q20 = GridQubit(2, 0)
q21 = GridQubit(2, 1)
qubits = [q00, q10, q20, q21]
search = LargestAreaGreedySequenceSearch(_create_device(qubits), q10)
# +-* X
#
# +
assert search._choose_next_qubit(q10, {q10, q20}) == q00
def test_largest_sequence_search_traverses_grid():
q00 = GridQubit(0, 0)
q10 = GridQubit(1, 0)
q11 = GridQubit(1, 1)
q20 = GridQubit(2, 0)
q30 = GridQubit(3, 0)
q40 = GridQubit(4, 0)
q41 = GridQubit(4, 1)
q42 = GridQubit(4, 2)
q50 = GridQubit(5, 0)
qubits = [q00, q10, | |
has no container {cont_config.name}"
for resource in Resource.values():
current_state = None
container_requirements = container.get_resource_requirements(resource)
get_requirements = getattr(cont_config, resource).get
for requirement in get_requirements:
current_state = container_requirements.get(requirement)
if current_state:
break
assert current_state, (
f"{type(target_controller).__name__} {target_config.name} target container {cont_config.name} spec does not define the resource {resource}. "
f"At least one of the following must be specified: {', '.join(map(lambda req: req.resources_key, get_requirements))}"
)
@servo.multicheck('Containers in the "{item.name}" Deployment have resource requirements')
async def check_kubernetes_resource_requirements(self) -> Tuple[Iterable, servo.CheckHandler]:
async def check_dep_resource_requirements(
dep_config: DeploymentConfiguration,
) -> None:
deployment = await Deployment.read(dep_config.name, dep_config.namespace)
await self._check_container_resource_requirements(deployment, dep_config)
return (self.config.deployments or []), check_dep_resource_requirements
@servo.multicheck('Containers in the "{item.name}" Rollout have resource requirements')
async def check_kubernetes_rollout_resource_requirements(self) -> Tuple[Iterable, servo.CheckHandler]:
async def check_rol_resource_requirements(
rol_config: RolloutConfiguration,
) -> None:
rollout = await Rollout.read(rol_config.name, rol_config.namespace)
await self._check_container_resource_requirements(rollout, rol_config)
return (self.config.rollouts or []), check_rol_resource_requirements
@servo.multicheck('Deployment "{item.name}" is ready')
async def check_kubernetes_deployments_are_ready(self) -> Tuple[Iterable, servo.CheckHandler]:
async def check_deployment(dep_config: DeploymentConfiguration) -> None:
deployment = await Deployment.read(dep_config.name, dep_config.namespace)
if not await deployment.is_ready():
raise RuntimeError(f'Deployment "{deployment.name}" is not ready')
return (self.config.deployments or []), check_deployment
@servo.multicheck('Rollout "{item.name}" is ready')
async def check_kubernetes_rollouts_are_ready(self) -> Tuple[Iterable, servo.CheckHandler]:
async def check_rollout(rol_config: RolloutConfiguration) -> None:
rollout = await Rollout.read(rol_config.name, rol_config.namespace)
if not await rollout.is_ready():
raise RuntimeError(f'Rollout "{rollout.name}" is not ready')
return (self.config.rollouts or []), check_rollout
@servo.metadata(
description="Kubernetes adjust connector",
version="1.5.0",
homepage="https://github.com/opsani/kubernetes-connector",
license=servo.License.apache2,
maturity=servo.Maturity.stable,
)
class KubernetesConnector(servo.BaseConnector):
config: KubernetesConfiguration
@servo.on_event()
async def attach(self, servo_: servo.Servo) -> None:
# Ensure we are ready to talk to Kubernetes API
await self.config.load_kubeconfig()
self.telemetry[f"{self.name}.namespace"] = self.config.namespace
with self.logger.catch(level="DEBUG", message=f"Unable to set version telemetry for connector {self.name}"):
async with kubernetes_asyncio.client.api_client.ApiClient() as api:
v1 = kubernetes_asyncio.client.VersionApi(api)
version_obj = await v1.get_code()
self.telemetry[f"{self.name}.version"] = f"{version_obj.major}.{version_obj.minor}"
self.telemetry[f"{self.name}.platform"] = version_obj.platform
@servo.on_event()
async def detach(self, servo_: servo.Servo) -> None:
self.telemetry.remove(f"{self.name}.namespace")
self.telemetry.remove(f"{self.name}.version")
self.telemetry.remove(f"{self.name}.platform")
@servo.on_event()
async def describe(self, control: servo.Control = servo.Control()) -> servo.Description:
state = await self._create_optimizations()
return state.to_description()
@servo.on_event()
async def components(self) -> List[servo.Component]:
state = await self._create_optimizations()
return state.to_components()
@servo.before_event(servo.Events.measure)
async def before_measure(self, *, metrics: List[str] = None, control: servo.Control = servo.Control()) -> None:
# Build state before a measurement to ensure all necessary setup is done
# (e.g., Tuning Pod is up and running)
await self._create_optimizations()
@servo.on_event()
async def adjust(
self, adjustments: List[servo.Adjustment], control: servo.Control = servo.Control()
) -> servo.Description:
state = await self._create_optimizations()
# Apply the adjustments and emit progress status
progress_logger = lambda p: self.logger.info(
p.annotate(f"waiting up to {p.timeout} for adjustments to be applied...", prefix=False),
progress=p.progress,
)
progress = servo.EventProgress(timeout=self.config.timeout)
future = asyncio.create_task(state.apply(adjustments))
future.add_done_callback(lambda _: progress.trigger())
await asyncio.gather(
future,
progress.watch(progress_logger),
)
# Handle settlement
settlement = control.settlement or self.config.settlement
if settlement:
self.logger.info(
f"Settlement duration of {settlement} requested, waiting for pods to settle..."
)
progress = servo.DurationProgress(settlement)
progress_logger = lambda p: self.logger.info(
p.annotate(f"waiting {settlement} for pods to settle...", False),
progress=p.progress,
)
async def readiness_monitor() -> None:
while not progress.finished:
if not await state.is_ready():
# Raise a specific exception if the optimization defines one
try:
await state.raise_for_status()
except servo.AdjustmentRejectedError as e:
# Update rejections with start-failed to indicate the initial rollout was successful
if e.reason == "start-failed":
e.reason = "unstable"
raise
await asyncio.sleep(servo.Duration('50ms').total_seconds())
await asyncio.gather(
progress.watch(progress_logger),
readiness_monitor()
)
if not await state.is_ready():
self.logger.warning("Rejection triggered without running error handler")
raise servo.AdjustmentRejectedError(
"Optimization target became unready after adjustment settlement period (WARNING: error handler was not run)",
reason="unstable"
)
self.logger.info(
f"Settlement duration of {settlement} has elapsed, resuming optimization."
)
description = state.to_description()
return description
@servo.on_event()
async def check(
self,
matching: Optional[servo.CheckFilter],
halt_on: Optional[servo.ErrorSeverity] = servo.ErrorSeverity.critical,
) -> List[servo.Check]:
return await KubernetesChecks.run(
self.config, matching=matching, halt_on=halt_on
)
async def _create_optimizations(self) -> KubernetesOptimizations:
# Build a KubernetesOptimizations object with progress reporting
# This ensures that the Servo isn't reported as offline
progress_logger = lambda p: self.logger.info(
p.annotate(f"waiting up to {p.timeout} for Kubernetes optimization setup to complete", prefix=False),
progress=p.progress,
)
progress = servo.EventProgress(timeout=self.config.timeout)
future = asyncio.create_task(KubernetesOptimizations.create(self.config))
future.add_done_callback(lambda _: progress.trigger())
await asyncio.gather(
future,
progress.watch(progress_logger),
)
return future.result()
def selector_string(selectors: Mapping[str, str]) -> str:
"""Create a selector string from the given dictionary of selectors.
Args:
selectors: The selectors to stringify.
Returns:
The selector string for the given dictionary.
"""
return ",".join([f"{k}={v}" for k, v in selectors.items()])
def selector_kwargs(
fields: Mapping[str, str] = None,
labels: Mapping[str, str] = None,
) -> Dict[str, str]:
"""Create a dictionary of kwargs for Kubernetes object selectors.
Args:
fields: A mapping of fields used to restrict the returned collection of
Objects to only those which match these field selectors. By default,
no restricting is done.
labels: A mapping of labels used to restrict the returned collection of
Objects to only those which match these label selectors. By default,
no restricting is done.
Returns:
A dictionary that can be used as kwargs for many Kubernetes API calls for
label and field selectors.
"""
kwargs = {}
if fields is not None:
kwargs["field_selector"] = selector_string(fields)
if labels is not None:
kwargs["label_selector"] = selector_string(labels)
return kwargs
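# Illustrative example (hypothetical labels): selector_kwargs(labels={"app": "web"})
# returns {"label_selector": "app=web"}, which can be splatted into list calls,
# e.g. await api.list_namespaced_pod(namespace, **selector_kwargs(labels={"app": "web"})).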
class ConfigMap(KubernetesModel):
"""Kubetest wrapper around a Kubernetes `ConfigMap`_ API Object.
The actual ``kubernetes.client.V1ConfigMap`` instance that this
wraps can be accessed via the ``obj`` instance member.
This wrapper provides some convenient functionality around the
API Object and provides some state management for the `ConfigMap`_.
.. _ConfigMap:
https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#configmap-v1-core
"""
obj_type = kubernetes_asyncio.client.V1ConfigMap
api_clients = {
"preferred":kubernetes_asyncio.client.CoreV1Api,
"v1":kubernetes_asyncio.client.CoreV1Api,
}
@classmethod
async def read(cls, name: str, namespace: str) -> "ConfigMap":
"""Read a ConfigMap by name under the given namespace.
Args:
name: The name of the ConfigMap to read.
namespace: The namespace to read the ConfigMap from.
"""
async with cls.preferred_client() as api_client:
obj = await api_client.read_namespaced_config_map(name, namespace)
return ConfigMap(obj)
async def create(self, namespace: str = None) -> None:
"""Create the ConfigMap under the given namespace.
Args:
namespace: The namespace to create the ConfigMap under.
If the ConfigMap was loaded via the kubetest client, the
namespace will already be set, so it is not needed here.
Otherwise, the namespace will need to be provided.
"""
if namespace is None:
namespace = self.namespace
servo.logger.info(f'creating configmap "{self.name}" in namespace "{self.namespace}"')
servo.logger.debug(f"configmap: {self.obj}")
self.obj = await self.api_client.create_namespaced_config_map(
namespace=namespace,
body=self.obj,
)
async def patch(self) -> None:
"""
Patches a ConfigMap.
"""
self.logger.info(f'patching ConfigMap "{self.name}"')
self.logger.trace(f"ConfigMap: {self.obj}")
async with self.api_client() as api_client:
await api_client.patch_namespaced_config_map(
name=self.name,
namespace=self.namespace,
body=self.obj,
)
async def delete(self, options: kubernetes_asyncio.client.V1DeleteOptions = None) -> kubernetes_asyncio.client.V1Status:
"""Delete the ConfigMap.
This method expects the ConfigMap to have been loaded or otherwise
assigned a namespace already. If it has not, the namespace will need
to be set manually.
Args:
options: Options for ConfigMap deletion.
Returns:
The status of the delete operation.
"""
if options is None:
options = kubernetes_asyncio.client.V1DeleteOptions()
servo.logger.info(f'deleting configmap "{self.name}"')
servo.logger.debug(f"delete options: {options}")
servo.logger.debug(f"configmap: {self.obj}")
return await self.api_client.delete_namespaced_config_map(
name=self.name,
namespace=self.namespace,
body=options,
)
async def refresh(self) -> None:
"""Refresh the underlying Kubernetes ConfigMap resource."""
self.obj = await self.api_client.read_namespaced_config_map(
name=self.name,
namespace=self.namespace,
)
async def is_ready(self) -> bool:
"""Check if the ConfigMap is in the ready state.
ConfigMaps do not have a "status" field to check, so we will
measure their readiness status by whether or not they exist
on the cluster.
Returns:
True if in the ready state; False otherwise.
"""
try:
await self.refresh()
except: # noqa
return False
return True
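# Usage sketch (assumed names, not from the original source):
#     cm = await ConfigMap.read("servo-config", "servo")
#     cm.obj.data["LOG_LEVEL"] = "DEBUG"
#     await cm.patch()
# read() loads the V1ConfigMap into .obj, patch() pushes local edits back to the
# cluster, and is_ready() simply reports whether the object still exists.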
def dns_subdomainify(name: str) -> str:
"""
Valid DNS Subdomain Names conform to [RFC 1123](https://tools.ietf.org/html/rfc1123) and must:
* contain no more than 253 characters
* contain only lowercase alphanumeric characters, '-' or '.'
* start with an alphanumeric character
* end with an alphanumeric character
See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
"""
# lowercase alphanumerics
name = name.lower()
# replace slashes with dots
name = re.sub(r'\/', '.', name)
# replace whitespace with hyphens
name = re.sub(r'\s', '-', name)
# strip any remaining disallowed characters
name = re.sub(r'[^a-z0-9\.\-]+', '', name)
# truncate to our maximum length
name = name[:253]
# ensure starts with an alphanumeric by prefixing with `0-`
boundaryRegex = re.compile('^[a-z0-9]')
if not boundaryRegex.match(name):
name = ('0-' + name)[:253]
# ensure ends with an alphanumeric by suffixing with `-1`
if not boundaryRegex.match(name[-1]):
name = name[:251] + '-1'
return name
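# Illustrative example (hypothetical input): dns_subdomainify("My App/Service v2")
# lowercases to "my app/service v2", maps the slash to a dot and whitespace to
# hyphens, strips remaining disallowed characters, and returns "my-app.service-v2",
# which satisfies the RFC 1123 subdomain rules listed above.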
def dns_labelize(name: str) -> str:
"""
Transform a string into a valid Kubernetes label value.
Valid Kubernetes label values:
* must be 63 characters or | |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql import DataFrame
from bigdl.optim.optimizer import MaxEpoch
from zoo.tfpark.tf_dataset import TFNdarrayDataset
from zoo.tfpark.model import _standarize_feature_label_dataset
from zoo.common.utils import load_from_file
from zoo.orca.data.tf.data import Dataset, TFDataDataset2
from zoo.orca.data import SparkXShards
from zoo.orca.learn.tf.utils import *
from zoo.orca.learn.trigger import Trigger
from zoo.orca.learn.utils import find_latest_checkpoint, convert_predict_to_xshard
from zoo.tfpark import KerasModel
from zoo.tfpark import TFOptimizer, TFNet, ZooOptimizer
from zoo.tfpark.tf_optimizer import StatelessMetric
from zoo.tfpark.utils import evaluate_metrics
from zoo.util import nest
from zoo.util.tf import save_tf_checkpoint
class Estimator(object):
def fit(self, data, epochs, **kwargs):
pass
def predict(self, data, **kwargs):
pass
def evaluate(self, data, **kwargs):
pass
def load_orca_checkpoint(self, path, version):
"""
Load specified Orca checkpoint.
:param path: checkpoint directory which contains model.* and
optimMethod-TFParkTraining.* files.
:param version: checkpoint version, which is the suffix of model.* file,
i.e., for the model.4 file, the version is 4.
"""
self.load_checkpoint = True
self.checkpoint_path = path
self.checkpoint_version = version
def load_latest_orca_checkpoint(self, path):
"""
Load latest Orca checkpoint under specified directory.
:param path: directory containing Orca checkpoint files.
"""
ckpt_path, _, version = find_latest_checkpoint(path, model_type="tf")
if ckpt_path is None:
raise Exception("Cannot find checkpoint")
self.load_orca_checkpoint(ckpt_path, version)
def set_tensorboard(self, log_dir, app_name):
"""
Set summary information during the training process for visualization purposes.
Saved summary can be viewed via TensorBoard.
In order to take effect, it needs to be called before fit.
Training summary will be saved to 'log_dir/app_name/train'
and validation summary (if any) will be saved to 'log_dir/app_name/validation'.
# Arguments
:param log_dir: The base directory path to store training and validation logs.
:param app_name: The name of the application.
"""
self.log_dir = log_dir
self.app_name = app_name
def get_train_summary(self, tag=None):
"""
Get the scalar from model train summary
Return list of summary data of [iteration_number, scalar_value, timestamp]
# Arguments
tag: The string variable represents the scalar wanted
"""
if self.tf_optimizer:
return self.tf_optimizer.estimator.get_train_summary(tag)
return None
def get_validation_summary(self, tag=None):
"""
Get the scalar from model validation summary
Return list of summary data of [iteration_number, scalar_value, timestamp]
Note: The metric name and the summary tag may not be identical.
Please use the following table to find the tag to pass:
the left side is the metric specified during compile,
the right side is the tag you should pass.
'Accuracy' | 'Top1Accuracy'
'BinaryAccuracy' | 'Top1Accuracy'
'CategoricalAccuracy' | 'Top1Accuracy'
'SparseCategoricalAccuracy' | 'Top1Accuracy'
'AUC' | 'AucScore'
'HitRatio' | 'HitRate@k' (k is Top-k)
'Loss' | 'Loss'
'MAE' | 'MAE'
'NDCG' | 'NDCG'
'TFValidationMethod' | '${name + " " + valMethod.toString()}'
'Top5Accuracy' | 'Top5Accuracy'
'TreeNNAccuracy' | 'TreeNNAccuracy()'
'MeanAveragePrecision' | 'MAP@k' (k is Top-k) (BigDL)
'MeanAveragePrecision' | 'PascalMeanAveragePrecision' (Zoo)
'StatelessMetric' | '${name}'
# Arguments
tag: The string variable represents the scalar wanted
"""
if self.tf_optimizer:
for val_method in self.tf_optimizer.tf_model.val_methods:
if isinstance(val_method, StatelessMetric):
if tag == val_method.name:
return self.tf_optimizer.estimator.get_validation_summary(tag)
else:
if tag == str(val_method.val_method):
return self.tf_optimizer.estimator.\
get_validation_summary("{} {}".format(val_method.name, tag))
continue
return None
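# Usage sketch (assumed tags; the valid tags depend on the metrics compiled into
# the model): after est.set_tensorboard("/tmp/logs", "my_app") and a call to fit,
# est.get_train_summary("Loss") and est.get_validation_summary("Top1Accuracy")
# each return a list of [iteration_number, scalar_value, timestamp] entries.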
@staticmethod
def from_graph(*, inputs, outputs=None,
labels=None, loss=None, optimizer=None,
clip_norm=None, clip_value=None,
metrics=None, updates=None,
sess=None, model_dir=None, backend="bigdl"):
"""
Create an Estimator for a tensorflow graph.
:param inputs: input tensorflow tensors.
:param outputs: output tensorflow tensors.
:param labels: label tensorflow tensors.
:param loss: The loss tensor of the TensorFlow model, should be a scalar
:param optimizer: tensorflow optimization method.
:param clip_norm: float >= 0. Gradients will be clipped when their L2 norm exceeds
this value.
:param clip_value: a float >= 0 or a tuple of two floats.
If clip_value is a float, gradients will be clipped when their absolute value
exceeds this value.
If clip_value is a tuple of two floats, gradients will be clipped when their value is less
than clip_value[0] or greater than clip_value[1].
:param metrics: metric tensor.
:param sess: the current TensorFlow Session, if you want to use a pre-trained model,
you should use the Session to load the pre-trained variables and pass it to estimator
:param model_dir: location to save model checkpoint and summaries.
:param backend: backend for estimator. Now it only can be "bigdl".
:return: an Estimator object.
"""
assert backend == "bigdl", "only bigdl backend is supported for now"
return TFOptimizerWrapper(inputs=inputs,
outputs=outputs,
labels=labels,
loss=loss,
optimizer=optimizer,
clip_norm=clip_norm,
clip_value=clip_value,
metrics=metrics, updates=updates,
sess=sess,
model_dir=model_dir
)
@staticmethod
def from_keras(keras_model, metrics=None, model_dir=None, optimizer=None, backend="bigdl"):
"""
Create an Estimator from a tensorflow.keras model. The model must be compiled.
:param keras_model: the tensorflow.keras model, which must be compiled.
:param metrics: user specified metric.
:param model_dir: location to save model checkpoint and summaries.
:param optimizer: an optional bigdl optimMethod that will override the optimizer in
keras_model.compile
:param backend: backend for estimator. Now it only can be "bigdl".
:return: an Estimator object.
"""
assert backend == "bigdl", "only bigdl backend is supported for now"
return TFKerasWrapper(keras_model, metrics, model_dir, optimizer)
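# Usage sketch (assumes a compiled tf.keras model bound to `model`; arguments are
# illustrative):
#     est = Estimator.from_keras(model, model_dir="/tmp/ckpt")
#     est.fit(train_data, epochs=5)
# where train_data may be a SparkXShards, Spark DataFrame, tf.data.Dataset or
# orca Dataset, as dispatched by to_dataset() further below.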
def save_tf_checkpoint(self, path):
"""
Save tensorflow checkpoint in this estimator.
:param path: tensorflow checkpoint path.
"""
raise NotImplementedError()
def save_keras_model(self, path, overwrite=True):
"""
Save tensorflow keras model in this estimator.
:param path: keras model save path.
:param overwrite: Whether to silently overwrite any existing file at the target location.
"""
raise NotImplementedError()
@staticmethod
def load_keras_model(path):
"""
Create Estimator by loading an existing keras model (with weights) from HDF5 file.
:param path: String. The path to the pre-defined model.
:return: Orca TF Estimator.
"""
from tensorflow.python.keras import models
def load_func(file_path):
return models.load_model(file_path)
model = load_from_file(load_func, path)
return Estimator.from_keras(keras_model=model)
def save_keras_weights(self, filepath, overwrite=True, save_format=None):
"""
Save tensorflow keras model weights in this estimator.
:param filepath: keras model weights save path.
:param overwrite: Whether to silently overwrite any existing file at the target location.
:param save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
'.keras' will default to HDF5 if `save_format` is `None`. Otherwise
`None` defaults to 'tf'.
"""
raise NotImplementedError()
def load_keras_weights(self, filepath, by_name=False):
"""
Load tensorflow keras model weights in this estimator.
:param filepath: keras model weights save path.
:param by_name: Boolean, whether to load weights by name or by topological
order. Only topological loading is supported for weight files in
TensorFlow format.
"""
raise NotImplementedError()
def is_tf_data_dataset(data):
is_dataset = isinstance(data, tf.data.Dataset)
is_dataset_v2 = isinstance(data, tf.python.data.ops.dataset_ops.DatasetV2)
return is_dataset or is_dataset_v2
def to_dataset(data, batch_size, batch_per_thread, validation_data,
feature_cols, labels_cols, hard_code_batch_size,
sequential_order, shuffle, auto_shard_files):
# todo wrap argument into kwargs
if validation_data:
if isinstance(data, SparkXShards):
assert isinstance(validation_data, SparkXShards), \
"train data and validation data should be both SparkXShards"
if isinstance(data, Dataset):
assert isinstance(validation_data, Dataset), \
"train data and validation data should be both orca.data.tf.Dataset"
if isinstance(data, DataFrame):
assert isinstance(validation_data, DataFrame), \
"train data and validation data should be both Spark DataFrame"
if isinstance(data, tf.data.Dataset):
assert isinstance(validation_data, tf.data.Dataset), \
"train data and validation data should be both tf.data.Dataset"
if isinstance(data, SparkXShards):
dataset = xshards_to_tf_dataset(data,
batch_size,
batch_per_thread,
validation_data,
hard_code_batch_size=hard_code_batch_size,
sequential_order=sequential_order,
shuffle=shuffle)
elif isinstance(data, Dataset):
dataset = TFDataDataset2(data, batch_size=batch_size,
batch_per_thread=batch_per_thread,
validation_dataset=validation_data)
elif isinstance(data, DataFrame):
dataset = TFDataset.from_dataframe(data, feature_cols, labels_cols,
batch_size,
batch_per_thread,
hard_code_batch_size,
validation_data,
sequential_order,
shuffle
)
elif is_tf_data_dataset(data):
dataset = TFDataset.from_tf_data_dataset(data,
batch_size,
batch_per_thread,
hard_code_batch_size,
validation_data,
sequential_order,
shuffle, auto_shard_files=auto_shard_files)
else:
raise ValueError("data must be SparkXShards or orca.data.tf.Dataset or "
"Spark DataFrame or tf.data.Dataset")
return dataset
class TFOptimizerWrapper(Estimator):
def __init__(self, *, inputs, outputs, labels, loss,
optimizer, clip_norm, clip_value,
metrics,
updates, sess,
model_dir
):
self.inputs = inputs
self.outputs = outputs
self.labels = labels
self.loss = loss
self.use_bigdl_optim = False
self.clip_norm = clip_norm
self.clip_value = clip_value
if optimizer is not None:
from zoo.orca.learn.optimizers import Optimizer
if isinstance(optimizer, Optimizer):
self.train_op = None
self.optimizer = optimizer.get_optimizer()
self.use_bigdl_optim = True
else:
assert isinstance(optimizer, tf.train.Optimizer), \
"optimizer is of type {}, ".format(type(optimizer)) + \
"it should be an instance of tf.train.Optimizer"
self.optimizer = ZooOptimizer(optimizer)
if clip_norm or clip_value:
gvs = self.optimizer.compute_gradients(self.loss)
if clip_norm:
gvs = [(tf.clip_by_norm(g_v[0], clip_norm), g_v[1]) for g_v in gvs]
if clip_value:
if isinstance(clip_value, tuple):
assert len(clip_value) == 2 and clip_value[0] < clip_value[1], \
"clip value should be (clip_min, clip_max)"
gvs = | |
is not None:
ms.set("show", "yes" if new_show else "no")
def del_manuscript(self, version_title, abbrev):
ms = self._book._get("manuscripts/ms", {"abbrev": abbrev}, self._book._get("version", {"title": version_title}))
# ms = self._get("version", {"title": version_title}).xpath("manuscripts/ms[@abbrev='{}']".format(abbrev))
ms.getparent().remove(ms)
def serialize(self, pretty=True):
return etree.tostring(self._book,
xml_declaration=True,
pretty_print=pretty,
**self._book._docinfo)
def save(self):
BookManager._save(self._book)
class Book(object):
"""
Parser and manipulator class for OCP XML files
It provides methods for:
get_book_info() - extract book structure information from xml
get_filename() - return the filename of the xml document
get_group() - ???
get_readings() - return the variant readings for a variation unit
get_text() - return an iterator containing the text of a section
get_unit_group() - ???
"""
def __init__(self, xml_book_data):
try:
if getattr(xml_book_data, "read", None):
tree = etree.parse(xml_book_data)
else:
tree = etree.parse(open(xml_book_data))
except AttributeError as e:
print e.message
raise TypeError("Book() requires XML data in a file-like object")
self._book = tree.getroot()
self._docinfo = XML_DEFAULT_DOCINFO
self._docinfo.update({i: getattr(tree.docinfo, i) for i in XML_DEFAULT_DOCINFO.keys()})
# dict with keys "doctype", "encoding", and "standalone"
self._structure_info = self._find_book_info()
self.default_delimiter = '.'
def get_book_info(self):
"""
Returns a dictionary with the keys 'book' and 'version'
Returns
----------
dict
A dictionary of information about the current document with the
following keys:
book (string): the title of the current document
version (list): a list of ordered dictionaries, one per language
version. Each version OrderedDict has the following keys:
attributes (OrderedDict): with the keys:
title (string): the readable title of the document version
author (string): the author of the document version
language (string): the language of the document version
fragment (string): ?????
divisions (dict): with the keys:
delimiters (list): a list of unicode strings, each of which
is the character to be used as a delimiter between
reference numbers.
text (list): [u'', u''] ?????
labels (list): a list of unicode strings with the readable
labels used for each organisational level (top-level
first)
organisation_levels (integer): the depth of nested structural
units in the document's primary organization scheme.
manuscripts (list): a list of OrderedDicts, each representing
a manuscript. Each manuscript OrderedDict has the following
keys:
attributes (OrderedDict): with the following keys:
abbrev (unicode): the string short-form used in xml
markup
language (unicode): a string giving the language name
show (unicode): with either 'yes' or 'no' indicating
whether the current manuscript (text type) should
be shown in running form in the interface or only
in the apparatus.
bibliography (list): a list of dictionaries, each of which
has the keys:
text (unicode): '<NAME> (ed.),' ????
booktitle (list): [] ????
name (dictionary): with the keys:
text (unicode): u'Paris BN gr 2658'
sup (list): []
resources (list): ????
text_structure (OrderedDict): This is a complex OrderedDict
containing a representation of the whole document. Each key
is a complete reference including delimiters (e.g., u'1:1').
Each corresponding value is a dictionary with these keys:
units (list): a list of dictionaries, each representing
one textual variation unit. Each dictionary has
the keys:
parallel (string): to hold the identifier of
any unit that is judged to hold a semantic
parallel to the current one.
group: (string): to hold the identifier of any
larger variation structures of which the
unit is a part.
id (string): the identifier for this variation
unit.
readings (OrderedDict): in which each key is
a unicode string holding a (space delimited)
set of ms sigla and the value is an
OrderedDict with the information on the text
attested by those mss. The keys of these
ordered dicts are:
attributes (OrderedDict): with the keys:
option (string): number to set order of
display of readings in the apparatus
mss (string): a redundant duplicate
of the string key for this
reading
linebreak (string): ????
indent (string): ????
w (list): to hold parsing info on the words
of the reading ????
text (unicode): The actual text of the
document for this unit according to
these mss.
attributes (list): a list of OrderedDicts representing
the same reference information contained (in string
form) in the key for this structural section. Each
OrderedDict in the list has the keys:
number (string): the reference number for the
current organizational section of the document.
fragment (string): the identifier for a document
fragment if appropriate ????
readings (OrderedDict): ???? appears always empty?
"""
return self._structure_info
def _getattrs(self, element, attrs):
"""
Return an OrderedDict of an element's attributes/values. If the element
does not have a requested attribute, add the attribute with an empty
string as its value.
Arguments:
element - the element from which we are extracting attributes.
attrs - an iterable containing the names of the attributes we want
to extract from the element. This is usually a complete list
of the element's attributes according to the DTD, but it
doesn't have to be.
"""
attributes = OrderedDict(element.items())
# If any attributes are missing assign them an empty string.
attributes.update((attr, '') for attr in attrs if attr not in attributes)
return attributes
def _find_book_info(self):
"""
Return a dictionary containing information about the book's structure.
"""
info = {'book': self._getattrs(self._book.xpath('/book')[0], ('filename', 'title', 'textStructure')),
'version': []}
# Parse version tags
for version in self._book.xpath('/book/version'):
version_dict = OrderedDict()
version_dict['attributes'] = self._getattrs(version, ('title', 'author', 'fragment', 'language'))
divisions = version.xpath('divisions/division')
version_dict['organisation_levels'] = len(divisions)
version_dict['divisions'] = self._find_divisions_info(divisions)
version_dict['resources'] = self._find_resources_info(version.xpath('resources'))
version_dict['manuscripts'] = self._find_manuscripts_info(version.xpath('manuscripts'))
version_dict['reference_list'] = self._make_reference_list(version.xpath('text')[0],
version_dict['divisions']['delimiters'])
info['version'].append(version_dict)
return info
def _find_divisions_info(self, divisions):
"""
Return a dictionary of lists of <divisions> tag attributes and text.
Arguments:
divisions - a list of <divisions> elements from which we are extracting data.
"""
data = {
'labels': [],
'delimiters': [],
'text': [],
}
delimiters = []
labels = []
text = []
for d in divisions:
# Extract division delimiters and labels
attributes = self._getattrs(d, ('label', 'delimiter'))
labels.append(unicode(attributes['label']))
delimiter = attributes['delimiter']
if delimiter:
delimiters.append(unicode(delimiter))
# Extract text content
text.append(unicode(d.text or ''))
if not delimiters:
try:
delimiters = [self.default_delimiter] * (len(divisions) - 1)
except AttributeError:
delimiters = ['.']
data['labels'] = labels
data['delimiters'] = delimiters
data['text'] = text
return data
def _find_resources_info(self, resources):
"""
Return a list of dictionaries of <resource> tag attributes and text.
Arguments:
resources - a list of <resources> elements from which we are extracting data.
"""
data = []
for res in resources:
res_data = []
for r in res.xpath('resource'):
resource = {'attributes': self._getattrs(r, ('name', )),
'info': [unicode(i.text) for i in r.xpath('info')]}
url = r.xpath('URL')
if url:
resource['url'] = unicode(url[0].text)
else:
resource['url'] = ''
res_data.append(resource)
data.append(res_data)
return data
def _find_manuscripts_info(self, manuscripts):
"""
Return a list of dictionaries of <manuscripts> tag attributes and text.
Arguments:
manuscripts - a list of <manuscripts> elements from which we are extracting data.
"""
data = []
for manuscript in manuscripts:
ms_data = OrderedDict()
for ms in manuscript.xpath('ms'):
ms_dict = {'attributes': OrderedDict(
(attr, unicode(ms.xpath('@%s' % attr)[0]))
for attr in ('abbrev', 'language', 'show')
if ms.xpath('@%s' % attr)), 'name': {}}
name = ms.xpath('name')[0]
if name.text is not None:
ms_dict['name']['text'] = unicode(name.text.strip())
else:
ms_dict['name']['text'] = ''
ms_dict['name']['sup'] = [unicode(s.text.strip())
for s in name.xpath('sup')]
ms_dict['bibliography'] = []
for bib in ms.xpath('bibliography'):
bib_dict = {}
if bib.text:
bib_dict['text'] = unicode(bib.text.strip())
else:
bib_dict['text'] = []
bib_dict['booktitle'] = [unicode(b.text.strip())
for b in bib.xpath('booktitle') if b]
ms_dict['bibliography'].append(bib_dict)
ms_data[ms_dict['attributes']['abbrev']] = ms_dict
data.append(ms_data)
return data
def _make_reference_list(self, text, delimiters):
"""Assemble a flat list of references with the proper delimiters.
Arguments:
text - the <text> element from which we are extracting data.
delimiters - a list of the delimiters used to separate a document's
divisions
"""
refs = []
for div in text.xpath('div'):
parent_attributes = [self._getattrs(div, ('number', 'fragment'))]
parent_key = unicode(parent_attributes[0]['number'])
print 'div {}, level {}'.format(parent_key, len(delimiters))
if len(div.xpath('div')):
child_refs = self._make_reference_list(div, delimiters[1:])
print 'child refs are {}'.format(child_refs)
mydelimiter = delimiters[0] if len(delimiters) else ''
for ref in child_refs:
refs.append('{}{}{}'.format(parent_key, mydelimiter, ref))
else:
refs.append(parent_key)
return refs
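# Illustrative result (hypothetical two-level document with delimiter '.'):
# a <text> whose top-level divs are numbered 1 and 2, each containing child divs
# 1 and 2, yields the flat reference list ['1.1', '1.2', '2.1', '2.2'].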
"""
def text_structure_old(self, text, delimiters):
# Extract the div structure from a given text tag.
#FIXME: This is obsolete code
parent = OrderedDict()
for div in text.xpath('div'):
parent_attributes = [self._getattrs(div, ('number', 'fragment'))]
parent_key = unicode(parent_attributes[0]['number'])
child_structure = self.text_structure(div, delimiters[1:])
if child_structure:
for k, v in child_structure.items():
key | |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - "text/plain" Formatter
@copyright: 2000-2002 <NAME> <<EMAIL>>
2007 by <NAME> <<EMAIL>>
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin.formatter import FormatterBase
class Formatter(FormatterBase):
"""
Send plain text data.
"""
hardspace = u' '
def __init__(self, request, **kw):
FormatterBase.__init__(self, request, **kw)
self._in_code_area = 0
self._in_code_line = 0
self._code_area_state = [0, -1, -1, 0]
self._lists = []
self._url = None
self._text = None # XXX does not work with links in headings!!!!!
self._text_stack = []
self._skip_text = False
self._wrap_skip_text = False
self._textbuf = ''
self._indent = 0
self._listitem_on = []
self._empty_line_count = 2
self._paragraph_ended = False
self._paragraph_skip_begin = True
def startDocument(self, pagename):
line = u'\n'.rjust(len(pagename) + 2, u'*')
return self.wrap(u"%s %s \n%s" % (line, pagename, line))
def endContent(self):
return self.flush(True)
def sysmsg(self, on, **kw):
return self.wrap((u'\n\n*** ', u' ***\n\n')[not on])
def pagelink(self, on, pagename='', page=None, **kw):
FormatterBase.pagelink(self, on, pagename, page, **kw)
if on:
if not self._textbuf or self._textbuf[-1] in ('\n', ' '):
result = self.wrap(u'<')
else:
result = self.wrap(u' <')
self.text_on(True)
self.add_missing_space()
return result
else:
linktext = self._text
self.text_off()
orig_pagename = pagename
if pagename.find('/'):
pagename = pagename.replace('/', '.')
pagename += '.txt'
if linktext == orig_pagename:
return self.wrap(u'%s>' % pagename)
else:
return self.wrap(u'%s> [%s]' % (linktext, pagename))
def interwikilink(self, on, interwiki='', pagename='', **kw):
if on:
self.add_missing_space()
self._url = u"%s:%s" % (interwiki, pagename)
self.text_on()
return u''
else:
text = self._text
self.text_off()
if text == self._url:
result = ''
else:
result = self.wrap(u' [%s]' % (self._url))
self._url = None
return result
def url(self, on, url='', css=None, **kw):
if on:
self.add_missing_space()
self._url = url
self.text_on()
return u''
else:
text = self._text
self.text_off()
if text == self._url or 'mailto:' + text == self._url:
result = ''
else:
result = self.wrap(u' [%s]' % (self._url))
self._url = None
return result
def attachment_link(self, on, url=None, **kw):
if on:
if 'title' in kw and kw['title']:
if kw['title'] != url:
return self.wrap(u'[attachment:%s ' % url)
return self.wrap(u'[attachment:')
return self.wrap(']')
def attachment_image(self, url, **kw):
title = ''
for a in (u'title', u'html__title', u'alt', u'html_alt'):
if a in kw:
title = ':' + kw[a]
return self.wrap("[image:%s%s]" % (url, title))
def attachment_drawing(self, url, text, **kw):
return self.wrap("[drawing:%s]" % text)
def text(self, text, **kw):
if self._text is not None:
self._text += text
if self._wrap_skip_text:
return ''
return self.wrap(text)
def rule(self, size=0, **kw):
size = min(size, 10)
ch = u"---~=*+#####"[size]
self.paragraph_begin()
result = self.wrap((ch * (79 - self._indent)))
self.paragraph_end()
return result
def strong(self, on, **kw):
if on:
self.add_missing_space()
return self.wrap(u'*')
def emphasis(self, on, **kw):
if on:
self.add_missing_space()
return self.wrap(u'/')
def highlight(self, on, **kw):
return u''
def number_list(self, on, type=None, start=None, **kw):
if on:
if self._lists:
# No empty lines between sublists
self._paragraph_ended = False
self.paragraph_begin()
self._lists.append(0)
self._listitem_on.append(False)
elif self._lists:
self.paragraph_end()
num = self._lists.pop()
listitem_on = self._listitem_on.pop()
if listitem_on:
prefix = ' %d. ' % (num)
self._indent -= len(prefix)
return ''
def bullet_list(self, on, **kw):
if on:
if self._lists:
# No empty lines between sublists
self._paragraph_ended = False
self.paragraph_begin()
self._lists.append(-1)
self._listitem_on.append(False)
else:
self.paragraph_end()
self._lists.pop()
listitem_on = self._listitem_on.pop()
if listitem_on:
self._indent -= 3
return ''
def listitem(self, on, **kw):
self._paragraph_ended = False
if not on:
# we can't rely on this
self.paragraph_end()
return ''
result = ''
num = self._lists.pop()
listitem_on = self._listitem_on.pop()
if listitem_on and on:
# we didn't receive on=False for previous listitem
self.paragraph_end()
if num >= 0:
prefix = ' %d. ' % (num)
self._indent -= len(prefix)
else:
self._indent -= 3
if num >= 0:
num += 1
prefix = ' %d. ' % (num)
else:
# FIXME: also before tables, at least in LDA.Sieve.txt
prefix = ' * '
self._lists.append(num)
self._listitem_on.append(on)
result += self.wrap(prefix)
self._indent += len(prefix)
self._paragraph_skip_begin = True
return result
def sup(self, on, **kw):
if on:
return self.wrap(u'^')
else:
return ''
def sub(self, on, **kw):
return self.wrap(u'_')
def strike(self, on, **kw):
if on:
self.add_missing_space()
return self.wrap(u'__')
def code(self, on, **kw):
if on:
self.add_missing_space()
return self.wrap(u"'")
def preformatted(self, on, **kw):
FormatterBase.preformatted(self, on)
snip = u'%s\n' % u'---%<'.ljust(78 - self._indent, u'-')
if on:
self.paragraph_begin()
return self.wrap(snip)
else:
if self._textbuf and not self._textbuf.endswith('\n'):
self._textbuf += '\n'
result = self.wrap(snip)
self.paragraph_end()
return result
def small(self, on, **kw):
if on:
self.add_missing_space()
return u''
def big(self, on, **kw):
if on:
self.add_missing_space()
return u''
def code_area(self, on, code_id, code_type='code', show=0, start=-1,
step=-1, msg=None):
snip = u'%s\n' % u'---CodeArea'.ljust(78 - self._indent, u'-')
if on:
self.paragraph_begin()
self._in_code_area = 1
self._in_code_line = 0
self._code_area_state = [show, start, step, start]
return self.wrap(snip)
else:
if self._in_code_line:
return self.wrap(self.code_line(0) + snip)
result = self.wrap(snip)
self.paragraph_end()
return result
def code_line(self, on):
res = u''
if not on or (on and self._in_code_line):
res += u'\n'
if on:
if self._code_area_state[0] > 0:
res += u' %4d ' % self._code_area_state[3]
self._code_area_state[3] += self._code_area_state[2]
self._in_code_line = on != 0
return self.wrap(res)
def code_token(self, on, tok_type):
return ""
def add_missing_space(self):
if self._textbuf and self._textbuf[-1].isalnum():
self._textbuf += ' '
def paragraph(self, on, **kw):
FormatterBase.paragraph(self, on)
if on:
self.paragraph_begin()
else:
self.paragraph_end()
return ''
def linebreak(self, preformatted=1):
return self.wrap(u'\n')
def smiley(self, text):
return self.wrap(text)
def heading(self, on, depth, **kw):
if on:
self.paragraph_begin()
self.text_on()
result = ''
else:
if depth == 1:
ch = u'='
else:
ch = u'-'
result = u'\n%s\n' % (ch * len(self._text))
self.text_off()
result = self.wrap(result)
self.paragraph_end()
return result
def get_table_sep(self, col_widths):
result = ''
for width in col_widths:
result += '+' + ('-' * width)
return result + '+\n'
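# Illustrative example: get_table_sep([3, 5]) returns '+---+-----+\n', i.e. a '+'
# before each column, a run of dashes of the column width, and a closing '+'.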
def fix_col_widths(self):
min_widths = self._table_column_min_len
max_widths = self._table_column_max_len
max_len = sum(max_widths)
# take the needed space equally from all columns
count = len(max_widths)
idx, skip = 0, 0
available_len = 79 - count - 1
while max_len > available_len:
if max_widths[idx] > min_widths[idx]:
max_widths[idx] -= 1
max_len -= 1
skip = 0
else:
skip += 1
if skip == count:
# there are only too wide columns
break
if idx == count - 1:
idx = 0
else:
idx += 1
return max_widths
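# Illustrative behaviour (hypothetical widths): with two columns whose longest
# lines are 50 and 60 characters, available_len is 79 - 2 - 1 = 76, so the loop
# trims the column maxima alternately (never below the longest-word minima)
# until they fit, ending at 33 and 43.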
def table(self, on, attrs={}, **kw):
if on:
self._table = []
self._table_column_min_len = []
self._table_column_max_len = []
result = self.flush(True)
else:
result = u''
col_widths = self.fix_col_widths()
for row in self._table:
result += self.get_table_sep(col_widths)
more = True
while more:
more = False
num = 0
result += '|'
for col in row:
# break at next LF
lf_idx = col.find('\n')
if lf_idx != -1:
more = True
col_len = lf_idx
next_idx = lf_idx + 1
else:
col_len = len(col)
next_idx = col_len
# possibly break earlier if we need to wrap
if col_len > col_widths[num]:
idx = col.rfind(' ', 0, col_widths[num])
if idx == -1:
idx = col.find(' ', col_widths[num])
if idx != -1:
col_len = idx
next_idx = idx + 1
more = True
result += ' ' + col[:col_len]
result += (' ' * (col_widths[num] - col_len - 1)) + '|'
row[num] = col[next_idx:]
num += 1
result += '\n'
result += self.get_table_sep(col_widths)
self._table = None
self._table_column_min_len = None
self._table_column_max_len = None
self._empty_line_count = 0
self.paragraph_end()
return result
def table_row(self, on, attrs={}, **kw):
if on:
self._table.append([])
return u''
def table_cell(self, on, attrs={}, **kw):
if on:
self.text_on()
self._wrap_skip_text = True
else:
# keep track of the longest word and the longest line in the cell
self._text = self._text.strip()
max_line_len = 0
max_word_len = 0
for line in self._text.split('\n'):
if len(line) > max_line_len:
max_line_len = len(line)
for word in self._text.split(' '):
if len(word) > max_word_len:
max_word_len = len(word)
# one preceding and trailing cell whitespace
max_word_len += 2
max_line_len += 2
rownum = len(self._table) - 1
colnum = len(self._table[rownum])
if len(self._table_column_max_len) <= colnum:
self._table_column_min_len.append(max_word_len)
self._table_column_max_len.append(max_line_len)
else:
if max_word_len > self._table_column_min_len[colnum]:
self._table_column_min_len[colnum] = max_word_len
if self._table_column_max_len[colnum] < max_line_len:
self._table_column_max_len[colnum] = max_line_len
self._table[rownum].append(self._text)
self.text_off()
return u''
def underline(self, on, **kw):
return self.wrap(u'_')
def definition_list(self, on, **kw):
if on:
self.paragraph_begin()
else:
self.paragraph_end()
return u''
def definition_term(self, on, compact=0, **kw):
result = u''
#if not compact:
# result = result + u'\n'
if not on:
result = result + u':'
return self.wrap(result)
def definition_desc(self, on, **kw):
if on:
self._indent | |
<reponame>zimolzak/wav-in-python
import numpy as np
import matplotlib.pyplot as plt
import wave # so we can refer to its classes in type hint annotations
from scipy import signal
from typing import Generator
import collections
from printing import pretty_hex_string, ints2dots
def bytes2int_list(byte_list: bytes) -> Generator[int, None, None]:
"""Input a 'bytes' object. Add pairs of bytes together & yield generator of ints.
:param byte_list: bytes object, like b'#\xff^\xff', usually right out of readframes()
:return: Yield decoded values (integers 0 to 65535).
"""
# fixme - there may be a pre-made "decode" way to do this.
for n, b in enumerate(byte_list):
if n % 2 == 0:
continue
else:
# yield 256 * byte_list[n - 1] + byte_list[n] # the other endian
raw_int = 256 * byte_list[n] + byte_list[n - 1]
midpoint = 2 ** 15
if raw_int >= midpoint:
scaled_int = raw_int - midpoint
else:
scaled_int = raw_int + midpoint
yield scaled_int
# indexing or list() on a 'bytes' obj auto-converts to 'int'
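# Worked example (bytes taken from the WaveData docstring below):
# list(bytes2int_list(b'#\xff^\xff')) -> [32547, 32606]; the first pair 0x23,0xff
# decodes little-endian to 65315, which is >= 2**15 and is therefore shifted down
# by the midpoint to 32547, re-centring the signed 16-bit samples around 32768.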
def run_length_to_bitstream(rl: np.ndarray, values: np.ndarray, v_high: int, v_low: int) -> np.ndarray:
"""Do run length DECODING and map low/high signal to logic 0/1.
Supposed to leave middle values untouched.
[1,2,1,1,1] [7,1,7,1,5] -->
[1 0 0 1 0 5]
:param rl: Array of run lengths
:param values: Array of corresponding values (positive ints)
:param v_high: Value that will be mapped to 1
:param v_low: Value that will be mapped to 0
:return: Array of hopefully only {0,1} with runs re-expanded.
:raises: ValueError if rl not exactly same size as values.
"""
rl = np.asarray(rl) # so that technically it works on lists
values = np.asarray(values)
if rl.shape != values.shape:
raise ValueError("rl and values shapes unequal: %s %s" % (str(rl.shape), str(values.shape)))
high_shifts = np.where(values == v_high, 1 - v_high, 0)
low_shifts = np.where(values == v_low, 0 - v_low, 0)
values_edited = values + high_shifts + low_shifts
# fixme exception (or warn?) if values not in the set {v_high, v_low}
return np.repeat(values_edited, rl) # requires ints in rl, not floats
def square_up(a: np.ndarray, v_high: int, v_low: int, tolerance: int = 1) -> np.ndarray:
"""Take all elements close to v_high, and nudge them equal to v_high. Same for v_low.
Makes a nearly square wave into a very square wave.
Supposed to leave middle ones untouched.
[1 1 1 1 2 7 7 7 7 6 7 7 7 5 ] -->
1 1 1 1 1 7 7 7 7 7 7 7 7 5
:param a: Array of values (usually time series)
:param v_high: High value to nudge to
:param v_low: Low value to nudge to
:param tolerance: How much are you allowed to nudge?
:return: Array of squared-up values
:raises: ValueError: if intervals overlap
"""
if min(v_high + tolerance, v_low + tolerance) >= max(v_high - tolerance, v_low - tolerance):
raise ValueError("Nudging intervals overlap: %f and %f +/- %f" % (v_low, v_high, tolerance))
is_high = abs(a - v_high) <= tolerance
is_low = abs(a - v_low) <= tolerance
fixed1 = np.where(is_high, v_high, a)
return np.where(is_low, v_low, fixed1)
def rle(a: np.ndarray) -> tuple:
"""Perform run-length encoding
:param a: Array of arbitrary numbers, presumably with some repetition.
:return: Array of run lengths, and array of numbers corresponding to those runs.
"""
# https://newbedev.com/find-length-of-sequences-of-identical-values-in-a-numpy-array-run-length-encoding
ia = np.asarray(a)
n = len(ia)
if n == 0:
return None, None
else:
there_is_transition = ia[1:] != ia[:-1] # pairwise unequal (string safe)
transition_locations = np.append(np.where(there_is_transition), n - 1) # must include last element pos
run_lengths = np.diff(np.append(-1, transition_locations))
# p = np.cumsum(np.append(0, run_lengths))[:-1] # positions
return run_lengths, ia[transition_locations]
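# Illustrative example: rle(np.array([7, 7, 1, 1, 1, 5])) returns
# (array([2, 3, 1]), array([7, 1, 5])), i.e. the run lengths and the value of each
# run, which run_length_to_bitstream above can expand back after square_up.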
class WaveData:
"""Wrap a Wave_read object with awareness of baud and its sample values."""
def __init__(self, wav_file: wave.Wave_read,
start_sample: int = 0, n_symbols_to_read: int = 750, baud: int = 50) -> None:
"""Decode a portion of an open WAV file to bytes and integer samples.
Example:
W = WaveData(fh)
W.int_list -> [32547, 32606, 32964, 33108, ...]
:param wav_file: Object opened by wave.open() but not yet read
:param start_sample: Where in the file to start reading
:param n_symbols_to_read: How many FSK symbols to read. `None` to read whole file.
:param baud: Rate of FSK symbols per second
"""
self.wav_file = wav_file
self.baud = baud
# Derived and calculated vars
self.sample_rate = wav_file.getframerate()
self.bytes_per_sample = wav_file.getsampwidth()
self.samples_per_symbol = self.sample_rate / baud
if n_symbols_to_read is not None:
n_samples_to_read = int(self.samples_per_symbol * n_symbols_to_read)
else:
n_samples_to_read = wav_file.getnframes()
# Read from file
wav_file.setpos(start_sample)
self.wav_bytes = wav_file.readframes(n_samples_to_read) # important op, maybe catch exceptions?
# Usual results
self.n_samples_actually_read = len(self.wav_bytes) / self.bytes_per_sample
self.n_symbols_actually_read = self.n_samples_actually_read / self.sample_rate * baud
self.int_list = list(bytes2int_list(self.wav_bytes))
def print_summary(self, n_samples_to_plot: int = 15) -> None:
"""Show reasonable data and metadata from a WAV file, in plain text.
:param n_samples_to_plot: How many WAV samples to display (as numbers and a text graph)
"""
char_per_byte = 2 # That means hex chars. 1 B = 2 hex digits '01' or '0F' etc.
n_bytes_to_plot = n_samples_to_plot * self.bytes_per_sample
# objects for printing
pretty_hex_list = list(pretty_hex_string(self.wav_bytes.hex()))
dot_list = list(ints2dots(self.int_list))
print("\n\n# WAV file information\n")
print("Params:\n", self.wav_file.getparams())
print()
print("File duration (s) =", self.wav_file.getnframes() / self.sample_rate)
print("Samples / FSK symbol =", self.samples_per_symbol)
print("Bytes in %f FSK symbols =" % self.n_symbols_actually_read, len(self.wav_bytes))
print("Seconds read =", self.n_samples_actually_read / self.sample_rate)
print()
print("First %i bytes (%i samples):" % (n_bytes_to_plot, n_samples_to_plot))
print(self.wav_bytes[:n_bytes_to_plot])
print()
print(''.join(pretty_hex_list[:n_bytes_to_plot * char_per_byte])) # pretty hex list
print()
print(self.int_list[:n_samples_to_plot]) # int list
print()
print('\n'.join(dot_list[:n_samples_to_plot])) # dot list
class Fourier:
def __init__(self, wave_data: WaveData, seg_per_symbol: int = 3) -> None:
"""Represent results of short-time Fourier transform applied to WAV audio, including spectrogram of max
intensity frequency over time. Converts high-resolution sample time series to medium-resolution frequency
time-series.
Example:
F = Fourier(W)
F.max_freq_indices -> [1 1 7 6 7 7 7 7 1 1]
...where "1" means 600 Hz, and "7" means 1500 Hz.
:param wave_data: Object containing list of WAV numeric samples to be processed.
:param seg_per_symbol: How many FT segments are calculated for each FSK symbol.
"""
self.n_symbols_actually_read = wave_data.n_symbols_actually_read
samples_per_symbol = wave_data.sample_rate / wave_data.baud
self.f, self.t, self.Zxx = signal.stft(wave_data.int_list, fs=wave_data.sample_rate,
nperseg=int(samples_per_symbol / seg_per_symbol)) # important
# Zxx's first axis is freq, second is times
self.max_freq_indices = self.Zxx.argmax(0) # Main output: vector of which freq band is most intense, per time
# fixme - it is possible I don't understand the "nperseg" parameter.
def apply_passband(self, lo_freq: float = 400, hi_freq: float = 2000) -> None:
"""Retain only certain rows (frequencies) in the FT and other result matrices/vectors.
:param lo_freq: Lower cutoff frequency (below this will be blocked)
:param hi_freq: Higher cutoff frequency
"""
selected_indices = ((lo_freq < self.f) * (self.f < hi_freq))
self.f = self.f[selected_indices]
self.Zxx = np.abs(self.Zxx[selected_indices])
self.max_freq_indices = self.Zxx.argmax(0)
def print_summary(self):
"""Show data/metadata on STFT results."""
print("\n\n# Fourier analysis of FSK\n")
print("Zxx (FFT result) shape, frequencies * time points:", self.Zxx.shape)
print("FFT frequencies in pass band:", self.f)
print("\nFrequency bin values over time:")
print(self.max_freq_indices)
def save_plot(self, filename: str) -> None:
"""Render a spectrogram of the complete STFT of WAV data.
:param filename: Name of the image file where the plot will be saved
"""
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.stft.html
z_max = np.max(self.Zxx) # global max just used for plot scale
plt.pcolormesh(self.t, self.f, self.Zxx, vmin=0, vmax=z_max, shading='gouraud')
plt.title('STFT Magnitude')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.savefig(filename)
# plt.show()
# By spec: FSK shift of 850 Hz. Mine by inspection is about 581 Hz and 1431 Hz
# one symbol is about 450 - 470 samples by inspection
# calculated at 441 samples/symbol
# 11.62 cycles in a low freq symbol, 28.62 in high freq.
class Bitstream:
def __init__(self, fourier: Fourier) -> None:
"""Convert the medium-resolution frequency time series to low resolution bitstream (FSK symbol time series).
Often input in fourier.max_freq_indices is like this:
array([0, 7, 7, 7, 7, 7, 6, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7, 6, 1, 1, 1, 1, 1])
B = Bitstream(F)
B.stream -> [1, 0, 1, 0]
:param fourier: Object containing array of max intensity frequency over time.
"""
# elements (segments) per symbol is a critical param.
# In | |
<gh_stars>10-100
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.mec_mod_interfaces.hybrid_shape import HybridShape
class Plane(HybridShape):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| Plane
|
| Represents the hybrid shape Plane feature object.
| Role: Declare hybrid shape Plane root feature object. All interfaces for
| different type of Plane derives HybridShapePlane.
|
| Use the CATIAHybridShapeFactory to create a HybridShapePlane
| objects.
|
| See also:
| HybridShapeFactory
"""
def __init__(self, com_object):
super().__init__(com_object)
self.plane = com_object
def get_first_axis(self, o_first_axis: tuple) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetFirstAxis(CATSafeArrayVariant oFirstAxis)
|
| Returns the coordinates of the first plane axis.
|
| Parameters:
|
| oFirstAxis[0]
| The X Coordinate of the first plane axis
| oFirstAxis[1]
| The Y Coordinate of the first plane axis
| oFirstAxis[2]
| The Z Coordinate of the first plane axis
|
| See also:
| HybridShapeFactory
:param tuple o_first_axis:
:return: None
:rtype: None
"""
return self.plane.GetFirstAxis(o_first_axis)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_first_axis'
# # vba_code = """
# # Public Function get_first_axis(plane)
# # Dim oFirstAxis (2)
# # plane.GetFirstAxis oFirstAxis
# # get_first_axis = oFirstAxis
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_origin(self, o_origin: tuple) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetOrigin(CATSafeArrayVariant oOrigin)
|
| Returns the origin of the plane.
|
| Parameters:
|
| oOrigin[0]
| The X Coordinate of the plane origin
| oOrigin[1]
| The Y Coordinate of the plane origin
| oOrigin[2]
| The Z Coordinate of the plane origin
|
| See also:
| HybridShapeFactory
:param tuple o_origin:
:return: None
:rtype: None
"""
return self.plane.GetOrigin(o_origin)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_origin'
# # vba_code = """
# # Public Function get_origin(plane)
# # Dim oOrigin (2)
# # plane.GetOrigin oOrigin
# # get_origin = oOrigin
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_position(self, o_x: float, o_y: float, o_z: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetPosition(double oX,
| double oY,
| double oZ)
|
| Gets the position where the plane is displayed.
|
| Parameters:
|
| oX
| X coordinates
| oY
| Y coordinates
| oZ
| Z coordinates
|
| Returns:
| S_OK if the position has been set before, E_FAIL else.
:param float o_x:
:param float o_y:
:param float o_z:
:return: None
:rtype: None
"""
return self.plane.GetPosition(o_x, o_y, o_z)
def get_second_axis(self, o_second_axis: tuple) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetSecondAxis(CATSafeArrayVariant oSecondAxis)
|
| Returns the coordinates of the second plane axis.
|
| Parameters:
|
| oSecondAxis[0]
| The X Coordinate of the second plane axis
| oSecondAxis[1]
| The Y Coordinate of the second plane axis
| oSecondAxis[2]
| The Z Coordinate of the second plane axis
|
| See also:
| HybridShapeFactory
:param tuple o_second_axis:
:return: None
:rtype: None
"""
return self.plane.GetSecondAxis(o_second_axis)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_second_axis'
# # vba_code = """
# # Public Function get_second_axis(plane)
# # Dim oSecondAxis (2)
# # plane.GetSecondAxis oSecondAxis
# # get_second_axis = oSecondAxis
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def is_a_ref_plane(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func IsARefPlane() As long
|
| Queries whether the plane is a reference plane (fixed axis
| plane).
|
| Returns:
| 0 when the plane is a reference plane, 1 else.
:return: int
:rtype: int
"""
return self.plane.IsARefPlane()
def put_first_axis(self, i_first_axis: tuple) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub PutFirstAxis(CATSafeArrayVariant iFirstAxis)
|
| Sets the first axis. The first plane axis must be a point-direction
| line.
| Note: This method can only be used on CATIAHybridShapePlane2Lines
| feature
|
| Parameters:
|
| iFirstAxis[0]
| The X Coordinate of the first plane axis
| iFirstAxis[1]
| The Y Coordinate of the first plane axis
| iFirstAxis[2]
| The Z Coordinate of the first plane axis
|
| See also:
| HybridShapeFactory
:param tuple i_first_axis:
:return: None
:rtype: None
"""
return self.plane.PutFirstAxis(i_first_axis)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'put_first_axis'
# # vba_code = """
# # Public Function put_first_axis(plane)
# # Dim iFirstAxis (2)
# # plane.PutFirstAxis iFirstAxis
# # put_first_axis = iFirstAxis
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def put_origin(self, i_origin: tuple) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Sub PutOrigin(CATSafeArrayVariant iOrigin)
|
| Sets the origin of the plane.
| Note: This method can only be used on CATIAHybridShapePlane2Lines
| feature
|
| Parameters:
|
| iOrigin[0]
| The X Coordinate of the plane origin
| iOrigin[1]
| The Y Coordinate of the plane origin
| iOrigin[2]
| The Z Coordinate of the plane origin
|
| See also:
| HybridShapeFactory
:param tuple i_origin:
:return: None
:rtype: None
"""
return self.plane.PutOrigin(i_origin)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'put_origin'
<reponame>shubav/sonic-mgmt
import re
from spytest.utils import filter_and_select
from spytest import st, utils
import apis.system.port as port1
from apis.system.rest import get_rest,delete_rest,config_rest
from utilities.utils import get_interface_number_from_name
def config_bgp_evpn(dut, **kwargs):
"""
Author: <NAME> (<EMAIL>)
config_bgp_evpn(dut=data.dut1,neighbor ='172.16.31.10',remote_as='20',config='yes',config_type_list =["activate"])
config_bgp_evpn(dut=dut1,config = 'yes',config_type_list=["advertise_all_vni"],local_as="10")
config_bgp_evpn(dut=dut1,config_type_list=["vrf_rd_rt"],vrf_name="Vrf1",l3_rd="8:8",config="yes",local_as=evpn_dict["leaf3"]['local_as'])
config_bgp_evpn(dut=dut1,config_type_list=["vrf_rd_rt"],vrf_name="Vrf1",l3_both_rt="50:50",config="no", local_as=evpn_dict["leaf3"]['local_as'])
config_bgp_evpn(dut=dut1,config_type_list=["vrf_rd_rt"],vrf_name="Vrf1",l3_import_rt="51:50",config="yes", local_as=evpn_dict["leaf3"]['local_as'])
config_bgp_evpn(dut=dut1,config_type_list=["vrf_rd_rt"],vrf_name="Vrf1",l3_export_rt="52:50",config="yes", local_as=evpn_dict["leaf3"]['local_as'])
config_bgp_evpn(dut=dut1,config_type_list=["vrf_rd_rt"],vrf_name="Vrf1",l3_rd="8:8",config="no", local_as=evpn_dict["leaf3"]['local_as'])
config_bgp_evpn(dut=dut1,config_type_list=["vrf_rd_rt"],vrf_name="Vrf1",l3_rd="9:9",l3_both_rt="50:50",config="no",local_as=evpn_dict["leaf3"]['local_as'])
config_bgp_evpn(dut=data.dut1,neighbor ='172.16.31.10',remote_as='20',config='yes',config_type_list =["activate"], cli_type='klish')
config_bgp_evpn(dut=dut1,config = 'yes',config_type_list=["advertise_all_vni"],local_as="10", cli_type='klish')
Configure bgp l2vpn evpn specific commands
:param dut:
:param neighbor:
:param local_as:
:param config_type_list:
:param allowas_in:
:param attribute_unchanged:
:param route_map:
:param direction:
:param network:
:param rd:
:param vni:
:param vrf_name:
:param l3_vni_id:
:param ethtag:
:param bgp_label:
:param esi_id:
:param gw_ip:
:param router_mac:
:return:
"""
cli_type = kwargs.pop('cli_type', st.get_ui_type(dut,**kwargs))
if cli_type == 'click': cli_type = "vtysh"
skip_rest_cfg_type_list = [ 'nexthop_self', 'route_map', 'allowas_in', 'network', 'route_target', 'autort',
'attribute_unchanged', 'default_originate_ipv4', 'default_originate_ipv6',
'default_originate_ipv4_vrf', 'default_originate_ipv6_vrf',
'dup_addr_detection', 'flooding_disable', 'flooding_head_end_replication',
"route_server_client", "route_reflector_client" ]
if 'config' in kwargs:
config = kwargs['config']
else:
config = 'yes'
if 'vrf_name' in kwargs:
vrf_name = kwargs['vrf_name']
else:
vrf_name = "default"
if 'l3_vni_id' in kwargs:
l3_vni_id = kwargs['l3_vni_id']
if 'vtep_name' in kwargs:
vtep_name = kwargs['vtep_name']
if 'config_type_list' in kwargs:
config_type_list = kwargs['config_type_list']
if 'neighbor' in kwargs:
neighbor = kwargs['neighbor']
if 'peergroup' in kwargs and 'neighbor' not in kwargs:
neighbor = kwargs['peergroup']
if 'addr_family' in kwargs:
addr_family = kwargs['addr_family']
else:
addr_family = 'l2vpn'
if 'addr_family_modifier' in kwargs:
addr_family_modifier = kwargs['addr_family_modifier']
else:
addr_family_modifier = "evpn"
st.log('Configure BGP L2VPN address family')
addr_family_str = addr_family.upper() + '_' + addr_family_modifier.upper()
if cli_type in ['rest-put','rest-patch']:
st.banner("CFG list: {}, cli_type:{}".format(config_type_list,cli_type))
for cfg_type in config_type_list:
if cfg_type in skip_rest_cfg_type_list:
cli_type = 'klish'
st.banner("CFG type skipped: {}, cli_type:{}".format(cfg_type, cli_type))
break
if cli_type in ['rest-put','rest-patch']:
rest_urls = st.get_datastore(dut, "rest_urls")
if config.lower() == 'yes' and 'vrf_vni' not in config_type_list:
if 'local_as' in kwargs:
### AS URI
url = rest_urls['bgp_as_config'].format(vrf_name)
payload = {'openconfig-network-instance:as': int(kwargs['local_as'])}
response = config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload)
if not response:
st.banner('FAIL-OCYANG: BGP local-as config Failed')
return False
### L2VPN global URI
url = rest_urls['bgp_l2vpn_global_config'].format(vrf_name)
payload = { 'openconfig-network-instance:afi-safis': {
'afi-safi': [
{'afi-safi-name': addr_family_str,
'config':{
'afi-safi-name': addr_family_str,
}
}
]
}}
response = config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload)
if not response:
st.banner('FAIL-OCYANG: BGP {} address-family global config Failed'.format(addr_family_str))
return False
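# For reference, the two OpenConfig payloads above correspond roughly to the vtysh/klish
# configuration built in the else-branch below (illustrative, assuming local_as="10" and the
# default l2vpn/evpn address family):
#
#   router bgp 10
#    address-family l2vpn evpn
#
# The 'bgp_as_config' and 'bgp_l2vpn_global_config' keys are looked up from the device's
# rest_urls datastore; their exact URI templates are platform data and are not shown here.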
else:
if 'local_as' in kwargs:
my_cmd = 'router bgp {}\n'.format(kwargs['local_as'])
else:
my_cmd = 'router bgp\n'
my_cmd += 'address-family {} {}\n'.format(addr_family,addr_family_modifier)
if 'allowas_in' in kwargs:
allowas_in = kwargs['allowas_in']
if 'attribute_unchanged' in kwargs:
attribute_unchanged = kwargs['attribute_unchanged']
if 'route_map' in kwargs:
route_map = kwargs['route_map']
if 'direction' in kwargs:
direction = kwargs['direction']
else:
direction = 'in'
if 'advertise_ipv4' in kwargs:
advertise_ipv4 = kwargs['advertise_ipv4']
if 'advertise_ipv6' in kwargs:
advertise_ipv6 = kwargs['advertise_ipv6']
if 'advertise_ipv4_vrf' in kwargs:
advertise_ipv4 = kwargs['advertise_ipv4_vrf']
if 'advertise_ipv6_vrf' in kwargs:
advertise_ipv6 = kwargs['advertise_ipv6_vrf']
if 'dup_addr_detection' in kwargs:
dup_addr_detection = kwargs['dup_addr_detection']
if 'network' in kwargs:
network = kwargs['network']
rd = kwargs['rd']
ethtag = kwargs['ethtag']
bgp_label = kwargs['bgp_label']
esi_id = kwargs['esi_id']
gw_ip = kwargs['gw_ip']
router_mac = kwargs['router_mac']
if config.lower() == 'yes':
config_cmd = ''
elif config.lower() == 'remove_vrf':
config_cmd = 'remove_vrf'
elif config.lower() == 'remove_vni':
config_cmd = 'remove_vni'
else:
config_cmd = 'no'
if 'vni_unconfig' not in kwargs:
vni_unconfig = ''
elif kwargs['vni_unconfig'] == "yes":
vni_unconfig = 'no'
else:
vni_unconfig = ''  # guard so vni_unconfig is always defined even for unexpected values
for type1 in config_type_list:
cur_type = type1
if type1 == 'vrf_vni' and config_cmd == '':
if cli_type in ['klish','rest-put','rest-patch']:
map_vrf_vni(dut, vrf_name, l3_vni_id, config='yes', vtep_name=vtep_name, cli_type=cli_type)
my_cmd = ''
else:
my_cmd = ''
my_cmd += 'vrf {} \n'.format(vrf_name)
my_cmd += 'vni {} \n'.format(l3_vni_id)
elif type1 == 'vrf_vni' and config_cmd != '':
my_cmd = ''
if cli_type in ['klish','rest-put','rest-patch']:
if config_cmd == 'remove_vrf' or config_cmd == 'remove_vni' or config_cmd == 'no':
map_vrf_vni(dut, vrf_name, l3_vni_id, config='no', vtep_name=vtep_name, cli_type=cli_type)
my_cmd = ''
else:
if config_cmd == 'remove_vrf':
my_cmd += 'no vrf {} \n'.format(vrf_name)
if config_cmd == 'remove_vni' or config_cmd == 'no':
my_cmd += 'vrf {} \n'.format(vrf_name)
my_cmd += 'no vni {} \n'.format(l3_vni_id)
elif type1 == 'activate':
if cli_type == 'klish':
neigh_name = get_interface_number_from_name(neighbor)
if isinstance(neigh_name, dict):
my_cmd += "neighbor interface {} {}\n".format(neigh_name["type"],neigh_name["number"])
else:
my_cmd += "neighbor {}\n".format(neigh_name)
my_cmd += "remote-as {}\n".format(kwargs['remote_as'])
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += "{} activate\n".format(config_cmd)
my_cmd += "exit\n"
my_cmd += "exit\n"
elif cli_type in ['click','vtysh']:
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} neighbor {} activate\n'.format(config_cmd, neighbor)
elif cli_type in ['rest-put','rest-patch']:
if config.lower() == 'yes':
st.log("BGP EVPN neigh config")
url = rest_urls['bgp_neighbor_config'].format(vrf_name)
if kwargs['remote_as'] == 'external':
payload = {'openconfig-network-instance:neighbors':
{'neighbor': [
{'neighbor-address': neighbor,
'config': {
'neighbor-address': neighbor,
'peer-type': kwargs['remote_as'].upper(),
'enabled': True
}
}
]}
}
else:
payload = {'openconfig-network-instance:neighbors':
{'neighbor': [
{'neighbor-address': neighbor,
'config': {
'neighbor-address': neighbor,
'peer-as': int(kwargs['remote_as']),
'enabled': True
}
}
]}
}
response = config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload)
if not response:
st.banner('FAIL-OCYANG: BGP EVPN neighbor configuration Failed')
return False
url = rest_urls['bgp_l2vpn_neighbor_config'].format(vrf_name,neighbor)
payload = {'openconfig-network-instance:afi-safis': {
'afi-safi':[
{
'afi-safi-name': addr_family_str,
'config':{
'afi-safi-name': addr_family_str,
'enabled': True
}
}
]}
}
response = config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload)
if not response:
st.banner('FAIL-OCYANG: BGP {} address-family configuration Failed'.format(addr_family_str))
return False
else:
url = rest_urls['bgp_l2vpn_neighbor_config'].format(vrf_name, neighbor)
payload = {'openconfig-network-instance:afi-safis': {
'afi-safi':[
{
'afi-safi-name': addr_family_str,
'config':{
'afi-safi-name': addr_family_str,
'enabled': False
}
}
]}
}
response = config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload)
if not response:
st.banner('FAIL-OCYANG: BGP {} address-family no activate Failed'.format(addr_family_str))
return False
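# Illustrative result of the 'activate' branch above. For a call such as
# config_bgp_evpn(dut, neighbor='172.16.31.10', remote_as='20', config='yes',
#                 config_type_list=['activate'], local_as='10', cli_type='klish')
# the klish path appends, roughly:
#
#   neighbor 172.16.31.10
#   remote-as 20
#   address-family l2vpn evpn
#   activate
#   exit
#   exit
#
# while the rest-put/rest-patch path sends two payloads instead: one creating the neighbor
# (peer-as or peer-type) and one enabling the L2VPN_EVPN afi-safi for that neighbor.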
elif type1 == 'allowas_in':
#convert to REST as and when used
if cli_type == 'klish':
my_cmd += "neighbor {}\n".format(neighbor)
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += "{} allowas-in\n".format(config_cmd)
my_cmd += "exit\n"
my_cmd += "exit\n"
else:
my_cmd += '{} neighbor {} allowas-in {}\n'.format(config_cmd,neighbor,allowas_in)
elif type1 == 'attribute_unchanged':
#convert to REST as and when used
if cli_type == 'klish':
my_cmd += "neighbor {}\n".format(neighbor)
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += "{} attribute-unchanged\n".format(config_cmd)
my_cmd += "exit\n"
my_cmd += "exit\n"
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
else:
my_cmd += '{} neighbor {} attribute-unchanged {}\n'.format(config_cmd,neighbor,attribute_unchanged)
elif type1 == 'nexthop_self':
#convert to REST as and when used
if cli_type == 'klish':
my_cmd += "neighbor {}\n".format(neighbor)
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += "{} next-hop-self\n".format(config_cmd)
my_cmd += "exit\n"
my_cmd += "exit\n"
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
else:
my_cmd += '{} neighbor {} next-hop-self\n'.format(config_cmd, neighbor)
elif type1 == 'route_map':
# convert to REST as and when used
if cli_type == 'klish':
my_cmd += "neighbor {}\n".format(neighbor)
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += "{} route-map {} {}\n".format(config_cmd,route_map,direction)
my_cmd += "exit\n"
my_cmd += "exit\n"
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
else:
my_cmd += '{} neighbor {} route-map {} {}\n'.format(config_cmd,neighbor,route_map,direction)
my_cmd += 'exit\n'
elif type1 == 'route_reflector_client':
# convert to REST as and when used
if cli_type == 'klish':
my_cmd += "neighbor {}\n".format(neighbor)
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += "{} route-reflector-client\n".format(config_cmd)
my_cmd += "exit\n"
my_cmd += "exit\n"
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
else:
my_cmd += '{} neighbor {} route-reflector-client\n'.format(config_cmd, neighbor)
elif type1 == 'route_server_client':
# convert to REST as and when used
if cli_type == 'klish':
my_cmd += "neighbor {}\n".format(neighbor)
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += "{} route-server-client\n".format(config_cmd)
my_cmd += "exit\n"
my_cmd += "exit\n"
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
else:
my_cmd += '{} neighbor {} route-server-client\n'.format(config_cmd, neighbor)
elif type1 == 'disable_ebgp_connected_route_check':
if cli_type == 'klish':
my_cmd += '{} disable-ebgp-connected-route-check \n'.format(config_cmd)
my_cmd += "exit\n"
elif cli_type in ['click','vtysh']:
my_cmd += '{} bgp disable-ebgp-connected-route-check \n'.format(config_cmd)
elif cli_type in ['rest-put','rest-patch']:
url = rest_urls['ebgp_connected_route_check'].format(vrf_name)
if config.lower() == 'yes':
payload = {'openconfig-bgp-ext:disable-ebgp-connected-route-check': True}
elif config.lower() == 'no':
payload = {'openconfig-bgp-ext:disable-ebgp-connected-route-check': False}
response = config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload)
if not response:
st.banner('FAIL-OCYANG: disable-ebgp-connected-route-check configuration:{} Failed'.format(config_cmd))
return False
else:
st.error("Invalid CLI type - {}".format(cli_type))
return False
elif type1 == 'advertise_ipv4':
if cli_type in ["rest-put", "rest-patch"]:
if config.lower() == 'yes':
url = rest_urls['bgp_advertise_config'].format(vrf_name)
payload = {'openconfig-bgp-evpn-ext:advertise-list': ['IPV4_UNICAST']}
if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN advertise-ipv4 in vrf:{} config Failed'.format(vrf_name))
return False
elif config.lower() == 'no':
url = rest_urls['bgp_advertise_config'].format(vrf_name)
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG: BGP EVPN advertise-ipv4 in vrf:{} delete Failed'.format(vrf_name))
return False
else:
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} advertise ipv4 {}\n'.format(config_cmd,advertise_ipv4)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'advertise_ipv6':
if cli_type in ["rest-put", "rest-patch"]:
if config.lower() == 'yes':
url = rest_urls['bgp_advertise_config'].format(vrf_name)
payload = {'openconfig-bgp-evpn-ext:advertise-list': ['IPV6_UNICAST']}
if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN advertise-ipv6 in vrf:{} config Failed'.format(vrf_name))
return False
elif config.lower() == 'no':
url | |
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
data = json.dumps({"bid_until": "2999-12-24 23:45:10"})
self.app.put('/product/' + str(self.product_id) + "/bidup", data=data, content_type='application/json')
data = json.dumps({"bid": "999.99"})
r_json = self.app.post('/bid/' + str(self.product_id), data=data, content_type='application/json').get_json()
self.assertIn('Successful bid with ' + str(999.99), str(r_json)) # Check bids
r_json = self.app.get('/bid/' + str(self.product_id)).get_json()
self.assertIn('999.99', str(r_json)) # Check the new bid appears in the product's bid list
#@<EMAIL>
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.delete('/user')
class TradesProducts(unittest.TestCase):
<EMAIL>
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.seller_id = self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.put('/user/' + str(self.seller_id) + '/mod')
self.buyer_id = self.user_id = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
# Post product
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
self.product_id = \
self.app.post('/product', data=ProductDataBase.prod_data, content_type='application/json').get_json()[
"message"]
self.app.get('/logout')
<EMAIL>
def test_trades(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Create Trade from buyer
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
json_data = json.dumps({
"seller_id": str(self.seller_id),
"buyer_id": str(self.buyer_id),
"product_id": str(self.product_id)
})
r_json = self.app.post('/trade', data=json_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful trade created
trade_id = r_json["message"]
json_data = json.dumps({
"price": "99.9",
"products": [],
})
r_json = self.app.post('/trade/' + str(trade_id) + '/offer', data=json_data,
content_type='application/json').get_json()
self.assertIn('Successful new offer', str(r_json)) # Check create offer
json_data = json.dumps({
"price": "22.9",
"products": [],
})
r_json = self.app.put('/trade/' + str(trade_id) + '/offer', data=json_data,
content_type='application/json').get_json()
self.assertIn('Successful offer update', str(r_json)) # Check update
r_json = self.app.get('/trades').get_json()
self.assertIn('\'length\': ', str(r_json)) # Check list trades
r_json = self.app.get('/trade/' + str(trade_id)).get_json()
self.assertIn('\'seller_id\': ' + str(self.seller_id), str(r_json)) # Check get info
r_json = self.app.put('/trade/' + str(trade_id) + '/confirm').get_json()
self.assertIn('Success confirm', str(r_json)) # Buyer confirms the trade
r_json = self.app.put('/trade/' + str(trade_id) + '/confirm').get_json()
self.assertIn('Success unconfirm', str(r_json)) # Calling confirm again toggles it off
r_json = self.app.put('/trade/' + str(trade_id) + '/confirm').get_json()
self.assertIn('Success confirm', str(r_json)) # Confirm once more from the buyer side
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.put('/trade/' + str(trade_id) + '/confirm').get_json()
self.assertIn('Success confirm and close', str(r_json)) # Check get info
# See sold from seller
r_json = self.app.get('/products/' + str(self.seller_id)).get_json()
self.assertIn('\'sold\': \'True\'', str(r_json)) # Check get info
r_json = self.app.get('/products').get_json()
self.assertNotIn('Producto Molongo', str(r_json)) # Check get info
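# Summary of the trade lifecycle exercised above (endpoints as used in this suite):
#   POST /trade               -> open a trade between buyer and seller
#   POST /trade/<id>/offer    -> buyer makes an initial offer
#   PUT  /trade/<id>/offer    -> buyer revises the offer
#   PUT  /trade/<id>/confirm  -> toggles confirmation for the logged-in party; once both
#                                parties have confirmed, the trade closes and the product
#                                is marked as sold and removed from the public listing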
<EMAIL>
def test_trades_delete(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Create Trade from buyer
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
json_data = json.dumps({
"seller_id": str(self.seller_id),
"buyer_id": str(self.buyer_id),
"product_id": str(self.product_id)
})
r_json = self.app.post('/trade', data=json_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful trade created
trade_id = r_json["message"]
json_data = json.dumps({
"price": "99.9",
"products": [],
})
r_json = self.app.post('/trade/' + str(trade_id) + '/offer', data=json_data,
content_type='application/json').get_json()
self.assertIn('Successful new offer', str(r_json)) # Check create offer
json_data = json.dumps({
"price": "22.9",
"products": [],
})
r_json = self.app.put('/trade/' + str(trade_id) + '/offer', data=json_data,
content_type='application/json').get_json()
self.assertIn('Successful offer update', str(r_json)) # Check update
json_data = json.dumps({
"body": "HELLO THERE!"
})
r_json = self.app.post('/msgs/' + str(trade_id), data=json_data, content_type='application/json').get_json()
self.assertIn('Message created', str(r_json)) # Check successful creation
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.get('/trades').get_json()
self.assertIn('\'length\': ', str(r_json)) # Check list trades
r_json = self.app.get('/trade/' + str(trade_id)).get_json()
self.assertIn('\'seller_id\': ' + str(self.seller_id), str(r_json)) # Check get info
self.app.put('/trade/' + str(trade_id) + '/confirm').get_json()
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.put('/trade/' + str(trade_id) + '/delete').get_json()
self.assertIn('Success delete', str(r_json)) # Check get info
r_json = self.app.get('/trades').get_json()
self.assertNotIn('22.9', str(r_json)) # Check get info
<EMAIL>
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Post test
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
self.app.delete('/user/' + str(self.buyer_id))
self.app.delete('/user/' + str(self.seller_id))
class CommentsAndMessages(unittest.TestCase):
<EMAIL>
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.seller_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.put('/user/' + str(self.seller_id) + '/mod')
self.buyer_id = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
<EMAIL>
def test_comments(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
json_data = json.dumps({
"body": "ESRES UN CRACK",
"points": "3",
})
r_json = self.app.post('/comment/' + str(self.seller_id), data=json_data,
content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful creation
r_json = self.app.get('/comments/' + str(self.seller_id)).get_json()
self.assertIn('ESRES UN CRACK', str(r_json)) # Check successful get
# @unittest.skip
def test_delete_comment(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
json_data = json.dumps({
"body": "ESRES UN CRACK",
"points": "3",
})
comment_id = self.app.post('/comment/' + str(self.seller_id), data=json_data,
content_type='application/json').get_json()["message"]
r_json = self.app.get('/comments/' + str(self.seller_id)).get_json()
self.assertIn('ESRES UN CRACK', str(r_json)) # Check successful get
self.app.post('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.delete('/comment/' + str(comment_id) + "/del", data=json_data,
content_type='application/json').get_json()
self.assertIn('deleted', str(r_json)) # Check successful get
r_json = self.app.get('/comments/' + str(self.seller_id)).get_json()
self.assertNotIn('ESRES UN CRACK', str(r_json)) # Check successful get
<EMAIL>
def test_messages(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Post product
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
self.product_id = \
self.app.post('/product', data=ProductDataBase.prod_data, content_type='application/json').get_json()[
"message"]
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
json_data = json.dumps({
"seller_id": str(self.seller_id),
"buyer_id": str(self.buyer_id),
"product_id": str(self.product_id)
})
trade_id = self.app.post('/trade', data=json_data, content_type='application/json').get_json()["message"]
json_data = json.dumps({
"body": "HELLO THERE!"
})
r_json = self.app.post('/msgs/' + str(trade_id), data=json_data, content_type='application/json').get_json()
self.assertIn('Message created', str(r_json)) # Check successful creation
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"body": "HELLO HERE!"
})
r_json = self.app.post('/msgs/' + str(trade_id), data=json_data, content_type='application/json').get_json()
self.assertIn('Message created', str(r_json)) # Check successful creation
r_json = self.app.get('/msgs/' + str(trade_id)).get_json()
self.assertIn('HELLO HERE!', str(r_json)) # Check successful get
<EMAIL>
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.get('/logout').get_json()
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json').get_json()
self.app.delete('/user/' + str(self.buyer_id)).get_json()
self.app.delete('/user/' + str(self.seller_id)).get_json()
class Notifications(unittest.TestCase):
<EMAIL>
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.put('/user/' + str(self.user_id) + '/mod')
<EMAIL>
def test_delete_all_notifications(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"user_id": self.user_id,
"product_id": 0,
"category": "null",
"text": "Nuevo producto en categoria e interés"
})
self.app.post('/notification', data=json_data, content_type='application/json').get_json()
json_data = json.dumps({
"user_id": self.user_id,
"product_id": 0,
"category": "null",
"text": "Otra cosa"
})
self.app.post('/notification', data=json_data, content_type='application/json').get_json()
json_data = json.dumps({
"user_id": self.user_id,
"product_id": 0,
"category": "null",
"text": "Otra cosa 2"
})
self.app.post('/notification', data=json_data, content_type='application/json').get_json()
r_json = self.app.delete('/notifications').get_json()
self.assertIn('Successful delete', str(r_json)) # Check successful
r_json = self.app.get('/notifications').get_json()
self.assertIn('0', str(r_json)) # Check successful get 0 elements
<EMAIL>
def test_create_get_notification(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"user_id": self.user_id,
"product_id": 0,
"category": "null",
"text": "Otra cosa 2"
})
r_json = self.app.post('/notification', data=json_data, content_type='application/json').get_json()
self.assertIn('Notification pushed', str(r_json)) # Check successful creation
r_json = self.app.get('/notifications').get_json()
self.assertIn('Otra cosa', str(r_json)) # Check successful get
<EMAIL>
def test_follow_notify(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
user_2 = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
r_json = self.app.post('/product', data=ProductDataBase.prod_data, content_type='application/json').get_json()
product_id = r_json["message"]
# Follow
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
self.app.post('/product/' + str(product_id) + '/follow')
# Update
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
r_json = self.app.put('/product/' + str(product_id), data=ProductDataBase.prod_update,
content_type='application/json').get_json()
# Check
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.get('/notifications').get_json()
self.assertIn('precio', str(r_json)) # Check successful get
self.app.delete('/user/' + str(user_2)).get_json()
# @unittest.skip
def test_pay_notify(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
user_2 = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
r_json = self.app.post('/product', data=ProductDataBase.prod_data,
content_type='application/json').get_json()
product_id = r_json["message"]
# add interest
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"list": ["Moda", "Complementos"]
})
self.app.post('/categories/interest', data=json_data, content_type='application/json')
# Pay
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
iban = "ES809999123125412535"
json_data = json.dumps({
"amount": 9.99,
"iban": iban,
"boost_date": "1999-12-24",
"product_id": int(product_id)
})
r_json = self.app.post('/payment', data=json_data, content_type='application/json').get_json()
self.assertIn('info', str(r_json)) # Check successful pay created
# Check
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.get('/notifications').get_json()
self.assertIn('categoria', str(r_json)) # Check successful get
self.app.delete('/user/' + str(user_2)).get_json()
# @unittest.skip
def test_product_notify(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
user_2 = \
self.app.post('/user', data=UserDataBase.user_data2, content_type='application/json').get_json()[
"message"]
# add interest
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
json_data = json.dumps({
"list": ["Moda", "Complementos"]
})
self.app.post('/categories/interest', data=json_data, content_type='application/json')
# New product
self.app.post('/login', data=UserDataBase.user2_login, content_type='application/json')
self.app.post('/product', data=ProductDataBase.prod_data,
content_type='application/json')
# Check
self.app.get('/logout')
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
r_json = self.app.get('/notifications').get_json()
self.assertIn('categoria', str(r_json)) # Check successful get
self.app.delete('/user/' + str(user_2)).get_json()
<EMAIL>
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.delete('/user')
class UploadFiles(unittest.TestCase):
<EMAIL>
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
<EMAIL>
def test_upload(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.post('/login', data=UserDataBase.user_login, content_type='application/json')
f = open('./test/jake.jpg', 'rb')
data = {'file': f}
r_json = self.app.post('/upload', content_type='multipart/form-data', data=data).get_json()
file_url = r_json["message"]
f.close()
self.assertIn('info', str(r_json)) # Check successful upload
r = self.app.get(file_url)
self.assertIn("[200 OK]", str(r))
r.close()
file = file_url.split('/')[2]
os.remove("./images/" + file)
<EMAIL>
def tearDown(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app.delete('/user')
class Reports(unittest.TestCase):
<EMAIL>
def setUp(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.app = webapp.app.test_client()
self.app.testing = True
# Create users and login
self.user_id = \
self.app.post('/user', data=UserDataBase.user_data, content_type='application/json').get_json()[
"message"]
self.app.put('/user/' + str(self.user_id) + '/mod')
<EMAIL>
def test_new_report(self):
with | |
from enum import IntEnum
from typing import Callable, Dict, List, Iterable
from spherov2.commands.animatronic import R2LegActions, Animatronic
from spherov2.commands.api_and_shell import ApiAndShell
from spherov2.commands.core import IntervalOptions, Core
from spherov2.commands.io import AudioPlaybackModes, IO
from spherov2.commands.power import Power
from spherov2.commands.sensor import CollisionDetectionMethods, Sensor, SensitivityBasedCollisionDetectionMethods, \
SensitivityLevels
from spherov2.commands.sphero import CollisionDetectionMethods as SpheroCollisionDetectionMethods, Sphero
from spherov2.controls import RawMotorModes
from spherov2.controls.v2 import Processors
from spherov2.toy import Toy
from spherov2.toy.bb9e import BB9E
from spherov2.toy.bolt import BOLT
from spherov2.toy.mini import Mini
from spherov2.toy.r2d2 import R2D2
from spherov2.toy.r2q5 import R2Q5
from spherov2.toy.rvr import RVR
class ToyUtil:
@staticmethod
def sleep(toy: Toy, not_supported_handler: Callable[[], None] = None):
if toy.implements(Core.sleep):
toy.sleep(IntervalOptions.NONE, 0, 0)
elif toy.implements(Power.sleep):
toy.sleep()
elif not_supported_handler:
not_supported_handler()
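# Illustrative call (assumes a `toy` object obtained elsewhere, e.g. via the spherov2 scanner):
#
#   ToyUtil.sleep(toy, not_supported_handler=lambda: print("sleep not supported"))
#
# The same dispatch pattern is used throughout this class: try the newest command set first,
# fall back to older ones, and finally invoke the optional not_supported_handler.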
@staticmethod
def ping(toy: Toy, not_supported_handler: Callable[[], None] = None):
if toy.implements(Core.ping):
toy.ping()
elif toy.implements(ApiAndShell.ping):
toy.ping(None)
elif toy.implements(ApiAndShell.ping, True):
toy.ping(None, Processors.PRIMARY)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def roll_start(toy: Toy, heading: int, speed: int, not_supported_handler: Callable[[], None] = None):
if hasattr(toy, 'drive_control'):
toy.drive_control.roll_start(heading, speed)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def roll_stop(toy: Toy, heading: int, is_reverse: bool, not_supported_handler: Callable[[], None] = None):
if hasattr(toy, 'drive_control'):
if is_reverse:
heading = (heading + 180) % 360
toy.drive_control.roll_stop(heading)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def perform_leg_action(toy: Toy, leg_action: R2LegActions, not_supported_handler: Callable[[], None] = None):
if toy.implements(Animatronic.perform_leg_action):
toy.perform_leg_action(leg_action)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def set_stabilization(toy: Toy, stabilize, not_supported_handler: Callable[[], None] = None):
if hasattr(toy, 'drive_control'):
toy.drive_control.set_stabilization(stabilize)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def set_raw_motor(toy: Toy, left_mode: RawMotorModes, left_speed: int, right_mode: RawMotorModes, right_speed: int,
not_supported_handler: Callable[[], None] = None):
if hasattr(toy, 'drive_control'):
toy.drive_control.set_raw_motors(left_mode, left_speed, right_mode, right_speed)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def reset_heading(toy: Toy, not_supported_handler: Callable[[], None] = None):
if hasattr(toy, 'drive_control'):
toy.drive_control.reset_heading()
elif not_supported_handler:
not_supported_handler()
@staticmethod
def play_animation(toy: Toy, animation: IntEnum, wait: bool = False,
not_supported_handler: Callable[[], None] = None):
if toy.implements(Animatronic.play_animation):
toy.play_animation(animation, wait)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def set_head_position(toy: Toy, head_position: float, not_supported_handler: Callable[[], None] = None):
if toy.implements(Animatronic.set_head_position):
toy.set_head_position(head_position)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def set_main_led(toy: Toy, r: int, g: int, b: int, is_user_color: bool,
not_supported_handler: Callable[[], None] = None):
def _fallback():
if isinstance(toy, (R2D2, R2Q5)):
mapping = {
toy.LEDs.BACK_RED: r,
toy.LEDs.BACK_GREEN: g,
toy.LEDs.BACK_BLUE: b,
toy.LEDs.FRONT_RED: r,
toy.LEDs.FRONT_GREEN: g,
toy.LEDs.FRONT_BLUE: b
}
elif isinstance(toy, BB9E):
mapping = {
toy.LEDs.BODY_RED: r,
toy.LEDs.BODY_GREEN: g,
toy.LEDs.BODY_BLUE: b
}
elif isinstance(toy, Mini):
mapping = {
toy.LEDs.BODY_RED: r,
toy.LEDs.BODY_GREEN: g,
toy.LEDs.BODY_BLUE: b,
toy.LEDs.USER_BODY_RED: r,
toy.LEDs.USER_BODY_GREEN: g,
toy.LEDs.USER_BODY_BLUE: b
}
elif isinstance(toy, RVR):
mapping = {
toy.LEDs.RIGHT_HEADLIGHT_RED: r,
toy.LEDs.RIGHT_HEADLIGHT_GREEN: g,
toy.LEDs.RIGHT_HEADLIGHT_BLUE: b,
toy.LEDs.LEFT_HEADLIGHT_RED: r,
toy.LEDs.LEFT_HEADLIGHT_GREEN: g,
toy.LEDs.LEFT_HEADLIGHT_BLUE: b,
toy.LEDs.LEFT_STATUS_INDICATION_RED: r,
toy.LEDs.LEFT_STATUS_INDICATION_GREEN: g,
toy.LEDs.LEFT_STATUS_INDICATION_BLUE: b,
toy.LEDs.RIGHT_STATUS_INDICATION_RED: r,
toy.LEDs.RIGHT_STATUS_INDICATION_GREEN: g,
toy.LEDs.RIGHT_STATUS_INDICATION_BLUE: b,
toy.LEDs.BATTERY_DOOR_FRONT_RED: r,
toy.LEDs.BATTERY_DOOR_FRONT_GREEN: g,
toy.LEDs.BATTERY_DOOR_FRONT_BLUE: b,
toy.LEDs.BATTERY_DOOR_REAR_RED: r,
toy.LEDs.BATTERY_DOOR_REAR_GREEN: g,
toy.LEDs.BATTERY_DOOR_REAR_BLUE: b,
toy.LEDs.POWER_BUTTON_FRONT_RED: r,
toy.LEDs.POWER_BUTTON_FRONT_GREEN: g,
toy.LEDs.POWER_BUTTON_FRONT_BLUE: b,
toy.LEDs.POWER_BUTTON_REAR_RED: r,
toy.LEDs.POWER_BUTTON_REAR_GREEN: g,
toy.LEDs.POWER_BUTTON_REAR_BLUE: b,
toy.LEDs.LEFT_BRAKELIGHT_RED: r,
toy.LEDs.LEFT_BRAKELIGHT_GREEN: g,
toy.LEDs.LEFT_BRAKELIGHT_BLUE: b,
toy.LEDs.RIGHT_BRAKELIGHT_RED: r,
toy.LEDs.RIGHT_BRAKELIGHT_GREEN: g,
toy.LEDs.RIGHT_BRAKELIGHT_BLUE: b
}
else:
mapping = None
def __fallback():
if toy.implements(Sphero.set_main_led):
toy.set_main_led(r, g, b)
elif not_supported_handler:
not_supported_handler()
ToyUtil.set_multiple_leds(toy, mapping, __fallback)
ToyUtil.set_led_matrix_one_colour(toy, r, g, b, _fallback)
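# Note on the fallback chain above: set_main_led first tries the LED-matrix one-colour command
# (matrix-equipped toys); otherwise _fallback builds a per-channel mapping for the detected toy
# model and hands it to set_multiple_leds, which itself falls back to the legacy
# Sphero.set_main_led command via __fallback. Illustrative call:
#
#   ToyUtil.set_main_led(toy, 255, 0, 0, is_user_color=False)   # solid red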
@staticmethod
def set_head_led(toy: Toy, brightness: int, not_supported_handler: Callable[[], None] = None):
if isinstance(toy, BB9E):
ToyUtil.set_multiple_leds(toy, {BB9E.LEDs.HEAD: brightness}, not_supported_handler)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def set_front_led(toy: Toy, r: int, g: int, b: int, not_supported_handler: Callable[[], None] = None):
if isinstance(toy, RVR):
mapping = {
RVR.LEDs.RIGHT_HEADLIGHT_RED: r,
RVR.LEDs.RIGHT_HEADLIGHT_GREEN: g,
RVR.LEDs.RIGHT_HEADLIGHT_BLUE: b,
RVR.LEDs.LEFT_HEADLIGHT_RED: r,
RVR.LEDs.LEFT_HEADLIGHT_GREEN: g,
RVR.LEDs.LEFT_HEADLIGHT_BLUE: b
}
elif isinstance(toy, (R2D2, R2Q5, BOLT)):
mapping = {
toy.LEDs.FRONT_RED: r,
toy.LEDs.FRONT_GREEN: g,
toy.LEDs.FRONT_BLUE: b
}
elif isinstance(toy, Mini):
mapping = {
toy.LEDs.BODY_RED: r,
toy.LEDs.BODY_GREEN: g,
toy.LEDs.BODY_BLUE: b
}
else:
mapping = None
ToyUtil.set_multiple_leds(toy, mapping, not_supported_handler)
@staticmethod
def set_back_led(toy: Toy, r: int, g: int, b: int, not_supported_handler: Callable[[], None] = None):
if isinstance(toy, RVR):
mapping = {
RVR.LEDs.RIGHT_BRAKELIGHT_RED: r,
RVR.LEDs.RIGHT_BRAKELIGHT_GREEN: g,
RVR.LEDs.RIGHT_BRAKELIGHT_BLUE: b,
RVR.LEDs.LEFT_BRAKELIGHT_RED: r,
RVR.LEDs.LEFT_BRAKELIGHT_GREEN: g,
RVR.LEDs.LEFT_BRAKELIGHT_BLUE: b
}
elif isinstance(toy, (R2D2, R2Q5, BOLT)):
mapping = {
toy.LEDs.BACK_RED: r,
toy.LEDs.BACK_GREEN: g,
toy.LEDs.BACK_BLUE: b
}
elif isinstance(toy, Mini):
mapping = {
toy.LEDs.USER_BODY_RED: r,
toy.LEDs.USER_BODY_GREEN: g,
toy.LEDs.USER_BODY_BLUE: b
}
else:
mapping = None
ToyUtil.set_multiple_leds(toy, mapping, not_supported_handler)
@staticmethod
def set_back_led_brightness(toy: Toy, brightness: int, not_supported_handler: Callable[[], None] = None):
if isinstance(toy, (R2D2, R2Q5, BOLT)):
mapping = {
toy.LEDs.BACK_RED: 0,
toy.LEDs.BACK_GREEN: 0,
toy.LEDs.BACK_BLUE: brightness,
}
elif isinstance(toy, (BB9E, Mini)):
mapping = {
toy.LEDs.AIMING: brightness
}
elif isinstance(toy, RVR):
mapping = {
RVR.LEDs.RIGHT_BRAKELIGHT_RED: 0,
RVR.LEDs.RIGHT_BRAKELIGHT_GREEN: 0,
RVR.LEDs.RIGHT_BRAKELIGHT_BLUE: brightness,
RVR.LEDs.LEFT_BRAKELIGHT_RED: 0,
RVR.LEDs.LEFT_BRAKELIGHT_GREEN: 0,
RVR.LEDs.LEFT_BRAKELIGHT_BLUE: brightness
}
else:
mapping = None
def _fallback():
if toy.implements(Sphero.set_back_led_brightness):
toy.set_back_led_brightness(brightness)
elif not_supported_handler:
not_supported_handler()
ToyUtil.set_multiple_leds(toy, mapping, _fallback)
@staticmethod
def set_left_front_led(toy: Toy, r: int, g: int, b: int, not_supported_handler: Callable[[], None] = None):
mapping = None
if isinstance(toy, RVR):
mapping = {
RVR.LEDs.LEFT_HEADLIGHT_RED: r,
RVR.LEDs.LEFT_HEADLIGHT_GREEN: g,
RVR.LEDs.LEFT_HEADLIGHT_BLUE: b
}
ToyUtil.set_multiple_leds(toy, mapping, not_supported_handler)
@staticmethod
def set_right_front_led(toy: Toy, r: int, g: int, b: int, not_supported_handler: Callable[[], None] = None):
mapping = None
if isinstance(toy, RVR):
mapping = {
RVR.LEDs.RIGHT_HEADLIGHT_RED: r,
RVR.LEDs.RIGHT_HEADLIGHT_GREEN: g,
RVR.LEDs.RIGHT_HEADLIGHT_BLUE: b
}
ToyUtil.set_multiple_leds(toy, mapping, not_supported_handler)
@staticmethod
def set_battery_side_led(toy: Toy, r: int, g: int, b: int, not_supported_handler: Callable[[], None] = None):
mapping = None
if isinstance(toy, RVR):
mapping = {
RVR.LEDs.BATTERY_DOOR_FRONT_RED: r,
RVR.LEDs.BATTERY_DOOR_FRONT_GREEN: g,
RVR.LEDs.BATTERY_DOOR_FRONT_BLUE: b
}
ToyUtil.set_multiple_leds(toy, mapping, not_supported_handler)
@staticmethod
def set_power_side_led(toy: Toy, r: int, g: int, b: int, not_supported_handler: Callable[[], None] = None):
mapping = None
if isinstance(toy, RVR):
mapping = {
RVR.LEDs.POWER_BUTTON_FRONT_RED: r,
RVR.LEDs.POWER_BUTTON_FRONT_GREEN: g,
RVR.LEDs.POWER_BUTTON_FRONT_BLUE: b
}
ToyUtil.set_multiple_leds(toy, mapping, not_supported_handler)
@staticmethod
def set_holo_projector(toy: Toy, brightness: int, not_supported_handler: Callable[[], None] = None):
if isinstance(toy, (R2D2, R2Q5)):
mapping = {toy.LEDs.HOLO_PROJECTOR: brightness}
else:
mapping = None
ToyUtil.set_multiple_leds(toy, mapping, not_supported_handler)
@staticmethod
def set_logic_display(toy: Toy, brightness: int, not_supported_handler: Callable[[], None] = None):
if isinstance(toy, (R2D2, R2Q5)):
mapping = {toy.LEDs.LOGIC_DISPLAYS: brightness}
else:
mapping = None
ToyUtil.set_multiple_leds(toy, mapping, not_supported_handler)
@staticmethod
def set_multiple_leds(toy: Toy, mapping: Dict[IntEnum, int], not_supported_handler: Callable[[], None] = None):
if mapping and hasattr(toy, 'multi_led_control'):
toy.multi_led_control.set_leds(mapping)
elif not_supported_handler:
not_supported_handler()
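# Illustrative mapping (assumes an R2D2/R2Q5 toy exposing multi_led_control; R2D2 is already
# imported at the top of this module):
#
#   ToyUtil.set_multiple_leds(toy, {
#       R2D2.LEDs.FRONT_RED: 0,
#       R2D2.LEDs.FRONT_GREEN: 0,
#       R2D2.LEDs.FRONT_BLUE: 255,
#   })   # front LED to blue; toys without multi_led_control fall through to the handler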
@staticmethod
def set_led_matrix_one_colour(toy: Toy, r: int, g: int, b: int, not_supported_handler: Callable[[], None] = None):
if toy.implements(IO.set_compressed_frame_player_one_color):
toy.set_compressed_frame_player_one_color(r, g, b)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def set_matrix_pixel(toy: Toy, x: int, y: int, r: int, g: int, b: int, is_user_color: bool,
not_supported_handler: Callable[[], None] = None):
def _fallback():
not_supported_handler()
ToyUtil.set_led_matrix_pixel(toy, x, y, r, g, b, _fallback)
@staticmethod
def set_led_matrix_pixel(toy: Toy, x: int, y: int, r: int, g: int, b: int,
not_supported_handler: Callable[[], None] = None):
if toy.implements(IO.set_compressed_frame_player_pixel):
toy.set_compressed_frame_player_pixel(x, y, r, g, b)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def set_matrix_line(toy: Toy, x1: int, y1: int, x2: int, y2: int, r: int, g: int, b: int, is_user_color: bool,
not_supported_handler: Callable[[], None] = None):
def _fallback():
not_supported_handler()
ToyUtil.set_led_matrix_line(toy, x1, y1, x2, y2, r, g, b, _fallback)
@staticmethod
def set_led_matrix_line(toy: Toy, x1: int, y1: int, x2: int, y2: int, r: int, g: int, b: int,
not_supported_handler: Callable[[], None] = None):
if toy.implements(IO.draw_compressed_frame_player_line):
toy.draw_compressed_frame_player_line(x1, y1, x2, y2, r, g, b)
elif not_supported_handler:
not_supported_handler()
@staticmethod
def set_matrix_fill(toy: Toy, x1: int, y1: int, x2: int, y2: int, r: int, g: int, b: int, is_user_color: bool,
not_supported_handler: Callable[[], None] = None):
def _fallback():
not_supported_handler()
ToyUtil.set_led_matrix_fill(toy, x1, y1, x2, y2, r, g, b, _fallback)
@staticmethod
def set_led_matrix_fill(toy: Toy, x1: int, y1: int, x2: int, y2: int, r: int, g: int, b: int,
not_supported_handler: Callable[[], None] = None):
if toy.implements(IO.draw_compressed_frame_player_fill):
toy.draw_compressed_frame_player_fill(x1, y1, x2, y2, r, g, b)
elif not_supported_handler:
not_supported_handler()
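# Illustrative drawing sequence for a toy with an 8x8 LED matrix such as BOLT (assumes the
# compressed frame player uses 0-7 pixel coordinates):
#
#   ToyUtil.set_led_matrix_one_colour(toy, 0, 0, 0)           # clear to black
#   ToyUtil.set_led_matrix_line(toy, 0, 0, 7, 7, 255, 0, 0)   # red diagonal
#   ToyUtil.set_led_matrix_fill(toy, 0, 6, 7, 7, 0, 0, 255)   # blue band across the top rows
#   ToyUtil.set_led_matrix_pixel(toy, 3, 3, 0, 255, 0)        # single green pixel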
# @Created Date: 2019-12-08 06:46:49 pm
# @Filename: api.py
# @Email: <EMAIL>
# @Author: <NAME>
# @Last Modified: 2020-02-16 10:54:32 am
# @Copyright (c) 2020 MinghuiGroup, Soochow University
from typing import Iterable, Iterator, Optional, Union, Generator, Dict, List
from time import perf_counter
from numpy import nan, array
from pathlib import Path
from unsync import unsync, Unfuture
from copy import deepcopy
from pdb_profiling.log import Abclog
from pdb_profiling.utils import init_semaphore, init_folder_from_suffix, init_folder_from_suffixes, a_read_csv
from pdb_profiling.fetcher.webfetch import UnsyncFetch
from uuid import uuid4
from pdb_profiling.cif_gz_stream import iter_index
from aiohttp import ClientSession
from aiofiles import open as aiofiles_open
from pdb_profiling.ensure import EnsureBase
from tenacity import wait_random, stop_after_attempt
ensure = EnsureBase()
rt_kw = dict(wait=wait_random(max=20), stop=stop_after_attempt(6))
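# rt_kw bundles retry settings (random wait of up to 20 s, give up after 6 attempts). A minimal
# standalone sketch with tenacity's own decorator, assuming the same kwargs are simply forwarded
# to it (this module applies them through EnsureBase instead):
#
#   from tenacity import retry
#
#   @retry(**rt_kw)
#   def fetch_once():
#       ...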
"""QUERY_COLUMNS: List[str] = [
'id', 'length', 'reviewed',
'comment(ALTERNATIVE%20PRODUCTS)',
'feature(ALTERNATIVE%20SEQUENCE)',
'genes', 'organism', 'protein%20names']
RESULT_COLUMNS: List[str] = [
'Entry', 'Length', 'Status',
'Alternative products (isoforms)',
'Alternative sequence',
'Gene names', 'Organism', 'Protein names']
COLUMNS_DICT: Dict = dict(zip(QUERY_COLUMNS, RESULT_COLUMNS))
RESULT_NEW_COLUMN: List[str] = ['yourlist', 'isomap']"""
BASE_URL: str = 'https://www.uniprot.org'
"""PARAMS: Dict = {
# 'fil': 'organism%3A"Homo+sapiens+(Human)+[9606]"+AND+reviewed%3Ayes',
# reviewed:yes+AND+organism:9606
'columns': None,
'query': None,
'from': None,
'to': 'ACC',
'format': 'tab'}"""
"""
class MapUniProtID(Abclog):
'''
Implement UniProt Retrieve/ID Mapping API
'''
def __init__(self, id_col: str, id_type: str,
dfrm: Optional[DataFrame],
ids: Optional[Iterable] = None,
sites: Optional[Iterable] = None,
genes: Optional[Iterable] = None,
usecols: Optional[Iterable] = QUERY_COLUMNS,
site_col: Optional[str] = None,
gene_col: Optional[str] = None,
logger: Optional[logging.Logger] = None,
loggingPath: Optional[str] = None):
self.init_logger(self.__class__.__name__, logger)
if dfrm is not None:
self.dfrm = dfrm.drop_duplicates().reset_index(drop=True)
else:
'''
the length of dataframe is based on:
* the num of `ids` if there is more than one id
* the num of `sites` if there is just one id with specified `sites`
'''
if isinstance(ids, str):
if sites is not None and not isinstance(sites, str):
index_len = len(sites)
else:
index_len = 1
else:
index_len = len(ids)
self.dfrm = DataFrame(dict(zip(
(col for col in (id_col, site_col, gene_col) if col is not None),
(value for value in (ids, sites, genes) if value is not None))),
index=list(range(index_len)))
self.index = dfrm.index
self.id_col = id_col
self.id_type = id_type
self.site_col = site_col
self.gene_col = gene_col
self.loggingPath = loggingPath
if isinstance(usecols, str):
PARAMS['columns'] = usecols
usecols = usecols.split(',')
elif isinstance(usecols, (Iterable, Iterator)):
PARAMS['columns'] = ','.join(usecols)
else:
raise ValueError('Invalid usecols')
self.usecols = usecols
PARAMS['from'] = id_type
if isinstance(loggingPath, (str, Path)):
self.set_logging_fileHandler(loggingPath)
@property
def sites(self) -> Generator:
if self.site_col is not None:
for name, group in self.dfrm.groupby(by=self.id_col, sort=False):
yield name, group[self.site_col]
else:
yield None
@staticmethod
def split_df(dfrm, colName, sep):
'''Split DataFrame'''
df = dfrm.copy()
return df.drop([colName], axis=1).join(df[colName].str.split(sep, expand=True).stack().reset_index(level=1, drop=True).rename(colName))
def yieldTasks(self, lyst: Iterable, chunksize: int = 100, sep: str = ',') -> Generator:
fileName = self.outputPath.stem
for i in range(0, len(lyst), chunksize):
cur_fileName = f'{fileName}+{i}'
cur_params = deepcopy(PARAMS)
cur_params['query'] = sep.join(lyst[i:i+chunksize]) # self.outputPath.suffix
yield ('get', {'url': f'{BASE_URL}/uploadlists/', 'params': cur_params}, str(Path(self.outputPath.parent, cur_fileName+'.tsv')))
def retrieve(self, outputPath: Union[str, Path],
finishedPath: Optional[str] = None,
sep: str = '\t',
chunksize: int = 100,
concur_req: int = 20,
rate: float = 1.5,
ret_res: bool = True,
semaphore = None):
finish_id = list()
self.outputPath = Path(outputPath)
self.result_cols = [COLUMNS_DICT.get(
i, i) for i in self.usecols] + RESULT_NEW_COLUMN
if finishedPath is not None:
try:
target_col = RESULT_NEW_COLUMN[0]
finish: Series = read_csv(
finishedPath,
sep=sep,
usecols=[target_col],
names=self.result_cols,
skiprows=1,
header=None)[target_col]
except Exception as e:
col_to_add = RESULT_NEW_COLUMN[1]
self.logger.warning(
f"{e}\nSomething wrong with finished raw file, probably without '{col_to_add}' column.")
finish_df = read_csv(
finishedPath, sep=sep, names=self.result_cols[:-1], skiprows=1, header=None)
finish_df[col_to_add] = nan
finish_df.to_csv(finishedPath, sep=sep, index=False)
finish: Series = finish_df[target_col]
for query_id in finish:
if ',' in query_id:
finish_id.extend(query_id.split(','))
else:
finish_id.append(query_id)
query_id: Series = self.dfrm[self.id_col]
if finish_id:
rest_id = list(set(query_id) - set(finish_id))
else:
rest_id = query_id.unique()
self.logger.info(
f"Have finished {len(finish_id)} ids, {len(rest_id)} ids left.")
res = UnsyncFetch.multi_tasks(
tasks=self.yieldTasks(rest_id, chunksize),
to_do_func=self.process,
concur_req=concur_req,
rate=rate,
ret_res=ret_res,
semaphore=semaphore)
return res
def getCanonicalInfo(self, dfrm: DataFrame):
'''
Will Change the dfrm
* Add new column (canonical_isoform)
* Change the content of column (UniProt)
'''
# Get info from Alt Product file
if self.altProPath is None:
dfrm['canonical_isoform'] = nan
return dfrm
else:
usecols = ["IsoId", "Sequence", "Entry", "UniProt"]
altPro_df = read_csv(self.altProPath, sep="\t", usecols=usecols)
altPro_df = altPro_df[altPro_df["Sequence"]
== "Displayed"].reset_index(drop=True)
altPro_df.rename(
columns={"IsoId": "canonical_isoform"}, inplace=True)
# Modify dfrm
dfrm = merge(
dfrm, altPro_df[["canonical_isoform", "Entry"]], how="left")
return dfrm
def getGeneStatus(self, handled_df: DataFrame, colName: str = 'GENE_status'):
'''
Will Change the dfrm, add Gene Status
* Add new column (GENE) # if id_col != gene_col
* Add new column (GENE_status)
**About GENE_status**
* ``False`` : First element of Gene names is not correspond with refSeq's GENE (e.g)
* others(corresponding GENE)
'''
self.gene_status_col = colName
if self.id_type != 'GENENAME':
if self.gene_col is None:
handled_df[colName] = True
return None
gene_map = self.dfrm[[self.id_col,
self.gene_col]].drop_duplicates()
gene_map = gene_map.groupby(self.id_col)[self.gene_col].apply(
lambda x: array(x) if len(x) > 1 else list(x)[0])
handled_df['GENE'] = handled_df.apply(
lambda z: gene_map[z['yourlist']], axis=1)
handled_df[colName] = handled_df.apply(lambda x: x['GENE'] == x['Gene names'].split(
' ')[0] if not isinstance(x['Gene names'], float) else False, axis=1)
handled_df['GENE'] = handled_df['GENE'].apply(
lambda x: ','.join(x) if not isinstance(x, str) else x)
else:
handled_df[colName] = handled_df.apply(lambda x: x['yourlist'] == x['Gene names'].split(
' ')[0] if not isinstance(x['Gene names'], float) else False, axis=1)
def label_mapping_status(self, dfrm: DataFrame, colName: str = 'Mapping_status'):
self.mapping_status_col = colName
gene_status_col = self.gene_status_col
dfrm[colName] = 'No'
dfrm[gene_status_col] = dfrm[gene_status_col].apply(
lambda x: x.any() if isinstance(x, Iterable) else x)
if self.id_col == 'GENENAME':
pass_df = dfrm[
(dfrm[gene_status_col] == True) &
(dfrm['Status'] == 'reviewed') &
(dfrm['unp_map_tage'] != 'Untrusted & No Isoform')]
else:
pass_df = dfrm[
(dfrm['Status'] == 'reviewed') &
(dfrm['unp_map_tage'] != 'Untrusted & No Isoform')]
pass_index = pass_df.index
dfrm.loc[pass_index, colName] = 'Yes'
# Deal with 'one to many' situation
multipleCounter = Counter(dfrm.loc[pass_index, 'yourlist'])
err_li = [i for i, j in multipleCounter.items() if j > 1]
err_index = pass_df[pass_df['yourlist'].isin(err_li)].index
dfrm.loc[err_index, colName] = 'Error'
@unsync
async def process(self, path: Union[str, Path, Unfuture], sep: str = '\t'):
self.logger.debug("Start to handle id mapping result")
if not isinstance(path, (Path, str)):
path = await path # .result()
if not Path(path).stat().st_size:
return None
self.altSeqPath, self.altProPath = ExtractIsoAlt.main(path=path)
try:
df = read_csv(
path, sep='\t', names=self.result_cols, skiprows=1, header=None)
except ValueError:
df = read_csv(
path, sep='\t', names=self.result_cols[:-1], skiprows=1, header=None)
# Add New Column: canonical_isoform
df = self.getCanonicalInfo(df)
# Add New Column: unp_map_tage
df['unp_map_tage'] = nan
# Classification
df_with_no_isomap = df[df['isomap'].isnull()] # Class A
df_with_isomap = df[df['isomap'].notnull()] # Class B
# ----------------------------------------------------------------------
# In Class A
# ----------------------------------------------------------------------
if len(df_with_no_isomap) > 0:
df_wni_split = self.split_df(df_with_no_isomap, 'yourlist', ',')
df_wni_split.drop(columns=['isomap'], inplace=True)
# [yourlist <-> UniProt]
df_wni_split['UniProt'] = df_wni_split['Entry']
df_wni_split['unp_map_tage'] = 'Trusted & No Isoform'
# Find out special cases 1
df_wni_split_warn = df_wni_split[df_wni_split['Alternative products (isoforms)'].notnull(
)].index
df_wni_split.loc[df_wni_split_warn,
'unp_map_tage'] = 'Untrusted & No Isoform'
# 'Entry', 'Gene names', 'Status', 'Alternative products (isoforms)', 'Organism', 'yourlist', 'UniProt'
# ----------------------------------------------------------------------
# In Class B
# ----------------------------------------------------------------------
if len(df_with_isomap) > 0:
wi_yourlist_count = df_with_isomap.apply(
lambda x: x['yourlist'].count(','), axis=1)
wi_isomap_count = df_with_isomap.apply(
lambda x: x['isomap'].count(','), axis=1)
# In subClass 1
df_wi_eq = df_with_isomap.loc[wi_yourlist_count[wi_yourlist_count ==
wi_isomap_count].index]
if len(df_wi_eq) > 0:
df_wi_eq_split = self.split_df(
df_wi_eq.drop(columns=['yourlist']), 'isomap', ',')
df_wi_eq_split[['yourlist', 'UniProt']] = df_wi_eq_split['isomap'].str.split(
' -> ', expand=True)
# [yourlist <-> UniProt]
df_wi_eq_split.drop(columns=['isomap'], inplace=True)
df_wi_eq_split['unp_map_tage'] = 'Trusted & Isoform'
# # 'Entry', 'Gene names', 'Status', 'Alternative products (isoforms)', 'Organism', 'yourlist', 'UniProt'
# In subClass 2
df_wi_ne = df_with_isomap.loc[wi_yourlist_count[wi_yourlist_count !=
wi_isomap_count].index]
if len(df_wi_ne) > 0:
df_wi_ne_split = self.split_df(df_wi_ne, 'isomap', ',')
df_wi_ne_split.rename(
columns={'yourlist': 'checkinglist'}, inplace=True)
df_wi_ne_split[['yourlist', 'UniProt']] = df_wi_ne_split['isomap'].str.split(
' -> ', expand=True)
df_wi_ne_split.drop(columns=['isomap'], inplace=True)
df_wi_ne_split['unp_map_tage'] = 'Trusted & Isoform & Contain Warnings'
# 'Entry', 'Gene names', 'Status', 'Alternative products (isoforms)', 'Organism', 'yourlist', 'UniProt', 'checkinglist'
# Find out special cases 2
usecols = Index(set(df_wi_ne_split.columns) -
{'yourlist', 'UniProt'})
df_wi_ne_warn = self.split_df(
df_wi_ne_split[usecols].drop_duplicates(), 'checkinglist', ',')
df_wi_ne_warn = df_wi_ne_warn[~df_wi_ne_warn['checkinglist'].isin(
df_wi_ne_split['yourlist'])].rename(columns={'checkinglist': 'yourlist'})
df_wi_ne_warn['UniProt'] = df_wi_ne_warn['Entry']
# sequence conflict
df_wi_ne_warn['unp_map_tage'] = 'Untrusted & No Isoform'
df_wi_ne_split.drop(columns=['checkinglist'], inplace=True)
# Concat Dfrm
variables = ["df_wni_split", "df_wi_eq_split",
"df_wi_ne_split", "df_wi_ne_warn"]
lvs = locals()
varLyst = [lvs[variable] for variable in variables if variable in lvs]
final_df = concat(varLyst, sort=False).reset_index(drop=True)
cano_index = final_df[final_df["canonical_isoform"].notnull()].index
if len(cano_index) > 0:
final_df.loc[cano_index, "UniProt"] = final_df.loc[cano_index, ].apply(
lambda x: x["Entry"] if x["UniProt"] in x["canonical_isoform"] else x["UniProt"], axis=1)
# Add Gene Status
self.getGeneStatus(final_df)
# Label Mapping Status
self.label_mapping_status(final_df)
pathOb = Path(path)
edPath = str(Path(pathOb.parent, f'{pathOb.stem}_ed.tsv')) # {pathOb.suffix}
final_df.to_csv(edPath, sep=sep, index=False)
self.logger.debug(f"Handled id mapping result saved in {edPath}")
return edPath
"""
class UniProtAPI(Abclog):
| |
#!/usr/bin/env python3
"""Classes and functions to insert cache file data into the database."""
# Standard libraries
import os
import time
import shutil
from collections import defaultdict
from multiprocessing import Pool
import re
import pymysql
# PIP libraries
from sqlalchemy import and_
# Infoset libraries
from infoset.db import db
from infoset.db.db_orm import Data, Datapoint, Agent, Device, DeviceAgent
from infoset.db.db_orm import AgentName
from infoset.db import db_agent
from infoset.db import db_agentname
from infoset.db import db_device
from infoset.db import db_deviceagent
from infoset.utils import configuration
from infoset.utils import general
from infoset.utils import log
from infoset.cache import drain
from infoset.utils import daemon
class _ProcessAgentCache(object):
"""Processes cache files from a single agent.
The ingester daemon periodically runs methods in this class.
Methodology:
1) JSON data from each successive cache file is converted to a series
of dicts using the Drain class in infoset.cache.drain
2) Data from invalid files are discarded and moved to a failure
directory for future analysis.
3) The timestamp of the ingester's PID file is updated with each valid
file found.
The ingester normally updates the PID file periodically while it
waits for new data. It is automatically restarted if there is no
PID file activity as it is assumed the ingester is hung.
The ingester therefore needs to continue updating its PID file while
it is processing data to reduce this risk.
4) Newly discovered agents, devices and datapoints are added to
the database. The DeviceAgent table is also updated to record
which devices each agent reports on.
5) The actual datapoint values are added to the database.
6) Database table rows are updated with the timestamp of this most
recent update.
"""
def __init__(self, config, metadata, ingester_agent_name):
"""Initialize the class.
Args:
config: Config object
metadata: List of dicts, each holding the 'timestamp' and 'filepath' of a cache file from this agent
ingester_agent_name: Ingester's agent name
Returns:
None
"""
self.config = config
self.metadata = metadata
self.ingester_agent_name = ingester_agent_name
def process(self):
"""Update the database using threads."""
# Initialize key variables
do_update = False
success = None
ingests = []
agent_data = {
'devicename': None,
'id_agent': None,
'sources': [],
'timeseries': [],
'timefixed': [],
'max_timestamp': 0
}
# Get the directory to which failed files will be moved
failure_directory = self.config.ingest_failures_directory()
# Get start time for activity
start_ts = time.time()
# Process file for each timestamp, starting from the oldest file
for data_dict in self.metadata:
# Initialize key variables
timestamp = data_dict['timestamp']
filepath = data_dict['filepath']
# Read in data
ingest = drain.Drain(filepath)
# Make sure file is OK
# Move it to a directory for further analysis
# by administrators
if ingest.valid() is False:
log_message = (
'Cache ingest file %s is invalid. Moving.'
'') % (filepath)
log.log2warning(1054, log_message)
shutil.copy(filepath, failure_directory)
os.remove(filepath)
continue
# Append data
agent_data['timeseries'].extend(ingest.timeseries())
agent_data['timefixed'].extend(ingest.timefixed())
agent_data['sources'].extend(ingest.sources())
# Append ingest object to a list for later processing
ingests.append(ingest)
# Update information that doesn't change
if do_update is False:
agent_data['devicename'] = ingest.devicename()
agent_data['id_agent'] = ingest.id_agent()
agent_data['agent_name'] = ingest.agent()
# Get the PID file for the agent
pid_file = daemon.pid_file(self.ingester_agent_name)
else:
# Get the max timestamp
agent_data['max_timestamp'] = max(
timestamp, agent_data['max_timestamp'])
# Update the PID file for the agent to ensure agentd.py
# doesn't kill the ingest while processing a long stream
# of files. If we are running this using __main__ = process()
# then the pid file wouldn't have been created, hence the logic.
if os.path.isfile(pid_file) is True:
daemon.update_pid(self.ingester_agent_name)
# Update update flag
do_update = True
# Process the rest
if do_update is True:
# Update and note success
(success, datapoints_processed) = self._do_update(
agent_data, ingests)
# Log duration of activity
duration = time.time() - start_ts
if success is True:
log_message = (
'Agent %s was processed from %s cache files in %s '
'seconds (%s seconds/file, %s seconds/datapoint)'
'') % (
agent_data['id_agent'],
len(ingests),
round(duration, 4),
round(duration / len(ingests), 4),
round(duration / datapoints_processed, 6))
log.log2info(1007, log_message)
else:
log_message = (
'Failed to process all cache files for agent %s. '
'Investigate.') % (
agent_data['id_agent'])
log.log2info(1008, log_message)
def _do_update(self, agent_data, ingests):
"""Update the database using threads."""
# Initialize key variables
max_timestamp = agent_data['max_timestamp']
# Add datapoints to the database
db_prepare = _PrepareDatabase(agent_data)
db_prepare.add_datapoints()
# Get the latest datapoints
datapoints = db_prepare.get_datapoints()
# Get the assigned index values for the device and agent
idx_device = db_prepare.idx_device()
idx_agent = db_prepare.idx_agent()
# Update database with data
db_update = _UpdateDB(agent_data, datapoints)
success = db_update.update()
#####################################################################
#####################################################################
#
# We need to update the database with last-update data and purge
# files whether successful or not. This is a precaution to prevent
# database corruption if multiple sets of bogus data are posted along
# with valid data
#
#####################################################################
#####################################################################
# Update database table timestamps
update_timestamps = _UpdateLastTimestamp(
idx_device, idx_agent, max_timestamp)
update_timestamps.deviceagent()
update_timestamps.datapoint()
# Purge source files. Only done after complete
# success of database updates. Otherwise we could lose data in the
# event of an ingester crash; the ingester would then re-read the files
# and process the non-duplicates, while deleting the duplicates.
for ingest in ingests:
ingest.purge()
# Return
return (success, len(datapoints))
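# Illustrative sketch only: the names `config`, `metadata_by_agent` and the pool
# size are assumptions, not taken from this module. The ingester would typically
# group cache-file metadata by agent and hand each group to _ProcessAgentCache,
# e.g. via the multiprocessing.Pool imported above:
#
#   def _process_one(args):
#       config, metadata, ingester_agent_name = args
#       _ProcessAgentCache(config, metadata, ingester_agent_name).process()
#
#   def ingest_all(config, metadata_by_agent, ingester_agent_name):
#       jobs = [(config, metadata, ingester_agent_name)
#               for metadata in metadata_by_agent.values()]
#       with Pool(processes=4) as pool:  # pool size chosen arbitrarily here
#           pool.map(_process_one, jobs)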
class _PrepareDatabase(object):
"""Prepare database for insertion of new datapoint values.
Newly discovered agents, devices and datapoints are added to
the database.
The DeviceAgent table is also updated to record which devices
each agent reports on.
"""
def __init__(self, agent_data):
"""Instantiate the class.
Args:
agent_data: Agent data from successive Drains
Returns:
None
"""
# Initialize key variables
self.agent_data = agent_data
# Update Agent, Device and DeviceAgent database tables if
# Device and agent are not already there
self._idx_agent = self.idx_agent()
self._idx_device = self.idx_device()
self._idx_deviceagent = db_deviceagent.GetDeviceAgent(
self._idx_device, self._idx_agent).idx_deviceagent()
def idx_agent(self):
"""Insert new agent into database if necessary.
Args:
None
Returns:
idx_agent: IDX value of agent from database
"""
# Initialize key variables
agent_name = self.agent_data['agent_name']
id_agent = self.agent_data['id_agent']
# Get information on agent from database
agent_data = db_agent.GetIDAgent(id_agent)
# Return if agent already exists in the table
if agent_data.exists() is True:
idx_agent = agent_data.idx_agent()
return idx_agent
# Get information on agent name from database
name_data = db_agentname.GetAgentName(agent_name)
# Insert data into table if required
# Get idx_agentname
if name_data.exists() is False:
record = AgentName(
name=general.encode(agent_name))
database = db.Database()
try:
database.add(record, 1145)
except pymysql.IntegrityError:
# There may be a duplicate agent name if this is a brand
# new database and there is a flurry of updates from multiple
# agents. This is OK, pass.
#
# We are expecting a 'pymysql.err.IntegrityError' but for some
# reason it could not be caught.
pass
new_name_data = db_agentname.GetAgentName(agent_name)
idx_agentname = new_name_data.idx_agentname()
else:
idx_agentname = name_data.idx_agentname()
# Add record to the database
new_record = Agent(
id_agent=general.encode(id_agent),
idx_agentname=idx_agentname)
database = db.Database()
database.add(new_record, 1081)
# Get idx_agent value from database
new_agent_data = db_agent.GetIDAgent(id_agent)
idx_agent = new_agent_data.idx_agent()
return idx_agent
def idx_device(self):
"""Insert new device into database if necessary.
Args:
None
Returns:
idx_device: Index value for device record
"""
# Initialize key variables
devicename = self.agent_data['devicename']
# Get information on agent from database
device = db_device.GetDevice(devicename)
# Determine index value for device
if device.exists() is True:
idx_device = device.idx_device()
else:
# Add record to the database
record = Device(devicename=general.encode(devicename))
database = db.Database()
database.add(record, 1080)
# Get idx of newly added device
device_info = db_device.GetDevice(devicename)
idx_device = device_info.idx_device()
# Update DeviceAgent table
idx_agent = self._idx_agent
if db_deviceagent.device_agent_exists(idx_device, idx_agent) is False:
# Add to DeviceAgent table
new_record = DeviceAgent(
idx_device=idx_device, idx_agent=idx_agent)
database = db.Database()
database.add(new_record, 1094)
# Return
return idx_device
def add_datapoints(self):
"""Add new datapoints to the database.
Args:
None
Returns:
None
"""
# Initialize key variables
new_datapoint_ids = []
# Add newly found datapoints to database if agent is enabled
agent_object = db_agent.GetIDXAgent(self._idx_agent)
if agent_object.enabled() is True:
# Create map of DIDs to database row index values
dp_metadata = self.get_datapoints()
# Update datapoint metadata if not there
# Use a single dictionary lookup instead of individual database
# calls, which were slow.
for source in self.agent_data['sources']:
id_datapoint = source['id_datapoint']
if id_datapoint not in dp_metadata:
# This is a protection against the scenario where
# the very first contact from an agent is a result
# of a stream of data postings of cached data.
# The datapoints are not originally in the | |
read_only: Optional[bool] = None,
):
self.chapAuthDiscovery = chap_auth_discovery
self.chapAuthSession = chap_auth_session
self.fsType = fs_type
self.initiatorName = initiator_name
self.iqn = iqn
self.lun = lun
self.portals = portals
self.secretRef = secret_ref
self.targetPortal = target_portal
self.iscsiInterface = iscsi_interface
self.readOnly = read_only
class GCEPersistentDiskVolumeSource(HelmYaml):
"""
:param fs_type: Filesystem type of the volume that you want to mount. Tip: Ensure \
that the filesystem type is supported by the host operating system. Examples: \
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More \
info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param pd_name: Unique name of the PD resource in GCE. Used to identify the disk in \
GCE. More info: \
https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param partition: The partition in the volume that you want to mount. If omitted, \
the default is to mount by volume name. Examples: For volume /dev/sda1, you \
specify the partition as "1". Similarly, the volume partition for /dev/sda is \
"0" (or you can leave the property empty). More info: \
https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param read_only: ReadOnly here will force the ReadOnly setting in VolumeMounts. \
Defaults to false. More info: \
https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
"""
def __init__(
self,
fs_type: str,
pd_name: str,
partition: Optional[int] = None,
read_only: Optional[bool] = None,
):
self.fsType = fs_type
self.pdName = pd_name
self.partition = partition
self.readOnly = read_only
class NFSVolumeSource(HelmYaml):
"""
:param path: Path that is exported by the NFS server. More info: \
https://kubernetes.io/docs/concepts/storage/volumes#nfs
:param server: Server is the hostname or IP address of the NFS server. More info: \
https://kubernetes.io/docs/concepts/storage/volumes#nfs
:param read_only: ReadOnly here will force the NFS export to be mounted with \
read-only permissions. Defaults to false. More info: \
https://kubernetes.io/docs/concepts/storage/volumes#nfs
"""
def __init__(self, path: str, server: str, read_only: Optional[bool] = None):
self.path = path
self.server = server
self.readOnly = read_only
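# Minimal usage sketch (values are placeholders, not from the original source):
#   nfs = NFSVolumeSource(path="/exports/data", server="10.0.0.5", read_only=True)
# The HelmYaml base class is assumed to serialise the camelCase attributes set in
# __init__ (path, server, readOnly) into the rendered manifest.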
class ConfigMapVolumeSource(HelmYaml):
"""
:param name: Name of the referent. More info: \
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa
:param optional: Specify whether the ConfigMap or its keys must be defined
:param default_mode: Optional: mode bits to use on created files by default. Must \
be a value between 0 and 0777. Defaults to 0644. Directories within the path \
are not affected by this setting. This might be in conflict with other options \
that affect the file mode, like fsGroup, and the result can be other mode bits \
set.
:param items: If unspecified, each key-value pair in the Data field of the \
referenced ConfigMap will be projected into the volume as a file whose name is \
the key and content is the value. If specified, the listed keys will be \
projected into the specified paths, and unlisted keys will not be present. If \
a key is specified which is not present in the ConfigMap, the volume setup \
will error unless it is marked optional. Paths must be relative and may not \
contain the '..' path or start with '..'.
"""
def __init__(
self,
name: str,
optional: bool,
default_mode: Optional[int] = None,
items: Optional[List[KeyToPath]] = None,
):
self.name = name
self.optional = optional
self.defaultMode = default_mode
self.items = items
class PersistentVolumeClaimVolumeSource(HelmYaml):
"""
:param claim_name: ClaimName is the name of a PersistentVolumeClaim in the same \
namespace as the pod using this volume. More info: \
https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims # noqa
:param read_only: Will force the ReadOnly setting in VolumeMounts. Default false.
"""
def __init__(self, claim_name: str, read_only: bool):
self.claimName = claim_name
self.readOnly = read_only
class GitRepoVolumeSource(HelmYaml):
"""
:param repository: Repository URL
:param revision: Commit hash for the specified revision.
:param directory: Target directory name. Must not contain or start with '..'. If \
'.' is supplied, the volume directory will be the git repository. Otherwise, \
if specified, the volume will contain the git repository in the subdirectory \
with the given name.
"""
def __init__(self, repository: str, revision: str, directory: Optional[str] = None):
self.repository = repository
self.revision = revision
self.directory = directory
class Volume(HelmYaml):
"""
:param name: Volume's name. Must be a DNS_LABEL and unique within the pod. More \
info: \
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa
:param config_map: ConfigMap represents a configMap that should populate this \
volume
:param downward_api: DownwardAPI represents downward API about the pod that should \
populate this volume
:param empty_dir: EmptyDir represents a temporary directory that shares a pod's \
lifetime. More info: \
https://kubernetes.io/docs/concepts/storage/volumes#emptydir
:param git_repo: GitRepo represents a git repository at a particular revision. \
DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, \
mount an EmptyDir into an InitContainer that clones the repo using git, then \
mount the EmptyDir into the Pod's container.
:param host_path: HostPath represents a pre-existing file or directory on the host \
machine that is directly exposed to the container. This is generally used for \
system agents or other privileged things that are allowed to see the host \
machine. Most containers will NOT need this. More info: \
https://kubernetes.io/docs/concepts/storage/volumes#hostpath
:param persistent_volume_claim: PersistentVolumeClaimVolumeSource represents a \
reference to a PersistentVolumeClaim in the same namespace. More info: \
https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims # noqa
:param projected: Items for all in one resources secrets, configmaps, and downward \
API
:param secret: Secret represents a secret that should populate this volume. More \
info: https://kubernetes.io/docs/concepts/storage/volumes#secret
:param aws_elastic_block_store: AWSElasticBlockStore represents an AWS Disk \
resource that is attached to a kubelet's host machine and then exposed to the \
pod. More info: \
https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
:param azure_disk: AzureDisk represents an Azure Data Disk mount on the host and \
bind mount to the pod.
:param azure_file: AzureFile represents an Azure File Service mount on the host and \
bind mount to the pod.
:param cephfs: CephFS represents a Ceph FS mount on the host that shares a pod's \
lifetime
:param cinder: Cinder represents a cinder volume attached and mounted on kubelets \
host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md
:param csi: CSI (Container Storage Interface) represents storage that is handled by \
an external CSI driver (Alpha feature).
:param fc: FC represents a Fibre Channel resource that is attached to a kubelet's \
host machine and then exposed to the pod.
:param flex_volume: FlexVolume represents a generic volume resource that is \
provisioned/attached using an exec based plugin.
:param flocker: Flocker represents a Flocker volume attached to a kubelet's host \
machine. This depends on the Flocker control service being running
:param gce_persistent_disk: GCEPersistentDisk represents a GCE Disk resource that \
is attached to a kubelet's host machine and then exposed to the pod. More \
info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param glusterfs: Glusterfs represents a Glusterfs mount on the host that shares a \
pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md
:param iscsi: ISCSI represents an ISCSI Disk resource that is attached to a \
kubelet's host machine and then exposed to the pod. More info: \
https://examples.k8s.io/volumes/iscsi/README.md
:param nfs: NFS represents an NFS mount on the host that shares a pod's lifetime \
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
:param photon_persistent_disk: PhotonPersistentDisk represents a PhotonController \
persistent disk attached and mounted on kubelets host machine
:param portworx_volume: PortworxVolume represents a portworx volume attached and \
mounted on kubelets host machine
:param quobyte: Quobyte represents a Quobyte mount on the host that shares a pod's \
lifetime
:param rbd: RBD represents a Rados Block Device mount on the host that shares a \
pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md
:param scale_io: ScaleIO represents a ScaleIO persistent volume attached and \
mounted on Kubernetes nodes.
:param storageos: StorageOS represents a StorageOS volume attached and mounted on \
Kubernetes nodes.
:param vsphere_volume: VsphereVolume represents a vSphere volume attached and \
mounted on kubelets host machine
"""
def __init__(
self,
name: str,
config_map: Optional[ConfigMapVolumeSource] = None,
downward_api: Optional[DownwardAPIVolumeSource] = None,
empty_dir: Optional[EmptyDirVolumeSource] = None,
git_repo: Optional[GitRepoVolumeSource] = None,
host_path: Optional[HostPathVolumeSource] = None,
persistent_volume_claim: Optional[PersistentVolumeClaimVolumeSource] = None,
projected: Optional[ProjectedVolumeSource] = None,
secret: Optional[SecretVolumeSource] = None,
aws_elastic_block_store: Optional[AWSElasticBlockStoreVolumeSource] = None,
azure_disk: Optional[AzureDiskVolumeSource] = None,
azure_file: Optional[AzureFileVolumeSource] = None,
cephfs: Optional[CephFSVolumeSource] = None,
cinder: Optional[CinderVolumeSource] | |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.175148,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.20872,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0255217,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.222735,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.1712,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.159457,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.257199,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.129825,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.546482,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.156125,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.41086,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0323433,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00668836,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0565632,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0494645,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0889065,
'Execution Unit/Register Files/Runtime Dynamic': 0.0561529,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.125545,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.321206,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.55195,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00153566,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00153566,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00138658,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00056358,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000710562,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00516846,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0129723,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0475515,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.02469,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.154015,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.161506,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.39,
'Instruction Fetch Unit/Runtime Dynamic': 0.381214,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0286222,
'L2/Runtime Dynamic': 0.00541735,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.6638,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.688061,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0461566,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0461567,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.88177,
'Load Store Unit/Runtime Dynamic': 0.961847,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.113814,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.227629,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0403931,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0408064,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.188064,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0252976,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.413561,
'Memory Management Unit/Runtime Dynamic': 0.066104,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.7143,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0850806,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00822969,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0794967,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
import unittest
import pytest
_marker = object()
class _KeywordIndexTestsBase:
def _getTargetClass(self):
from . import KeywordIndex
return KeywordIndex
def _populate(self, index):
index.index_doc(1, ("zope", "CMF", "Zope3"))
index.index_doc(2, ("the", "quick", "brown", "FOX"))
index.index_doc(3, ("Zope",))
index.index_doc(4, ())
index.index_doc(5, ("cmf",))
_populated_doc_count = 4
_populated_word_count = 9
def test_normalize(self):
index = self._makeOne()
assert index.normalize(["Foo"]) == ["Foo"]
def test_simplesearch(self):
index = self._makeOne()
self._populate(index)
self._search(index, [""], self.IFSet())
self._search(index, "cmf", self.IFSet([5]))
self._search(index, ["cmf"], self.IFSet([5]))
self._search(index, ["Zope"], self.IFSet([3]))
self._search(index, ["Zope3"], self.IFSet([1]))
self._search(index, ["foo"], self.IFSet())
def test_search_and(self):
index = self._makeOne()
self._populate(index)
self._search_and(index, ("CMF", "Zope3"), self.IFSet([1]))
self._search_and(index, ("CMF", "zope"), self.IFSet([1]))
self._search_and(index, ("cmf", "zope4"), self.IFSet())
self._search_and(index, ("quick", "FOX"), self.IFSet([2]))
def test_search_or(self):
index = self._makeOne()
self._populate(index)
self._search_or(index, ("cmf", "Zope3"), self.IFSet([1, 5]))
self._search_or(index, ("cmf", "zope"), self.IFSet([1, 5]))
self._search_or(index, ("cmf", "zope4"), self.IFSet([5]))
self._search_or(index, ("zope", "Zope"), self.IFSet([1, 3]))
def test_apply(self):
index = self._makeOne()
self._populate(index)
self._apply(index, ("CMF", "Zope3"), self.IFSet([1]))
self._apply(index, ("CMF", "zope"), self.IFSet([1]))
self._apply(index, ("cmf", "zope4"), self.IFSet())
self._apply(index, ("quick", "FOX"), self.IFSet([2]))
def test_apply_and(self):
index = self._makeOne()
self._populate(index)
self._apply_and(index, ("CMF", "Zope3"), self.IFSet([1]))
self._apply_and(index, ("CMF", "zope"), self.IFSet([1]))
self._apply_and(index, ("cmf", "zope4"), self.IFSet())
self._apply_and(index, ("quick", "FOX"), self.IFSet([2]))
def test_apply_or(self):
index = self._makeOne()
self._populate(index)
self._apply_or(index, ("cmf", "Zope3"), self.IFSet([1, 5]))
self._apply_or(index, ("cmf", "zope"), self.IFSet([1, 5]))
self._apply_or(index, ("cmf", "zope4"), self.IFSet([5]))
self._apply_or(index, ("zope", "Zope"), self.IFSet([1, 3]))
def test_apply_with_only_tree_set(self):
index = self._makeOne()
index.tree_threshold = 0
self._populate(index)
assert type(index._fwd_index["zope"]) == type(self.IFTreeSet())
self._apply_and(index, ("CMF", "Zope3"), self.IFSet([1]))
self._apply_and(index, ("CMF", "zope"), self.IFSet([1]))
self._apply_and(index, ("cmf", "zope4"), self.IFSet())
self._apply_and(index, ("quick", "FOX"), self.IFSet([2]))
def test_apply_with_mix_of_tree_set_and_simple_set(self):
index = self._makeOne()
index.tree_threshold = 2
self._populate(index)
assert type(index._fwd_index["zope"]) == type(self.IFSet())
self._apply_and(index, ("CMF", "Zope3"), self.IFSet([1]))
self._apply_and(index, ("CMF", "zope"), self.IFSet([1]))
self._apply_and(index, ("cmf", "zope4"), self.IFSet())
self._apply_and(index, ("quick", "FOX"), self.IFSet([2]))
def test_apply_doesnt_mutate_query(self):
# Some previous version of zope.index munged the query dict
index = self._makeOne()
index.index_doc(1, [1, 2, 3])
index.index_doc(2, [3, 4, 5])
index.index_doc(3, [5, 6, 7])
index.index_doc(4, [7, 8, 9])
index.index_doc(5, [9, 10])
query = {"operator": "or", "query": [5]}
result = index.apply(FrozenDict(query))
assert list(result) == [2, 3]
assert query == {"operator": "or", "query": [5]}
def test_applyAny(self):
index = self._makeOne()
index.index_doc(1, [1, 2, 3])
index.index_doc(2, [3, 4, 5])
index.index_doc(3, [5, 6, 7])
index.index_doc(4, [7, 8, 9])
index.index_doc(5, [9, 10])
result = index.applyAny([5, 9])
assert list(result) == [2, 3, 4, 5]
def test_applyNotAny(self):
index = self._makeOne()
index.index_doc(1, [1, 2, 3])
index.index_doc(2, [3, 4, 5])
index.index_doc(3, [5, 6, 7])
index.index_doc(4, [7, 8, 9])
index.index_doc(5, [9, 10])
result = index.applyNotAny([5, 9])
assert list(result) == [1]
def test_applyAll(self):
index = self._makeOne()
index.index_doc(1, [1, 2, 3])
index.index_doc(2, [3, 4, 5])
index.index_doc(3, [5, 6, 7])
index.index_doc(4, [7, 8, 9])
index.index_doc(5, [9, 10])
result = index.applyAll([5, 9])
assert list(result) == []
def test_applyNotAll(self):
index = self._makeOne()
index.index_doc(1, [1, 2, 3])
index.index_doc(2, [3, 4, 5])
index.index_doc(3, [5, 6, 7])
index.index_doc(4, [7, 8, 9])
index.index_doc(5, [9, 10])
result = index.applyNotAll([5, 9])
assert list(result) == [1, 2, 3, 4, 5]
def test_applyEq(self):
index = self._makeOne()
index.index_doc(1, [1, 2, 3])
index.index_doc(2, [3, 4, 5])
index.index_doc(3, [5, 6, 7])
index.index_doc(4, [7, 8, 9])
index.index_doc(5, [9, 10])
result = index.applyEq(5)
assert list(result) == [2, 3]
def test_applyNotEq(self):
index = self._makeOne()
index.index_doc(1, [1, 2, 3])
index.index_doc(2, [3, 4, 5])
index.index_doc(3, [5, 6, 7])
index.index_doc(4, [7, 8, 9])
index.index_doc(5, [9, 10])
result = index.applyNotEq(5)
assert list(result) == [1, 4, 5]
def test_applyNotEq_with_unindexed_docs(self):
def discriminator(obj, default):
if isinstance(obj, list):
return obj
return default
index = self._makeOne(discriminator)
index.index_doc(1, [1, 2, 3])
index.index_doc(2, [3, 4, 5])
index.index_doc(3, [5, 6, 7])
index.index_doc(4, [7, 8, 9])
index.index_doc(5, [9, 10])
index.index_doc(6, (5, 6))
result = index.applyNotEq(5)
assert list(result) == [1, 4, 5, 6]
def test_applyNotEq_nothing_indexed(self):
def discriminator(obj, default):
return default
index = self._makeOne(discriminator)
index.index_doc(1, [1, 2, 3])
index.index_doc(2, [3, 4, 5])
index.index_doc(3, [5, 6, 7])
index.index_doc(4, [7, 8, 9])
index.index_doc(5, [9, 10])
index.index_doc(6, (5, 6))
result = index.applyNotEq(5)
assert list(result) == [1, 2, 3, 4, 5, 6]
def test_optimize_converts_to_tree_set(self):
index = self._makeOne()
self._populate(index)
assert type(index._fwd_index["zope"]) == type(self.IFSet())
index.tree_threshold = 0
index.optimize()
assert type(index._fwd_index["zope"]) == type(self.IFTreeSet())
def test_docids(self):
index = self._makeOne()
index.index_doc(1, [1, 2, 3])
index.index_doc(2, [3, 4, 5])
index.index_doc(3, [5, 6, 7])
index.index_doc(4, [7, 8, 9])
index.index_doc(5, [9, 10])
index.index_doc(6, (5, 6))
assert set(index.docids()) == set((1, 2, 3, 4, 5, 6))
def test_docids_with_indexed_and_not_indexed(self):
index = self._makeOne()
index.index_doc(1, [1])
index.index_doc(2, _marker)
assert set([1, 2]) == set(index.docids())
def test_optimize_converts_to_simple_set(self):
index = self._makeOne()
index.tree_threshold = 0
self._populate(index)
assert type(index._fwd_index["zope"]) == type(self.IFTreeSet())
index.tree_threshold = 99
index.optimize()
assert type(index._fwd_index["zope"]) == type(self.IFSet())
def test_optimize_leaves_words_alone(self):
index = self._makeOne()
self._populate(index)
assert type(index._fwd_index["zope"]) == type(self.IFSet())
index.tree_threshold = 99
index.optimize()
assert type(index._fwd_index["zope"]) == type(self.IFSet())
def test_index_with_empty_sequence_unindexes(self):
index = self._makeOne()
self._populate(index)
self._search(index, "cmf", self.IFSet([5]))
index.index_doc(5, ())
self._search(index, "cmf", self.IFSet([]))
class _ThirtyTwoBitBase:
def _get_family(self):
import BTrees
return BTrees.family32
def IFSet(self, *args, **kw):
from BTrees.IFBTree import IFSet
return IFSet(*args, **kw)
def IFTreeSet(self, *args, **kw):
from BTrees.IFBTree import IFTreeSet
return IFTreeSet(*args, **kw)
class _SixtyFourBitBase:
def _get_family(self):
import BTrees
return BTrees.family64
def IFSet(self, *args, **kw):
from BTrees.LFBTree import LFSet
return LFSet(*args, **kw)
def IFTreeSet(self, *args, **kw):
from BTrees.LFBTree import LFTreeSet
return LFTreeSet(*args, **kw)
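# Presumably the concrete test classes are assembled from these mix-ins later in
# the module (the class names below are assumptions for illustration only):
#   class KeywordIndexTest32(_KeywordIndexTestsBase, _ThirtyTwoBitBase, _TestCaseBase):
#       pass
#   class KeywordIndexTest64(_KeywordIndexTestsBase, _SixtyFourBitBase, _TestCaseBase):
#       pass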
class _TestCaseBase:
def _makeOne(self, discriminator=_marker, family=_marker):
def _discriminator(obj, default):
if obj is _marker:
return default
return obj
if discriminator is _marker:
discriminator = _discriminator
if family is _marker:
family = self._get_family()
return self._getTargetClass()(discriminator=discriminator, family=family)
def _search(self, index, query, expected, mode="and"):
results = index.search(query, mode)
# results and expected are IFSets() but we cannot
# compare them directly since __eq__() does not seem
# to be implemented for BTrees
assert results.keys() == expected.keys()
def _search_and(self, index, query, expected):
return self._search(index, query, expected, "and")
def _search_or(self, index, query, expected):
return self._search(index, query, expected, "or")
def _apply(self, index, query, expected, mode="and"):
results = index.apply(query)
assert results.keys() == expected.keys()
def _apply_and(self, index, query, expected):
results = index.apply({"operator": "and", "query": query})
assert results.keys() == expected.keys()
def _apply_or(self, index, query, expected):
results = index.apply({"operator": "or", "query": query})
assert results.keys() == expected.keys()
def test_class_conforms_to_IIndexInjection(self):
from zope.interface.verify import verifyClass
from hypatia.interfaces import IIndexInjection
verifyClass(IIndexInjection, self._getTargetClass())
def test_instance_conforms_to_IIndexInjection(self):
from zope.interface.verify import verifyObject
from hypatia.interfaces import IIndexInjection
verifyObject(IIndexInjection, self._makeOne())
def test_class_conforms_to_IIndexStatistics(self):
from zope.interface.verify import verifyClass
from hypatia.interfaces import IIndexStatistics
verifyClass(IIndexStatistics, self._getTargetClass())
def test_instance_conforms_to_IIndexStatistics(self):
from zope.interface.verify import verifyObject
from hypatia.interfaces import IIndexStatistics
verifyObject(IIndexStatistics, self._makeOne())
def test_class_conforms_to_IKeywordQuerying(self):
from zope.interface.verify import verifyClass
from .interfaces import IKeywordQuerying
verifyClass(IKeywordQuerying, self._getTargetClass())
def test_instance_conforms_to_IKeywordQuerying(self):
from zope.interface.verify import verifyObject
from .interfaces import IKeywordQuerying
verifyObject(IKeywordQuerying, self._makeOne())
def test_class_conforms_to_IIndex(self):
from zope.interface.verify import verifyClass
from hypatia.interfaces import IIndex
verifyClass(IIndex, self._getTargetClass())
def test_instance_conforms_to_IIndex(self):
from zope.interface.verify import verifyObject
from hypatia.interfaces import IIndex
verifyObject(IIndex, self._makeOne())
def test_document_repr(self):
index = self._makeOne()
self._populate(index)
assert "CMF" in index.document_repr(1)
assert index.document_repr(50, True) == True
def test_ctor_defaults(self):
index = self._makeOne()
assert index.family is self._get_family()
def test_ctor_explicit_family(self):
import BTrees
index = self._makeOne(family=BTrees.family32)
assert index.family is BTrees.family32
def test_ctor_callback_discriminator(self):
def _discriminator(obj, default):
""" """
index = self._makeOne(discriminator=_discriminator)
assert index.discriminator is _discriminator
def test_ctor_string_discriminator(self):
index = self._makeOne(discriminator="abc")
assert index.discriminator == "abc"
def test_ctor_bad_discriminator(self):
with pytest.raises(ValueError):
self._makeOne(object())
def test_empty_index(self):
index = self._makeOne()
assert index.indexed_count() == 0
assert index.word_count() == 0
assert not index.has_doc(1)
def test_index_doc_string_value_raises(self):
index = self._makeOne()
with pytest.raises(TypeError):
index.index_doc(1, "albatross")
def test_index_doc_single(self):
index = self._makeOne()
index.index_doc(1, ("albatross", "cormorant"))
assert index.indexed_count() == 1
assert index.word_count() == 2
assert index.has_doc(1)
assert "albatross" in index._fwd_index
assert "cormorant" in index._fwd_index
def test_index_doc_existing(self):
index = self._makeOne()
index.index_doc(1, ("albatross", "cormorant"))
index.index_doc(1, ("buzzard", "cormorant"))
assert index.indexed_count() == 1
assert index.word_count() == 2
assert index.has_doc(1)
assert not ("albatross" in index._fwd_index)
assert "buzzard" in index._fwd_index
assert "cormorant" in index._fwd_index
def test_index_doc_many(self):
index = self._makeOne()
self._populate(index)
assert index.indexed_count() == self._populated_doc_count
assert index.word_count() == self._populated_word_count
for docid in range(1, 6):
if docid == 4:
assert not index.has_doc(docid)
else:
assert index.has_doc(docid)
def test_index_doc_then_missing_value(self):
index = self._makeOne()
index.index_doc(20, [1, 2, 3])
assert set([20]) == set(index.applyEq(2))
assert 20 in index.docids()
index.index_doc(20, _marker)
assert set() == set(index.applyEq(2))
assert 20 in index.docids()
def test_index_doc_missing_value_then_with_value(self):
index = self._makeOne()
index.index_doc(3, _marker)
assert set() == set(index.applyEq(4))
assert 3 in index.docids()
index.index_doc(3, [3, 4, 5])
assert set([3]) == set(index.applyEq(4))
assert 3 in index.docids()
def test_index_doc_missing_value_then_unindex(self):
index = self._makeOne()
index.index_doc(3, _marker)
assert set() == set(index.applyEq(4))
assert 3 in index.docids()
index.unindex_doc(3)
assert set() == set(index.applyEq(4))
docids = index.docids()
assert not (3 in docids)
def test_index_doc_value_is_marker(self):
index = self._makeOne()
# unindex_doc should never be called here; if it is, this lambda raises ZeroDivisionError
index.unindex_doc = lambda *arg, **kw: 0 / 1
index.index_doc(1, | |
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404, redirect
from django.template.loader import render_to_string
from django.http.response import HttpResponse
from django.contrib import messages
from .forms import CourseCreate
from django.urls import reverse
from django.http import Http404
from django.views import View
from .models import Course, Module, Content, CourseUserRelations
from exams.models import Exam, ExamUserRelations
import json, re
import base64
import io
import sys
from django.core.files.uploadedfile import InMemoryUploadedFile
from mimetypes import guess_extension
class CoursesViewFunctions(object):
courses = {}
course_data = {}
def get_courses(self):
course = Course.objects.all()
if len(course) == 0:
return {"empty": True}
paginator = Paginator(course, 6)
page = self.request.GET.get("page")
try:
self.courses["courses_instances"] = paginator.get_page(page)
except PageNotAnInteger:
self.courses["courses_instances"] = paginator.get_page(1)
except EmptyPage:
self.courses["courses_instances"] = paginator.get_page(paginator.num_pages)
return self.courses
def get_owner_courses_exams(self):
user = {"CoursesEmpty": False, "ExamsEmpty": False}
if self.request.user.is_admin:
course = Course.objects.all()
exam = Exam.objects.all()
else:
course = Course.objects.filter(user=self.request.user)
exam = Exam.objects.filter(user=self.request.user)
if len(course) == 0:
user["CoursesEmpty"] = True
else:
course_paginator = Paginator(course, 3)
course_page = self.request.GET.get("coursepage")
try:
user["Courses"] = course_paginator.get_page(course_page)
except PageNotAnInteger:
user["Courses"] = course_paginator.get_page(1)
except EmptyPage:
user["Courses"] = course_paginator.get_page(course_paginator.num_pages)
if len(exam) == 0:
user["ExamsEmpty"] = True
else:
exam_paginator = Paginator(exam, 3)
exam_page = self.request.GET.get("exampage")
try:
user["Exams"] = exam_paginator.get_page(exam_page)
except PageNotAnInteger:
user["Exams"] = exam_paginator.get_page(1)
except EmptyPage:
user["Exams"] = exam_paginator.get_page(exam_paginator.num_pages)
return user
def get_course_instance(self):
id = self.kwargs.get("id")
course = None
if id is not None:
course = get_object_or_404(Course, id=id)
return course
def get_module_instance(self):
id = self.kwargs.get("idModule")
module = None
if id is not None:
module = get_object_or_404(Module, id=id)
return module
def get_content_instance(self):
id = self.kwargs.get("idContent")
content = None
relation = None
if id is not None:
content = get_object_or_404(Content, id=id)
if type(content).__name__ == "Content":
if content.content_type == "5":
try:
relation = ExamUserRelations.objects.get(
user=self.request.user, exam=content.exam
)
except ExamUserRelations.DoesNotExist:
relation = None
return {"content": content, "relation": relation}
return content
def get_course_user_relation(self):
user = {"courses_empty": False}
course = CourseUserRelations.objects.filter(user=self.request.user)
if len(course) == 0:
user["courses_empty"] = True
else:
course_paginator = Paginator(course, 3)
course_page = self.request.GET.get("coursepage")
try:
user["courses_relations"] = course_paginator.get_page(course_page)
except PageNotAnInteger:
user["courses_relations"] = course_paginator.get_page(1)
except EmptyPage:
user["courses_relations"] = course_paginator.get_page(
course_paginator.num_pages
)
return user
def relation_exist(self):
id = self.kwargs.get("id")
if id is None:
return None
try:
course_instance = Course.objects.get(id=id)
except Course.DoesNotExist:
return None
try:
relation = CourseUserRelations.objects.get(
user=self.request.user, course=course_instance
)
return True
except CourseUserRelations.DoesNotExist:
return False
def to_file(self, image, picture_name):
"""base64 encoded file to Django InMemoryUploadedFile that can be placed into request.FILES."""
# 'data:image/png;base64,<base64 encoded string>'
try:
idx = image[:50].find(",")
if idx == -1 or not image.startswith("data:image/"):
raise Exception()
base64file = image[idx + 1 :]
attributes = image[:idx]
content_type = attributes[len("data:") : attributes.find(";")]
except Exception as e:
return None
f = io.BytesIO(base64.b64decode(base64file))
image = InMemoryUploadedFile(
f,
field_name="banner",
name=picture_name + guess_extension(content_type),
content_type=content_type,
size=sys.getsizeof(f),
charset=None,
)
return image
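# Usage sketch (illustrative values only): given a data URL such as
#   "data:image/png;base64,iVBORw0KGgo..."
# to_file(image, "django_basics") returns an InMemoryUploadedFile named
# "django_basics.png" that can be placed into request.FILES; any string that
# does not start with "data:image/" makes the method return None.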
class CourseCreateView(CoursesViewFunctions, View):
template_name = "courses/courseCreateView.html"
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return redirect("pages:login-page")
if not request.user.has_permissions:
messages.error(
request, "You do not have sufficient permissions to use this feature"
)
return redirect("pages:home-page")
return render(
request,
self.template_name,
{
"course_form": CourseCreate(),
"exams_instances": Exam.objects.filter(content=None, user=request.user),
},
)
def post(self, request, *args, **kwargs):
if not request.user.has_permissions:
messages.error(
request, "You do not have sufficient permissions to use this feature"
)
return redirect("pages:home-page")
if request.is_ajax():
course_data = json.loads(self.request.body)["Course"]
banner = self.to_file(
course_data.pop("banner"), course_data["name"] + "_banner"
)
course_modules_data = course_data.pop("Modules")
if banner is not None:
request.FILES["banner"] = banner
course = CourseCreate(
course_data, request.FILES, modules=course_modules_data
)
if course.is_valid():
course_instance = course.save(commit=False)
course_instance.user = self.request.user
course_instance.save(modules_list_forms=course.modules_forms())
messages.success(request, "Course created successfully")
return HttpResponse(
json.dumps(
{
"error": False,
"url_redirect": reverse("courses:course-owner-list"),
}
)
)
messages.error(request, "There is an error in the form.")
html = (
render_to_string(
self.template_name,
{
"course_form": course,
"exams_instances": Exam.objects.filter(
content=None, user=request.user
),
},
request,
)
+ ""
)
body = re.findall("<body>(.*?)</body>", html, re.DOTALL)
return HttpResponse(json.dumps({"error": True, "content": body}))
messages.error(request, "An error has occurred, please try again later.")
return redirect("pages:home-page")
class CourseListView(CoursesViewFunctions, View):
template_name = "courses/courseListView.html"
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return redirect("pages:login-page")
return render(request, self.template_name, self.get_courses())
class CourseMyListView(CoursesViewFunctions, View):
template_name = "courses/courseMyListView.html"
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return redirect("pages:login-page")
return render(request, self.template_name, self.get_course_user_relation())
class CourseExamManageView(CoursesViewFunctions, View):
template_name = "courses/courseExamManageView.html"
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return redirect("pages:login-page")
if not request.user.has_permissions:
messages.error(
request, "You do not have sufficient permissions to use this feature"
)
return redirect("pages:home-page")
return render(request, self.template_name, self.get_owner_courses_exams())
class CourseDetailView(CoursesViewFunctions, View):
template_name = "courses/courseDetailView.html"
def get(self, request, id=None, *args, **kwargs):
course = self.get_course_instance()
if course is not None:
owner = False
registered = False
if request.user.is_authenticated:
owner = self.request.user == course.user
registered = bool(self.relation_exist())
return render(
request,
self.template_name,
{"course_instance": course, "Owner": owner, "Registered": registered},
)
messages.error(request, "that course doesn't exist")
return redirect("courses:course-list")
class CourseDeleteView(CoursesViewFunctions, View):
def get(self, request, id=None, *args, **kwargs):
return redirect("pages:home-page")
def post(self, request, *args, **kwargs):
if not request.user.has_permissions:
messages.error(
request, "You do not have sufficient permissions to use this feature"
)
return redirect("pages:home-page")
course = self.get_course_instance()
if course is not None:
if (course.user == request.user) or request.user.is_admin:
course.delete()
messages.success(request, "Course delete")
return redirect("courses:course-owner-list")
return redirect("courses:course-list")
messages.error(request, "That course doesn't exist")
return redirect("courses:course-owner-list")
class CourseUpdateView(CoursesViewFunctions, View):
template_name = "courses/courseUpdateView.html"
def get(self, request, id=None, *args, **kwargs):
if not request.user.is_authenticated:
return redirect("pages:login-page")
if not request.user.has_permissions:
messages.error(
request, "You do not have sufficient permissions to use this feature"
)
return redirect("pages:home-page")
course = self.get_course_instance()
if course is not None:
return render(
request,
self.template_name,
{
"course_instance": course,
"course_form": CourseCreate(),
"exams_instances": Exam.objects.filter(
content=None, user=course.user
),
},
)
messages.error(request, "That course doesn't exist")
return redirect("courses:course-list")
def post(self, request, *args, **kwargs):
if not request.user.has_permissions:
messages.error(
request, "You do not have sufficient permissions to use this feature"
)
return redirect("pages:home-page")
if request.is_ajax():
courseObject = self.get_course_instance()
if courseObject is not None:
course_data = json.loads(self.request.body)["Course"]
banner = self.to_file(
course_data.pop("banner"), course_data["name"] + "_banner"
)
course_modules_data = course_data.pop("Modules")
if banner is not None:
request.FILES["banner"] = banner
course = CourseCreate(
course_data,
request.FILES,
modules=course_modules_data,
instance=self.get_course_instance(),
)
if course.is_valid():
course_instance = course.save(commit=False)
course_instance.save(
module_list=course.modules_forms(), update=True
)
messages.success(
request, "The course has been successfully edited."
)
return HttpResponse(
json.dumps(
{
"error": False,
"url_redirect": reverse("courses:course-owner-list"),
}
)
)
messages.error(request, "There is an error in the form.")
html = (
render_to_string(
self.template_name,
{
"course_instance": courseObject,
"course_form": course,
"exams_instances": Exam.objects.filter(
content=None, user=self.get_course_instance().user
),
},
request,
)
+ ""
)
body = re.findall("<body>(.*?)</body>", html, re.DOTALL)
return HttpResponse(json.dumps({"error": True, "content": body}))
messages.error(request, "that course doesn't exist")
return redirect("courses:course-list")
messages.error(request, "An error has occurred, please try again later")
return redirect("pages:home-page")
class CourseEnrollView(CoursesViewFunctions, View):
def get(self, request, id=None, *args, **kwargs):
return redirect("pages:home-page")
def post(self, request, *args, **kwargs):
if not request.user.is_authenticated:
messages.warning(request, "you need to login to register for a course")
return redirect("pages:login-page")
relation = self.relation_exist()
course = self.get_course_instance()
if relation:
messages.success(request, "You are already enrolled in this course.")
return redirect(course.get_detail_url())
elif not relation:
CourseUserRelations.objects.create(user=self.request.user, course=course)
messages.success(request, "You have successfully enrolled in the course.")
return redirect(course.get_detail_url())
messages.error(request, "An error has occurred, please try again later.")
return redirect("courses:course-list")
class CourseHomeView(CoursesViewFunctions, View):
template_name = "courses/courseHomeView.html"
def get(self, request, id, *args, **kwargs):
if not request.user.is_authenticated:
messages.warning(request, "you need to login to register for a course.")
return redirect("pages:login-page")
registered = bool(self.relation_exist())
course = self.get_course_instance()
if not registered:
messages.warning(
request, "You need to register to view the course content.",
)
return redirect(course.get_detail_url())
module = Module.objects.filter(course=course)[0]
return render(
request,
self.template_name,
{"course_instance": course, "module_instance": module,},
)
class ModuleHomeView(CoursesViewFunctions, View):
template_name = "courses/courseHomeView.html"
def get(self, request, id, *args, **kwargs):
if not request.user.is_authenticated:
messages.warning(request, "you need to login to register for a course.")
return redirect("pages:login-page")
registered = bool(self.relation_exist())
course = self.get_course_instance()
if not registered:
messages.warning(
request, "You need to register to view the course content.",
)
return redirect(course.get_detail_url())
module = self.get_module_instance()
return render(
request,
self.template_name,
{"course_instance": course, "module_instance": module,},
)
class ContentHomeView(CoursesViewFunctions, View):
template_name = "courses/courseContentView.html"
def get(self, request, id, *args, **kwargs):
if not request.user.is_authenticated:
messages.warning(request, "you need to login to register for a course.")
return redirect("pages:login-page")
registered = bool(self.relation_exist())
course = self.get_course_instance()
if not registered:
messages.warning(
request, "You need to register to view the course content.",
)
return redirect(course.get_detail_url())
module = self.get_module_instance()
content = self.get_content_instance()
if type(content).__name__ != "dict":
return render(
request,
self.template_name,
{
"course_instance": course,
"module_instance": module,
"content_instance": content,
},
)
return render(
request,
self.template_name,
{
"course_instance": course,
"module_instance": module,
"content_instance": | |
= random.randint(0, 9)
if roll > self.defenseRating:
self.sink()
def isDamaged(self):
return self.damage > 0
def isSunk(self):
return self.damage == -1
def sink(self):
self.damage = -1
if globals.verbose_combat:
print('Sub ' + str(self.name) + ' sinks')
def retreat(self):
self.rtb = True
if self.column:
self.column.sub_positions.remove(self)
def eligibleForPromotion(self):
return self.tonsSunk >= 23 and self.targetsSunk >= 3
def claimTarget(self, target):
if globals.verbose_combat:
print('Sub ' + self.name + ' is claiming target ' + str(target))
self.tonsSunk += target.tons
self.targetsSunk += 1
def canReAttack(self):
return self.damage == 0 and not self.rtb
def promoteSkipper(self):
self.skipper = min(self.skipper + 1, 2)
if globals.verbose_combat:
print('Sub ' + str(self.name) + ' promotes her skipper to level ' + str(self.skipper))
def improvePosition(self):
currentEntryReq = 0
if self.column.entry:
currentEntryReq = self.column.entry
for a in self.column.adjacent:
if a.entry and a.entry > currentEntryReq and a.hasFreeSubSlot():
oc = self.column
# remove from current column and add to new one
self.column.sub_positions.remove(self)
a.sub_positions.append(self)
if globals.verbose_combat:
print('Sub ' + str(self) + ' moves from column ' + str(oc) + ' to ' + str(a))
return
class Wolfpack:
def __init__(self, name, subs):
self.subs = subs
self.name = name
def __repr__(self):
return self.name + ' (' + str(len(self.subs)) + ' boats)'
def getEvent(type):
e = Encounter(type, None, None, None, None)
e.validTarget = False
return e
def getAircraft(nation, asw):
ac = Encounter('AC', nation, None, None, asw)
ac.validTarget = False
return ac
def getWarship(type, name, tons, defense, asw):
ws = Encounter(type, 'british', tons, defense, asw)
ws.name = name
return ws
def getDD(nation, tons, diligent):
dd = Encounter('DD', nation, tons, random.randint(6,7), random.randint(1,3))
dd.diligent = diligent
return dd
def getES(nation, diligent, wp):
# escorts: DD, DE, PF, CT, SL, TB, PG, CG
pool = []
if diligent:
pool.append(('SL', 1, 6, 6))
if wp >= 4:
pool.append(('SL', 1, 6, 5))
pool.append(('CT', 1, 6, 4))
if wp == 5:
pool.append(('CT', 1, 6, 3))
pool.append(('CT', 1, 6, 3))
else:
pool.append(('ES', 1, 6, 2))
pool.append(('SL', 1, 5, 0.5))
pool.append(('FP', 1, 8, 2))
pool.append(('ML', 3, 4, 0))
if wp >= 2:
pool.append(('CT', 1, 6, 1))
pool.append(('CT', 1, 6, 1))
pool.append(('CT', 1, 6, 1))
pool.append(('CT', 1, 6, 1))
pool.append(('CT', 1, 6, 2))
pool.append(('CT', 1, 6, 2))
if wp >= 3:
pool.append(('DE', 1, 6, 2))
pool.append(('CT', 1, 6, 1))
pool.append(('CT', 1, 6, 1))
pool.append(('CT', 1, 6, 1))
pool.append(('CT', 1, 6, 1))
pool.append(('CT', 1, 6, 2))
pool.append(('CT', 1, 6, 2))
pool.append(('CT', 1, 6, 2))
pool.append(('CT', 1, 6, 2))
pool.append(('CT', 1, 6, 2))
if wp >= 4:
pool.append(('SL', 2, 6, 2))
if wp == 5:
pool.append(('DE', 1, 7, 3))
e = random.choice(pool)
es = Encounter(e[0], nation, e[1], e[2], e[3])
es.diligent = diligent
return es
def getAM(nation):
return Encounter('AM', nation, 2, 2, 1)
def getMerchant(nation, hvy):
m = Encounter('M', nation, 0, 0, 0)
try:
a = getMerchant._light
except AttributeError:
# lgt merchant distribution
# 8x 1t 0-0
# 16x 2t 0-0/1-0
# 9x 3t 0-0/1-0
# 20x 4t 1-0
# 35x 5 1-0/3-0
# stored as weight-defense
getMerchant._light = []
for i in range(0,8):
getMerchant._light.append((1, 0))
for i in range(0,16):
getMerchant._light.append((2,0))
for i in range(0,9):
getMerchant._light.append((3,random.randint(0,1)))
for i in range(0,20):
getMerchant._light.append((4,1))
for i in range(0,35):
getMerchant._light.append((5,random.randint(1,3)))
random.shuffle(getMerchant._light)
# hvymerchant distribution:
# 14x 6t 0-0/2-0
# 16x 7t 1-0/2-0
# 11x 8t 2-0
# 9x 9t 3-0
# 5x 10t 3-0
# 2x 11t 3-0
# 2x 12t 3-0
# 14t 3-0
# 17t 3-0
# 20t 4-0
getMerchant._heavy =[]
for i in range(0, 14):
getMerchant._heavy.append((6,random.randint(0,2)))
for i in range(0, 16):
getMerchant._heavy.append((7,random.randint(1,2)))
for i in range(0,11):
getMerchant._heavy.append((8,2))
for i in range(0,9):
getMerchant._heavy.append((9,3))
for i in range(0,5):
getMerchant._heavy.append((10,3))
getMerchant._heavy.append((11,3))
getMerchant._heavy.append((11,3))
getMerchant._heavy.append((12,3))
getMerchant._heavy.append((12,3))
getMerchant._heavy.append((14,3))
getMerchant._heavy.append((17,3))
getMerchant._heavy.append((20,4))
random.shuffle(getMerchant._heavy)
if hvy == True:
s = random.choice(getMerchant._heavy)
m.tons = s[0]
m.defense = s[1]
else:
s = random.choice(getMerchant._light)
m.tons = s[0]
m.defense = s[1]
return m
def getSV(nation, tons):
return Encounter('SV', nation, tons, 0, 0)
def getFV():
return Encounter('FV', 'british', 1, 0, 0)
def getMerchants(count, nation, heavy):
ms = []
for i in range(0, count):
ms.append(getMerchant(nation, heavy))
return ms
def getDDs(count, nation, tons, diligent):
dds = []
for i in range(0, count):
dds.append(getDD(nation, tons, diligent))
return dds
def getESs(count, nation, diligent, wp):
es = []
for i in range(0, count):
es.append(getES(nation, diligent, wp))
return es
def seedCup(config, wp):
''' Note the config is just the column of a single cup/wp read top to bottom.
Fill in zeros for no value'''
cup = []
if len(config) < 57:
print('error -- configuration vector wrong size! expected 57 entries, got', len(config))
# events [0, 1]
cup.append(getEvent('Event'))
if config[1]:
cup.append(getEvent('Draw Liner'))
# convoy/loner aircraft [2, 11]
if config[2]:
cup.append(getAircraft('british', 0.5))
# ... more
# convoy/loner warships [12, 33]
# note: these values are just placeholders for now
if config[12]:
cup.append(getWarship('CV', 'Courageous', 27, 4, 0))
if config[13]:
cup.append(getWarship('CVE', 'Avenger', 15, 4, 0))
if config[14]:
cup.append(getWarship('CV', 'Audacity', 20, 6, 1))
if config[15]:
cup.append(getWarship('CA', 'Malaya', 10, 8, 0))
if config[16]:
cup.append(getWarship('CA', 'Ramilles', 10, 8, 1))
if config[17]:
cl = getWarship('CL', 'D 6t', 6, 4, 1)
cl.fast = True
cup.append(cl)
if config[18]:
cup.append(getAM('british'))
cup += getDDs(config[19], 'british/aus/greek/nor', 1, False)
cup += getDDs(config[20], 'british/aus/netherland/vichy/poland', 2, False)
cup += getESs(config[21], 'british/nor/vichy/poland', False, wp)
cup += getDDs(config[22], 'british', 2, True)
cup += getESs(config[23], 'british', True, wp)
cup += getDDs(config[24], 'canadian', 2, False)
cup += getDDs(config[25], 'canadian', 2, True)
#CTs here
cup += getDDs(config[28], 'french', 2, False)
if config[29]:
cup.append(Encounter('TB', 'french', 1, 1, 1))
cup += getDDs(config[30], 'us', 1, False)
cup += getDDs(config[31], 'us', 2, False)
# merchants [34, 56]
cup += getMerchants(config[34], 'british', True)
cup += getMerchants(config[35], 'british', False)
cup += getMerchants(config[36], 'french', True)
cup += getMerchants(config[37], 'french', False)
cup += getMerchants(config[38], 'nor/dutch/belg', True)
cup += getMerchants(config[39], 'nor/dutch/belg', False)
# [40]
cup += getMerchants(config[40], 'canadian', True)
cup += getMerchants(config[41], 'canadian', False)
cup += getMerchants(config[42], 'danish/finnish', True)
cup += getMerchants(config[43], 'danish/finnish', False)
cup += getMerchants(config[44], 'swedish', True)
cup += getMerchants(config[45], 'swedish', False)
cup += getMerchants(config[46], 'panamian', True)
cup += getMerchants(config[47], 'panamian', False)
cup += getMerchants(config[48], 'greek', True)
cup += getMerchants(config[49], 'greek', False)
# [50]
cup += getMerchants(config[50], 'us/brazil/soviet', True)
cup += getMerchants(config[51], 'us/brazil/soviet', False)
if config[52]:
cup.append(Encounter('CM', 'british', 7, 4, 1))
if config[53] > 0:
cup.append(getSV('finnish', 2))
if config[54] > 0:
cup.append(getSV('us', 2))
if config[55]:
cup.append(getSV('british', 1))
if config[56]:
cup.append(getFV())
#print(cup);
return cup
def printCup(cup):
for c in cup:
print(c)
def seedTDCCup(warperiod):
dist = []
if warperiod == 1:
dist = [1,3,7,8,8,7,4,1,1]
if warperiod == 2:
dist = [1,3,7,8,9,8,2,1,1]
if warperiod == 3:
dist = [1,2,7,6,9,8,6,2,1]
if warperiod == 4:
dist = [1,3,7,8,10,8,4,2,1]
if warperiod == 5:
dist = [1,2,7,8,9,8,7,3,1]
def repeat(value, count):
return [value]*count
cups['tdc'] = []
mod = -4
for i in dist:
cups['tdc'] += repeat(mod, i)
mod += 1
#print('Reshuffled TDC cup for WP' + str(warperiod) + ': ', cups['tdc'])
random.shuffle(cups['tdc'])
def drawTDCCounter():
return random.choice(cups['tdc'])
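# Illustrative sketch of the weighted-cup idiom used by seedTDCCup()/drawTDCCounter():
# each entry of `dist` is the number of copies of the current modifier (starting at -4)
# put into the cup, so a random draw from the shuffled list reproduces the table odds.
# Standalone helper for clarity; the real code writes directly into cups['tdc'].
def _build_weighted_cup(dist, first_mod=-4):
    cup, mod = [], first_mod
    for count in dist:
        cup.extend([mod] * count)
        mod += 1
    return cup
# e.g. _build_weighted_cup([1, 3, 7, 8, 8, 7, 4, 1, 1]) yields 40 counters from -4 to +4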
def seedCups(wp):
cups['loner'] = []
cups['outer'] = []
cups['inner'] = []
cups['center'] = []
cups['west inner'] = []
cups['west outer'] = []
# WP1 seed
if wp == 1:
cups['loner'] = seedCup((1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,5,14,1,2,2,3,0,0,0,3,0,2,0,0,1,1,0,0,0,1,0,1,1), 1)
cups['outer'] = seedCup((1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,1,0,1,0,0,0,0,1,1,0,0,0,0,3,12,0,2,2,5,0,0,0,2,0,1,0,0,0,1,0,0,0,0,0,0,0), 1)
cups['inner'] = seedCup((1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,7,8,0,1,1,3,0,1,0,1,1,1,0,0,0,2,0,0,0,0,0,0,0,0), 1)
cups['center'] = seedCup((1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,13,5,1,0,4,2,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0), 1)
if wp == 2:
cups['loner'] = seedCup((1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,6,9,0,1,3,6,0,1,0,2,0,2,0,0,0,3,0,0,0,1,0,1,1), 2)
cups['outer'] = seedCup((1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,1,0,0,0,0,0,0,0,0,0,0,0,4,12,0,1,1,4,0,0,0,3,0,1,0,0,0,1,0,0,0,0,0,0,0), 2)
cups['inner'] = seedCup((1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,9,9,1,0,3,2,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0), 2)
cups['center'] = seedCup((1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,16,5,0,0,3,1,1,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0), 2)
if wp == 3:
cups['loner'] =seedCup((1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,4,12,0,1,2,2,0,0,0,1,0,1,0,1,0,2,0,1,0,1,0,1,1),3)
cups['outer'] = seedCup((1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,3,1,1,0,0,0,0,0,0,0,0,0,0,2,12,0,0,1,4,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,),3)
cups['inner'] = seedCup((1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,2,1,0,0,0,0,0,0,0,0,0,0,0,8,8,0,1,3,2,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0),3)
cups['center'] = seedCup((1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,14,2,1,0,2,0,1,0,1,0,1,0,1,0,1,0,0,0,1,0,0,0,0),3)
cups['west inner'] = seedCup((1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,2,0,0,0,0,0,0,8,7,0,1,3,3,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0),3)
cups['west outer'] = seedCup((1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,0,1,0,0,3,0,0,0,0,0,0,3,10,0,0,0,4,0,1,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0),3)
if wp >= 4:
print('WP4+ not yet implemented!')
if globals.verbose_combat:
print('loners: ', len(cups['loner']))
print('outer : ', len(cups['outer']))
print('inner : ', len(cups['inner']))
print('center: ', len(cups['center']))
print('west inner: ', len(cups['west inner']))
print('west outer: ', len(cups['west outer']))
#printCup(cups['loner'])
# from http://stackoverflow.com/questions/15389768/standard-deviation-of-a-list
def mean(data):
"""Return the sample arithmetic mean of data."""
n = len(data)
if n < 1:
raise ValueError('mean requires at least one data point')
return sum(data)/n # in Python 2 use sum(data)/float(n)
def _ss(data):
"""Return sum of square deviations of sequence data."""
c = mean(data)
ss = sum((x-c)**2 for x in data)
return ss
def pstdev(data):
"""Calculates the population standard deviation."""
n = len(data)
if n < 2:
raise ValueError('variance requires at least two data points')
ss = _ss(data)
pvar = ss/n # the population variance
return pvar**0.5
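# Quick sanity-check sketch for the helpers above (assumes Python 3 division, as mean() uses):
# for [1, 2, 3, 4] the mean is 2.5, the squared deviations sum to 5.0, and the population
# standard deviation is sqrt(5.0 / 4) ~= 1.118.
if __debug__:
    assert abs(pstdev([1, 2, 3, 4]) - 1.118033988749895) < 1e-9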
def summarizeResults(results):
subsSunk = 0
subsDamaged = 0
subsSpotted = 0
subsRTB = 0
subsPromoted = 0
shipsSunk = []
shipsDamaged = []
shipsTonnage = []
for r in results:
if r.subSunk:
subsSunk += 1
if r.subSpotted:
subsSpotted += 1
if r.subDamaged:
subsDamaged += 1
if r.subRTB:
subsRTB += 1
shipsSunk.append(r.sunk)
shipsDamaged.append(r.damaged)
shipsTonnage.append(r.tons)
subsPromoted += r.subPromoted
shipsSunk.sort()
shipsTonnage.sort()
minSunk = 100
maxSunk = 0
meanSunk = 0.0
totalSunk = 0
for s in shipsSunk:
totalSunk += s
minSunk = min(minSunk, s)
maxSunk = max(maxSunk, s)
devSunk = pstdev(shipsSunk)
meanSunk = totalSunk / len(shipsSunk)
minTons = 100
maxTons = 0
meanTons = 0.0
totalTons = 0.0
for t in shipsTonnage:
totalTons += t
minTons = min(minTons, t)
maxTons = max(maxTons, t)
devTons = pstdev(shipsTonnage)
meanTons = totalTons / len(shipsTonnage)
print('Subs spotted:', subsSpotted, '/', len(results))
print('Subs damaged:', subsDamaged, '/', len(results))
print('Subs sunk:', subsSunk, '/', len(results))
print('Subs RTB:', subsRTB, '/', len(results))
print('Subs promoted:', subsPromoted, '/', len(results))
print('Ships sunk:', totalSunk, '[', minSunk, '-', maxSunk, '], mean', meanSunk, '/', devSunk)
print('Ships tonnage:', totalTons, '[', minTons, '-', maxTons, '] mean', meanTons, '/', devTons)
def writeResults(filename, results):
f = open(filename, 'w')
f.write('# name, tgt sunk, tgt tons, subsDmgd, subsSunk, subsSpotted, subsRTB, subsPromoted\n')
for r in results:
f.write(str(r.sub) + ',' + str(r.sunk) + ',' + str(r.tons) + ',' + str(r.subDamaged) + ',' + str(r.subSunk) + ',' + str(r.subSpotted) + ',' + str(r.subRTB) + ',' + str(r.subPromoted) + '\n')
f.close()
def diligentEscortTable(sub):
roll = random.randint(0, 9)
if sub.inexperienced:
roll += 1
if roll == 6:
sub.spotted = True
if roll == 7:
sub.takeDamage(False)
if roll == 8:
sub.takeDamage(True)
if roll > 8:
sub.sink()
def revealCounters(convoy, sub):
sub.column.revealCounters(sub.tacRating)
# get revealed targets in the current and adjacent columns
revealed = sub.column.getVisibleTargets()
for a in sub.column.adjacent:
revealed += a.getVisibleTargets()
tdcCount = min(len(revealed), sub.tacRating)
if globals.verbose_combat:
print('Sub', sub.name, 'has', len(revealed), 'potential targets, placing', tdcCount, 'TDC markers, tactical rating:', sub.tacRating)
return revealed
def placeTDC(revealed, sub, combatRound):
# sorts targets by tonnage and if they are damaged
def getTargetPriority(tgt):
p = tgt.tons
if tgt.damaged:
p += int(tgt.tons/2)
return p
tdcCount = min(len(revealed), sub.tacRating)
revealed.sort(key=getTargetPriority, reverse=True)
revealed = revealed[0:tdcCount]
# set TDC markers [14.14]
for r in revealed:
r.tdc = drawTDCCounter()
if globals.verbose_combat:
print('Target/TDC:', r, r.tdc)
# subtract 1 in the reattack round
if combatRound > 1:
if globals.verbose_combat:
print('Reattack round -- improving target solutions')
for r in revealed:
r.tdc = max(-4, r.tdc-1)
if globals.verbose_combat:
print('Target/TDC:', r, r.tdc)
# re-evaluate target priority with the tdcs
def getUpdatedTargetPriority(tgt):
p = tgt.tons
if tgt.damaged:
p += int(tgt.tons)
try:
# negative TDC values are good
p += tgt.tdc * -1
except AttributeError:  # targets without a TDC marker have no .tdc attribute
return -100
return p
if | |
is None else f'"{failing_module}" [color=red];'
template = f"""\
digraph G {{
rankdir = LR;
node [shape=box];
{failing}
{edges}
}}
"""
arg = quote(template, safe="")
return f"https://dreampuf.github.io/GraphvizOnline/#{arg}"
def _get_source_of_module(self, module: types.ModuleType) -> str:
filename = getattr(module, "__file__", None)
result = (
None
if filename is None or not filename.endswith(".py")
else linecache.getlines(filename, module.__dict__)
)
if result is None:
extra = ""
if self.verbose:
extra = f" See the dependency graph for more info: \n{self._write_dep_graph(module.__name__)}"
raise ValueError(
f'cannot save source for module "{module.__name__}" because '
f'its source file "{filename}" could not be found.{extra}'
)
return "".join(result)
def require_module_if_not_provided(self, module_name: str, dependencies=True):
if self._module_is_already_provided(module_name):
return
self.require_module(module_name, dependencies)
def require_module(self, module_name: str, dependencies=True):
"""This is called by dependencies resolution when it finds that something in the package
depends on the module and it is not already present. It then decides how to provide that module.
The default resolution rules will mark the module as extern if it is part of the standard library,
and call `save_module` otherwise. Clients can subclass this object
and override this method to provide other behavior, such as automatically mocking out a whole class
of modules"""
root_name = module_name.split(".", maxsplit=1)[0]
if self._can_implicitly_extern(root_name):
if self.verbose:
print(
f"implicitly adding {root_name} to external modules "
f"since it is part of the standard library and is a dependency."
)
self.save_extern_module(root_name)
return
for i, (pattern, action, _) in enumerate(self.patterns):
if pattern.matches(module_name):
action(module_name)
self.matched_patterns.add(i)
return
self.save_module(module_name, dependencies)
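# A hedged sketch of the subclassing hook described in the docstring above: override
# require_module() so that a whole family of modules is mocked out instead of packaged.
# The subclass/base-class names and the "vendor_" prefix are illustrative, not from the source.
# class MyExporter(PackageExporter):
#     def require_module(self, module_name: str, dependencies=True):
#         if module_name.split(".", 1)[0].startswith("vendor_"):
#             self.save_mock_module(module_name)  # stub it out instead of saving source
#             return
#         super().require_module(module_name, dependencies)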
def save_module(self, module_name: str, dependencies=True):
"""Save the code for `module_name` into the package. Code for the module is resolved using the `importers` path to find the
module object, and then using its `__file__` attribute to find the source code.
Args:
module_name (str): e.g. `my_package.my_subpackage`, code will be saved to provide code for this package.
dependencies (bool, optional): If True, we scan the source for dependencies (see :ref:`Dependencies`).
"""
module = self._import_module(module_name)
source = self._get_source_of_module(module)
self.save_source_string(
module_name,
source,
hasattr(module, "__path__"),
dependencies,
module.__file__,
)
def save_pickle(
self, package: str, resource: str, obj: Any, dependencies: bool = True
):
"""Save a python object to the archive using pickle. Equivalent to :func:`torch.save` but saving into
the archive rather than a stand-alone file. Standard pickle does not save the code, only the objects.
If `dependencies` is true, this method will also scan the pickled objects for which modules are required
to reconstruct them and save the relevant code.
To be able to save an object where `type(obj).__name__` is `my_module.MyObject`,
`my_module.MyObject` must resolve to the class of the object according to the `importer` order. When saving objects that
have previously been packaged, the importer's `import_module` method will need to be present in the `importer` list
for this to work.
Args:
package (str): The name of the module package this resource should go in (e.g. "my_package.my_subpackage")
resource (str): A unique name for the resource, used to identify it when loading.
obj (Any): The object to save, must be picklable.
dependencies (bool, optional): If True, we scan the source for dependencies (see :ref:`Dependencies`).
"""
filename = self._filename(package, resource)
# Write the pickle data for `obj`
data_buf = io.BytesIO()
pickler = create_pickler(data_buf, self.importer)
pickler.persistent_id = self._persistent_id
pickler.dump(obj)
data_value = data_buf.getvalue()
if dependencies:
all_dependencies = []
for opcode, arg, pos in pickletools.genops(data_value):
if opcode.name == "GLOBAL": # a global reference
assert isinstance(arg, str)
module, field = arg.split(" ")
if module not in all_dependencies:
all_dependencies.append(module)
for dep in all_dependencies:
self.debug_deps.append((package + "." + resource, dep))
if self.verbose:
dep_string = "".join(f" {dep}\n" for dep in all_dependencies)
print(f"{resource} depends on:\n{dep_string}\n")
for module_name in all_dependencies:
self.require_module_if_not_provided(module_name)
self._write(filename, data_value)
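# Standalone sketch of the dependency scan above: walk the pickle opcodes with
# pickletools.genops and collect the module part of every GLOBAL reference, which is the
# same check save_pickle performs. Only pickles that emit GLOBAL opcodes (e.g. protocol 2)
# are covered; this is illustrative, not part of the class API.
def _modules_referenced_by_pickle(data: bytes):
    import pickletools
    modules = []
    for opcode, arg, pos in pickletools.genops(data):
        if opcode.name == "GLOBAL":  # arg is "module qualname" as one space-separated string
            module, _field = arg.split(" ")
            if module not in modules:
                modules.append(module)
    return modules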
def save_text(self, package: str, resource: str, text: str):
"""Save text data to the package
Args:
package (str): The name of the module package this resource should go in (e.g. "my_package.my_subpackage")
resource (str): A unique name for the resource, used to identify it when loading.
text (str): The contents to save
"""
return self.save_binary(package, resource, text.encode("utf-8"))
def save_binary(self, package, resource, binary: bytes):
"""Save raw bytes to the package.
Args:
package (str): The name of the module package this resource should go in (e.g. "my_package.my_subpackage")
resource (str): A unique name for the resource, used to identify it when loading.
binary (bytes): The data to save.
"""
filename = self._filename(package, resource)
self._write(filename, binary)
def mock(
self,
include: "GlobPattern",
*,
exclude: "GlobPattern" = (),
allow_empty: bool = True,
):
"""Replace some required modules with a mock implementation. Mocked modules will return a fake
object for any attribute accessed from it. Because we copy file-by-file, the dependency resolution will sometimes
find files that are imported by model files but whose functionality is never used
(e.g. custom serialization code or training helpers).
Use this function to mock this functionality out without having to modify the original code.
Args:
include (Union[List[str], str]): A string e.g. "my_package.my_subpackage", or list of strings
for the names of the modules to be mocked out. Strings can also be a glob-style pattern
string that may match multiple modules. Any required dependencies that match this pattern
string will be mocked out automatically.
Examples:
'torch.**' -- matches torch and all submodules of torch, e.g. 'torch.nn' and 'torch.nn.functional'
'torch.*' -- matches 'torch.nn' or 'torch.functional', but not 'torch.nn.functional'
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string.
e.g. include='torch.**', exclude='torch.foo' will mock all torch packages except 'torch.foo' Default: []
allow_empty (bool): An optional flag that specifies whether the mock implementation(s) specified by this call
to the `mock` method must be matched to some module during packaging. If a mock is added with allow_empty=False,
and `close` is called (either explicitly or via `__exit__`) and the mock has not been matched to a module
used by the package being exported, an exception is thrown. If allow_empty=True, no such exception is thrown.
"""
self.patterns.append(
(_GlobGroup(include, exclude), self.save_mock_module, allow_empty)
)
def extern(
self,
include: "GlobPattern",
*,
exclude: "GlobPattern" = (),
allow_empty: bool = True,
):
"""Include `module` in the list of external modules the package can import.
This will prevent dependency discovery from saving
it in the package. The importer will load an external module directly from the standard import system.
Code for extern modules must also exist in the process loading the package.
Args:
include (Union[List[str], str]): A string e.g. "my_package.my_subpackage", or list of strings
for the names of the modules to be externed. This can also be a glob-style pattern, as described in :meth:`mock`
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string.
allow_empty (bool): An optional flag that specifies whether the extern modules specified by this call
to the `extern` method must be matched to some module during packaging. If an extern module glob pattern is added
with allow_empty=False, and `close` is called (either explicitly or via `__exit__`) before any modules match that
pattern, an exception is thrown. If allow_empty=True, no such exception is thrown.
"""
self.patterns.append(
(_GlobGroup(include, exclude), self.save_extern_module, allow_empty)
)
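# Usage sketch for the pattern rules above (exporter construction elided; `exporter` stands
# for an instance of this class, and the package names are illustrative): extern the real
# torch install and mock a heavyweight helper package so its files are never packaged.
# exporter.extern("torch.**")
# exporter.mock("pandas.**", exclude="pandas.core", allow_empty=True)
# exporter.save_pickle("my_package", "model.pkl", model)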
def deny(self, include: "GlobPattern", *, exclude: "GlobPattern" = ()):
"""Blocklist modules who names match the given glob patterns from the list of modules the package can import.
If a dependency on any matching packages is found, an error is thrown.
Args:
include (Union[List[str], str]): A string e.g. "my_package.my_subpackage", or list of strings
for the names of the modules to be externed. This can also be a glob-style pattern, as described in :meth:`mock`
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string.
"""
self.patterns.append(
(_GlobGroup(include, exclude), self._reject_denied_module, True)
)
def save_extern_module(self, module_name: str):
"""Add `module_name` to the list of external modules, regardless of whether it is
required by other modules.
Prefer using `extern` to only mark modules extern if they are actually required by the packaged code.
"""
if module_name not in self.extern_modules:
self.extern_modules.append(module_name)
def save_mock_module(self, module_name: str):
"""Add `module_name` to the package, implemented it with a mocked out version that
can be imported but does not include any implementations.
Prefer using `mock` to | |
ATOM LIST", atom_list
c = float(len(atom_list))
for a in atom_list:
if type(a) == type("a"):
a = atomCoord(a)
avg[0] += a[0]
avg[1] += a[1]
avg[2] += a[2]
return array([ avg[0]/c, avg[1]/c, avg[2]/c ], 'f')
def clusterAtoms(atoms, tol=2.0):
""" atoms [x,y,x,z,y,z,x, ...] => [ [x,x,x], [y,y], [ z,z,z,z,z,z,z,z], ... ]
input: PDBQT lines atoms
"""
tol = tol**2
atom_clusters = []
used = []
while len(atoms)>1:
def func(x): return dist(atoms[0], x, sq=False) <= tol
cluster = filter(func, atoms[1:]) + [atoms[0]]
remainder = []
for a in atoms: # XXX use a set here?
if a not in cluster:
remainder.append(a)
atoms = remainder[:]
atom_clusters.append(cluster)
if atoms:
atom_clusters.append(atoms)
#return atom_clusters
return sorted(atom_clusters, key=len, reverse=True)
def makePdb(coord, keyw = "ATOM ", at_index = 1, res_index = 1, atype = 'X', elem = None,
res = "CNT", chain ="Z", bfactor = 10,pcharge = 0.0):
if not elem: elem = atype
# padding bfactor
bfactor = "%2.2f" % bfactor
if len(bfactor.split(".")[0]) == 1:
bfactor = " "+bfactor
# ORG:
#atom = "%s%5d %2s %3s %1s%4d %8.3f%8.3f%8.3f 1.00 %02.2f %8.3f %1s" % (keyw,
atom = "%s%5d %2s %3s %1s%4d %8.3f%8.3f%8.3f 1.00 %s %8.3f %1s" % (keyw,
at_index, elem, res, chain, res_index,
coord[0], coord[1], coord[2], bfactor, pcharge, atype)
#print atom
return atom
def isValidPDBQTplus(f, mode='autodock'):
FP = open(f,'r')
l = FP.readline()
FP.close()
if mode=='autodock':
if l.startswith("USER ADVS_result>"): return True
elif mode=='vina':
if l.startswith("USER ADVS_Vina_result>"): return True
return False
def isValidVinaResult(f):
FP = open(f,'r')
l = FP.readline()
FP.close()
if l.startswith("REMARK VINA RESULT:"): return True
return False
def getPDBQTroot(ligand):
"""
isolate and return the atoms defining the ROOT entity
in an AutoDock/Vina PDBQT file
"""
inside=False
root=[]
#for l in ligand:
# print l
for l in ligand:
if l.startswith('ENDROOT'):
#print "going out"
#for x in root:
# print x
return root
if inside:
#print "APPENDING"
root.append(l)
if l.startswith('ROOT'):
#print "we're inside"
inside=True
# XXX OLD MODEL
"""
def getModel(ligand, model=None):
#print "[ CALLED WITH MODEL=%s ]" % model
poses = []
inside=False
for l in ligand:
if inside: tmp.append(l)
if l.startswith('MODEL'):
tmp=[]
inside=True
if l.startswith('ENDMDL'):
poses.append(tmp)
inside=False
if len(poses)==0:
#print "PROBLEM HERE!!!"
return
if len(poses)==1:
#print "POSES UNIQUE",
return poses[0]
if model == None:
#print "ASKING EVERYTHING ?!?!?!",model,
return poses
else:
try:
#print "MODEL REQUESTED",model
return poses[model]
except:
#print "MODEL DEFAULT 0"
return poses[0]
"""
def getModel(ligand, model=None):
#print "[ CALLED WITH MODEL=%s ]" % model
poses = []
inside=False
for l in ligand:
if inside: tmp.append(l)
if l.startswith('MODEL'):
tmp=[]
inside=True
if l.startswith('ENDMDL'):
poses.append(tmp)
inside=False
if len(poses)==0:
print "PROBLEM HERE!!!"
return
#if len(poses)==1: # XXX DISABLED! INCONSISTENT
# #print "POSES UNIQUE",
# return poses[0]
if model == None:
#print "ASKING EVERYTHING ?!?!?!",model,
return poses
else:
try:
#print "MODEL REQUESTED",model
return poses[model]
except:
#print "MODEL DEFAULT 0"
return poses[0]
def isMultiModelPdb(ligand):
for l in ligand:
if l.startswith('MODEL'):
return True
return False
def getDockedLigandCentroid(ligand, model=None, pdb=True, bfactor=10):
pose = getModel(ligand,model=model)
#pose = getModel(ligand,model=0)
#print "==POSE LEN", len(pose)
root=getPDBQTroot(pose)
centroid=avgCoord(root)
if pdb: return makePdb(centroid, bfactor=bfactor)+"\nTER\n"
else: return centroid
def getReceptorResidues(filename=None, data=None):
"""Accepts a PDB(TQ) file and returns a
nested dictionary of:
chain:residue:atoms
"""
if filename:
lines = getLines(filename)
else:
lines = data
structure = {}
for l in lines:
if l.startswith("ATOM") or l.startswith("HETATM"):
res_t=l[17:20].strip()
res_n=l[22:27].strip()
res=res_t+res_n
chain=l[21].strip()
atom=l[12:17].strip()
if not chain in structure:
structure[chain]={}
if not res in structure[chain]:
structure[chain][res] = []
if not atom in structure[chain][res]:
structure[chain][res].append(atom)
return structure
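# Illustrative sketch of the nested dict getReceptorResidues() builds, chain -> residue
# (type + number) -> atom names. The two ATOM lines below are hypothetical, not taken from
# any real structure:
# getReceptorResidues(data=[
#     "ATOM      1  N   ALA A   1      11.104  13.207   2.100  1.00 20.00           N",
#     "ATOM      2  CA  ALA A   1      12.560  13.300   2.000  1.00 20.00           C",
# ])  ==>  {'A': {'ALA1': ['N', 'CA']}}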
def getAtype(a):
# returns PDB(QT) atom type
#return a.rsplit(None, 1)[1].strip()
return a[77:79].strip()
def frange(start, end=None, step=1):
if end == None:
end = float(start)
start = 0.
else:
start = float(start)
end = float(end)
total = int((float(end)-float(start))/float(step))
#print total
c = 1
out = [start]
while c < total:
out.append(start + c*step)
c += 1
#if c > 100:
# break
#print len(out)
return out
def map2array(data):
""" autogrid map (text list) to NumpyArray """
# grid spacing
spacing = float(data[3].split()[1])
# grid size
pts = data[4].split()[1:]
for i in range(len(pts)):
pts[i] = float(pts[i])+1
# grid center
center = data[5].split()[1:]
for i in range(len(pts)):
center[i] = float(center[i])
# grid points
data = data[6:]
#for i in range(len(data)):
# data[i] = float(data[i])
data = map(float, data) # USELESS? # XXX done anyway later by array
# x,y,z steps
step = [ pts[0]/2 * spacing, pts[1]/2 * spacing, pts[2]/2 * spacing ]
# min, max coord values of the box
v_min = [ center[0]-step[0], center[1]-step[1], center[2]-step[2] ]
v_max = [ center[0]+step[0], center[1]+step[1], center[2]+step[2] ]
data = array(data, 'f').reshape(pts[2], pts[1], pts[0])
return { "values" : data, "spacing" : spacing, 'pts': pts, 'center' : center, 'min' : v_min, 'max' : v_max}
def getInterpolMapValue( ptlist, gridvalues, origin, invstep=None, spacing=None):
# INCOMPLETE! XXX XXX XXX
""" ptlist : numpy.array of pt coordinates for which interpolated
values must be calculated
gridmap : numpy.array of map points (i.e. grid['values'] generated with map2array)
origin : minimum x,y,z coords
spacing : gridmap 'resolution'
invstep : 1./ grid spacing (3-tuple)
(either one of the two can be specified)
"""
if spacing is not None:
invstep = ( 1./ spacing, 1./spacing, 1./spacing )
return ti.trilinterp(ptlist, gridvalues, invstep, origin)
def generateInterpolMap( gridmap, newspacing, agformat=True):
# XXX INCOMPLETE!
newgrid = {}
newgrid['min'] = gridmap['min']
newgrid['max'] = gridmap['max']
newgrid['center'] = gridmap['center']
newgrid['spacing'] = newspacing
space_ratio = gridmap['spacing'] / newspacing
pts = gridmap['pts']
newpts = [ int(x*space_ratio) for x in pts ]
for i in range(len(newpts)):
if not newpts[i] % 2 == 0:
newpts[i] -=1
#newgrid['values'] # XXX ADD THIS? and remove below?
newgrid['pts'] = newpts
ptlist = []
for z_incr in range( newpts[2]+1 ): #XXX remove the +1 ?
for y_incr in range( newpts[1]+1):
for x_incr in range( newpts[0]+1):
pt = [ gridmap['min'][0] + (x_incr*newspacing),
gridmap['min'][1] + (y_incr*newspacing),
gridmap['min'][2] + (z_incr*newspacing),
]
ptlist.append(pt)
origin = gridmap['min']
values = getInterpolMapValue(ptlist, gridmap['values'], origin, spacing=gridmap['spacing'])
if agformat:
values = values.reshape(newpts[2]+1, newpts[1]+1, newpts[0]+1)
newgrid['values'] = values
return newgrid
def writeAutoGridMap( gridmap={}, filename=None, agformat = 1, gpfname = 'gpf.gpf', recname = 'protein'):
header = ( "GRID_PARAMETER_FILE %s\n",
"GRID_DATA_FILE %s.maps.fld\n",
"MACROMOLECULE %s.pdbqt\n",
"SPACING %1.3f\n",
"NELEMENTS %d %d %d\n",
"CENTER %2.3f %2.3f %2.3f\n",
)
#info = header % (gpfname, recname, recname, gridmap['spacing'],
# gridmap['pts'][0], gridmap['pts'][1], gridmap['pts'][2],
# gridmap['center'][0], gridmap['center'][1],gridmap['center'][2])
info = "XXXX"
print "X", len(gridmap['values'])
print "Y", len(gridmap['values'][0])
print "Z", len(gridmap['values'][0][0])
fp = open(filename, 'w')
fp.write(info)
if agformat:
for z in range(gridmap['pts'][2]):
for y in range(gridmap['pts'][1]):
for x in range(gridmap['pts'][0]):
print gridmap['values'][x][y][z]
fp.write('%1.5f\n'% gridmap['values'][x][y][z])
#except:
# print "ERROR", x,y,z, sys.exc_info()[1]
else:
for v in gridmap:
fp.write('%1.5f\n' % v)
fp.close()
def gpf2pdb(input_file, output_file=None, atype = 'Fe', center=False):
# XXX This should become 2 separate functions!
""" Convert AutoGrid GPF to PDB.
Points order in the PDB is the following:
8 ______ 7
/. /|
4 /_.___/3|
| . X | | <-- 9
|5....|./6
|.____|/
1 2
"""
GPF = getLines(input_file)
if not output_file:
output_file = input_file.replace('.gpf', '_BOX.pdb')
for line in GPF:
if len(line) > 3:
tmp=line.split()
if tmp[0] == "gridcenter":
center_x = float(tmp[1])
center_y = float(tmp[2])
center_z = float(tmp[3])
if tmp[0] == "npts":
pts_x = float(tmp[1])
pts_y = float(tmp[2])
pts_z = float(tmp[3])
if tmp[0] == "spacing":
res = float(tmp[1])
step_x = pts_x/2 * res
step_y = pts_y/2 * res
step_z = pts_z/2 * res
Max = [ center_x+step_x,center_y+step_y,center_z+step_z]
Min = [ center_x-step_x,center_y-step_y,center_z-step_z]
pdb_out = []
pdb_out.append("REMARK Generated from : %s " % input_file)
corners = []
# 1
corners.append([ center_x - step_x, center_y - step_y, center_z - step_z] )
# 2
corners.append([ center_x + step_x, center_y - step_y, center_z - step_z] )
# 3
corners.append([ center_x + step_x, center_y + step_y, center_z - step_z] )
# 4
corners.append([ center_x - step_x, center_y + step_y, center_z - step_z] )
# 5
corners.append([ center_x - step_x, center_y - step_y, center_z + step_z] )
# 6
corners.append([ center_x + step_x, center_y - step_y, center_z + step_z] )
# 7
corners.append([ center_x + step_x, center_y + step_y, center_z + step_z] )
# 8
corners.append([ center_x - step_x, | |
import unittest2 as unittest
from nose.plugins.attrib import attr
from mock import MagicMock, patch, mock_open, call
import os
from lxml import etree
import sys
import json
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
import ncclient.transport.errors as NcErrors
from ncclient.operations import RPCError, TimeoutExpiredError
from jnpr.junos.facts.swver import version_info
from jnpr.junos import Device
from jnpr.junos.exception import RpcError
from jnpr.junos import exception as EzErrors
from jnpr.junos.console import Console
__author__ = "<NAME>, <NAME>, <NAME>"
__credits__ = "<NAME>"
if sys.version < '3':
builtin_string = '__builtin__'
else:
builtin_string = 'builtins'
facts = {'domain': None, 'hostname': 'firefly', 'ifd_style': 'CLASSIC',
'version_info': version_info('15.1X46-D15.3'),
'2RE': False, 'serialnumber': 'aaf5fe5f9b88', 'fqdn': 'firefly',
'virtual': True, 'switch_style': 'NONE', 'version': '12.1X46-D15.3',
'HOME': '/cf/var/home/rick', 'srx_cluster': False,
'model': 'FIREFLY-PERIMETER',
'RE0': {'status': 'Testing',
'last_reboot_reason': 'Router rebooted after a '
'normal shutdown.',
'model': 'FIREFLY-PERIMETER RE',
'up_time': '6 hours, 29 minutes, 30 seconds'},
'vc_capable': False, 'personality': 'SRX_BRANCH'}
@attr('unit')
class Test_MyTemplateLoader(unittest.TestCase):
def setUp(self):
from jnpr.junos.device import _MyTemplateLoader
self.template_loader = _MyTemplateLoader()
@patch(builtin_string + '.filter')
def test_temp_load_get_source_filter_false(self, filter_mock):
filter_mock.return_value = []
try:
self.template_loader.get_source(None, None)
except Exception as ex:
import jinja2
self.assertEqual(type(ex), jinja2.exceptions.TemplateNotFound)
@patch('jnpr.junos.device.os.path')
def test_temp_load_get_source_filter_true(self, os_path_mock):
# can't use @patch here, as the with statement will call __exit__
m = mock_open()
with patch(builtin_string + '.open', m, create=True):
self.template_loader.get_source(None, None)
@attr('unit')
class TestDevice(unittest.TestCase):
@patch('ncclient.manager.connect')
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager
self.dev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
self.dev.open()
@patch('ncclient.operations.session.CloseSession.request')
def tearDown(self, mock_session):
self.dev.close()
def test_new_console_return(self):
dev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
port=23, gather_facts=False)
self.assertTrue(isinstance(dev, Console))
@patch('jnpr.junos.device.netconf_ssh')
def test_device_ConnectAuthError(self, mock_manager):
mock_manager.connect.side_effect = NcErrors.AuthenticationError
self.assertRaises(EzErrors.ConnectAuthError, self.dev.open)
@patch('jnpr.junos.device.netconf_ssh')
def test_device_ConnectRefusedError(self, mock_manager):
mock_manager.connect.side_effect = NcErrors.SSHError
self.assertRaises(EzErrors.ConnectRefusedError, self.dev.open)
@patch('jnpr.junos.device.netconf_ssh')
@patch('jnpr.junos.device.datetime')
def test_device_ConnectTimeoutError(self, mock_datetime, mock_manager):
mock_manager.connect.side_effect = NcErrors.SSHError(
"Could not open socket to 1.1.1.1:830")
from datetime import timedelta, datetime
currenttime = datetime.now()
mock_datetime.datetime.now.side_effect = [currenttime,
currenttime +
timedelta(minutes=4)]
self.assertRaises(EzErrors.ConnectTimeoutError, self.dev.open)
@patch('jnpr.junos.device.netconf_ssh')
@patch('jnpr.junos.device.datetime')
def test_device_diff_err_message(self, mock_datetime, mock_manager):
NcErrors.SSHError.message = 'why are you trying :)'
mock_manager.connect.side_effect = NcErrors.SSHError
from datetime import timedelta, datetime
currenttime = datetime.now()
mock_datetime.datetime.now.side_effect = [currenttime,
currenttime +
timedelta(minutes=4)]
self.assertRaises(EzErrors.ConnectError, self.dev.open)
@patch('jnpr.junos.device.netconf_ssh')
def test_device_ConnectUnknownHostError(self, mock_manager):
import socket
mock_manager.connect.side_effect = socket.gaierror
self.assertRaises(EzErrors.ConnectUnknownHostError, self.dev.open)
@patch('jnpr.junos.device.netconf_ssh')
def test_device_other_error(self, mock_manager):
mock_manager.connect.side_effect = TypeError
self.assertRaises(EzErrors.ConnectError, self.dev.open)
def test_device_probe_error(self):
mock_probe = MagicMock()
mock_probe.return_value = None
self.dev.probe = mock_probe
def fn():
self.dev.open(auto_probe=1)
self.assertRaises(EzErrors.ProbeError, fn)
def test_device_property_logfile_isinstance(self):
mock = MagicMock()
with patch(builtin_string + '.open', mock):
if sys.version > '3':
builtin_file = 'io.TextIOWrapper'
else:
builtin_file = builtin_string + '.file'
with patch(builtin_file, MagicMock):
handle = open('filename', 'r')
self.dev.logfile = handle
self.assertEqual(self.dev.logfile, handle)
def test_device_host_mand_param(self):
self.assertRaises(ValueError, Device, user='test',
password='<PASSWORD>',
gather_facts=False)
def test_device_property_logfile_close(self):
self.dev._logfile = MagicMock()
self.dev._logfile.close.return_value = 0
self.dev.logfile = None
self.assertFalse(self.dev._logfile)
def test_device_property_logfile_exception(self):
try:
self.dev.logfile = True
except Exception as ex:
self.assertEqual(type(ex), ValueError)
@patch('jnpr.junos.Device.execute')
def test_device_uptime(self, mock_execute):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
mock_execute.side_effect = self._mock_manager
self.assertEqual(localdev.uptime, 14234)
def test_device_master_is_master(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = ['re1', 'master', 'node',
'fwdd', 'member', 'pfem']
self.assertEqual(localdev.master, True)
def test_device_master_gnf_is_master(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = ['gnf1-re0', 'gnf1-master']
localdev.facts._cache['hostname_info'] = {'bsys-re0': 'foo',
'bsys-re1': 'foo1',
'gnf1-re0': 'bar',
'gnf1-re1': 'bar1'}
self.assertEqual(localdev.master, True)
def test_device_master_is_backup(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = ['re0', 'backup']
self.assertEqual(localdev.master, False)
def test_device_master_gnf_is_backup(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = ['gnf1-re1', 'gnf1-backup']
localdev.facts._cache['hostname_info'] = {'bsys-re0': 'foo',
'bsys-re1': 'foo1',
'gnf1-re0': 'bar',
'gnf1-re1': 'bar1'}
self.assertEqual(localdev.master, False)
def test_device_master_is_re0_only(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['2RE'] = False
localdev.facts._cache['RE_hw_mi'] = False
localdev.facts._cache['current_re'] = ['re0']
self.assertEqual(localdev.master, True)
def test_device_master_is_multi_chassis_non_master1(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['2RE'] = True
localdev.facts._cache['current_re'] = ['lcc1-re1', 'member1-re1',
'lcc1-backup', 'member1-backup']
self.assertEqual(localdev.master, False)
def test_device_master_is_multi_chassis_non_master2(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['2RE'] = True
localdev.facts._cache['current_re'] = ['lcc1-re0', 'member1-re0',
'lcc1-master', 'member1-master',
'member1']
self.assertEqual(localdev.master, False)
def test_device_master_is_none1(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = None
self.assertEqual(localdev.master, None)
def test_device_master_is_none2(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['2RE'] = True
localdev.facts._cache['current_re'] = ['foo', 'bar']
self.assertEqual(localdev.master, None)
@patch('jnpr.junos.device.warnings')
def test_device_master_is_old_facts(self, mock_warn):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
fact_style='old', gather_facts=False)
mock_warn.assert_has_calls([call.warn('fact-style old will be removed '
'in a future release.',
RuntimeWarning)])
self.assertEqual(localdev.master, None)
def test_device_master_setter(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
with self.assertRaises(RuntimeError):
localdev.master = 'foo'
def test_device_re_name_is_re0(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = ['re0', 'backup']
localdev.facts._cache['hostname_info'] = {'re0': 'tapir',
're1': 'tapir1'}
self.assertEqual(localdev.re_name, 're0')
def test_device_re_name_is_lcc_re1(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = ['lcc1-re1', 'member1-re1',
'lcc1-backup', 'member1-backup']
localdev.facts._cache['hostname_info'] = {'re0': 'mj1'}
self.assertEqual(localdev.re_name, 'lcc1-re1')
def test_device_re_name_is_re0_only(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = ['foo']
localdev.facts._cache['hostname_info'] = {'re0': 'mj1'}
self.assertEqual(localdev.re_name, 're0')
def test_device_re_name_is_bsys_re0(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = ['re0']
localdev.facts._cache['hostname_info'] = {'bsys-re0': 'foo'}
self.assertEqual(localdev.re_name, 'bsys-re0')
def test_device_re_name_is_none1(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = None
self.assertEqual(localdev.re_name, None)
def test_device_re_name_is_none2(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
localdev.facts._cache['current_re'] = ['re1', 'master', 'node',
'fwdd', 'member', 'pfem']
localdev.facts._cache['hostname_info'] = None
self.assertEqual(localdev.re_name, None)
@patch('jnpr.junos.device.warnings')
def test_device_re_name_is_old_facts(self, mock_warn):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
fact_style='old', gather_facts=False)
mock_warn.assert_has_calls([call.warn('fact-style old will be removed '
'in a future release.',
RuntimeWarning)])
self.assertEqual(localdev.re_name, None)
def test_device_re_name_setter(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
with self.assertRaises(RuntimeError):
localdev.re_name = 'foo'
def test_device_repr(self):
localdev = Device(host='1.1.1.1', user='test', password='<PASSWORD>',
gather_facts=False)
self.assertEqual(repr(localdev), 'Device(1.1.1.1)')
def test_device_local(self):
Device.ON_JUNOS = True
localdev = Device()
self.assertEqual(localdev._hostname, 'localhost')
@patch('jnpr.junos.device.os')
@patch(builtin_string + '.open')
@patch('paramiko.config.SSHConfig.lookup')
def test_device__sshconf_lkup(self, mock_paramiko, open_mock, os_mock):
os_mock.path.exists.return_value = True
self.dev._sshconf_lkup()
mock_paramiko.assert_called_once_with('1.1.1.1')
@patch('jnpr.junos.device.os')
@patch(builtin_string + '.open')
@patch('paramiko.config.SSHConfig.lookup')
def test_device__sshconf_lkup_def(self, mock_paramiko, open_mock, os_mock):
os_mock.path.exists.return_value = True
self.dev._ssh_config = '/home/rsherman/.ssh/config'
self.dev._sshconf_lkup()
mock_paramiko.assert_called_once_with('1.1.1.1')
@patch('paramiko.config.SSHConfig.lookup')
def test_device__sshconf_lkup_sock_fd(self, mock_paramiko):
self.dev2 = Device(sock_fd=6)
self.dev2._sshconf_lkup()
self.assertEqual(self.dev2._sshconf_lkup(), None)
@patch('os.getenv')
def test_device__sshconf_lkup_path_not_exists(self, mock_env):
mock_env.return_value = '/home/test'
self.assertEqual(self.dev._sshconf_lkup(), None)
@patch('os.getenv')
def test_device__sshconf_lkup_home_not_defined(self, mock_env):
mock_env.return_value = None
self.assertEqual(self.dev._sshconf_lkup(), None)
mock_env.assert_called_with('HOME')
@patch('ncclient.manager.connect')
@patch('jnpr.junos.Device.execute')
def test_device_open(self, mock_connect, mock_execute):
with patch('jnpr.junos.utils.fs.FS.cat') as mock_cat:
mock_cat.return_value = """
domain jls.net
"""
mock_connect.side_effect = self._mock_manager
mock_execute.side_effect = self._mock_manager
self.dev2 = Device(
host='2.2.2.2',
user='test',
password='<PASSWORD>')
self.dev2.open()
self.assertEqual(self.dev2.connected, True)
@patch('ncclient.manager.connect')
@patch('jnpr.junos.Device.execute')
def test_device_outbound(self, mock_connect, mock_execute):
with patch('jnpr.junos.utils.fs.FS.cat') as mock_cat:
mock_cat.return_value = """
domain jls.net
"""
mock_connect.side_effect = self._mock_manager
mock_execute.side_effect = self._mock_manager
self.dev2 = Device(
sock_fd=6,
user='test',
password='<PASSWORD>')
self.dev2.open()
self.assertEqual(self.dev2.connected, True)
@patch('jnpr.junos.Device.execute')
def test_device_facts(self, mock_execute):
with patch('jnpr.junos.utils.fs.FS.cat') as mock_cat:
mock_execute.side_effect = self._mock_manager
mock_cat.return_value = """
domain jls.net
"""
self.dev.facts_refresh()
self.dev.facts._cache['current_re'] = ['re0']
assert self.dev.facts['version'] == facts['version']
@patch('jnpr.junos.Device.execute')
@patch('jnpr.junos.factcache.warnings')
def test_device_facts_error(self, mock_warnings, mock_execute):
with patch('jnpr.junos.utils.fs.FS.cat') as mock_cat:
mock_execute.side_effect = self._mock_manager
mock_cat.side_effect = IOError('File cant be handled')
self.dev.facts_refresh(warnings_on_failure=True)
self.assertTrue(mock_warnings.warn.called)
@patch('jnpr.junos.Device.execute')
@patch('jnpr.junos.device.warnings')
def test_device_facts_error_exception_on_error(self, mock_warnings,
mock_execute):
with patch('jnpr.junos.utils.fs.FS.cat') as mock_cat:
mock_execute.side_effect = self._mock_manager
mock_cat.side_effect = IOError('File cant be handled')
self.assertRaises(IOError, self.dev.facts_refresh,
exception_on_failure=True)
@patch('jnpr.junos.Device.execute')
@patch('jnpr.junos.device.warnings')
def test_device_old_style_facts_error_exception_on_error(self,
mock_warnings,
mock_execute):
self.dev._fact_style = 'old'
with patch('jnpr.junos.utils.fs.FS.cat') as mock_cat:
mock_execute.side_effect = self._mock_manager
mock_cat.side_effect = IOError('File cant be handled')
self.assertRaises(IOError, self.dev.facts_refresh,
exception_on_failure=True)
def test_device_facts_refresh_unknown_fact_style(self):
self.dev._fact_style = 'bad'
with self.assertRaises(RuntimeError):
self.dev.facts_refresh()
def test_device_facts_refresh_old_fact_style_with_keys(self):
self.dev._fact_style = 'old'
with self.assertRaises(RuntimeError):
self.dev.facts_refresh(keys='domain')
def test_device_hostname(self):
self.assertEqual(self.dev.hostname, '1.1.1.1')
def test_device_user(self):
self.assertEqual(self.dev.user, 'test')
def test_device_get_password(self):
self.assertEqual(self.dev.password, None)
def test_device_set_password(self):
self.dev.password = '<PASSWORD>'
self.assertEqual(self.dev._auth_password, '<PASSWORD>')
def test_device_get_timeout(self):
self.assertEqual(self.dev.timeout, 30)
def test_device_set_timeout(self):
self.dev.timeout = 10
self.assertEqual(self.dev.timeout, 10)
def test_device_set_timeout_string(self):
self.dev.timeout = '10'
self.assertEqual(self.dev.timeout, 10)
def test_device_set_timeout_invalid_string_value(self):
with self.assertRaises(RuntimeError):
self.dev.timeout = 'foo'
def test_device_set_timeout_invalid_type(self):
with self.assertRaises(RuntimeError):
self.dev.timeout = [1,2,3,4]
def test_device_manages(self):
self.assertEqual(self.dev.manages, [],
'By default manages will be empty list')
@patch('ncclient.manager.connect')
@patch('jnpr.junos.Device.execute')
def test_device_open_normalize(self, mock_connect, mock_execute):
mock_connect.side_effect = self._mock_manager
self.dev2 = Device(host='2.2.2.2', user='test', password='<PASSWORD>')
self.dev2.open(gather_facts=False, normalize=True)
self.assertEqual(self.dev2.transform, self.dev2._norm_transform)
def test_device_conn_None_transform(self):
self.dev = Device(host='2.2.2.2', user='test', password='<PASSWORD>')
with self.assertRaises(EzErrors.ConnectError):
self.dev.transform
def test_device_set_facts_exception(self):
try:
self.dev.facts = 'test'
except RuntimeError as ex:
self.assertEqual(RuntimeError, type(ex))
def test_device_ofacts_exception(self):
with self.assertRaises(RuntimeError):
ofacts = self.dev.ofacts
def test_device_set_ofacts_exception(self):
with self.assertRaises(RuntimeError):
self.dev.ofacts = False
@patch('jnpr.junos.Device.execute')
def test_device_cli(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.dev.cli('show cli directory',
warning=False).tag, 'cli')
@patch('jnpr.junos.device.json.loads')
def test_device_rpc_json_ex(self, mock_json_loads):
self.dev.facts = facts
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
ex = ValueError('Extra data ')
ex.message = 'Extra data '  # for py3, since exceptions there don't have a .message attribute
mock_json_loads.side_effect = [
ex,
self._mock_manager(etree.fromstring(
'<get-route-information format="json"/>'))]
self.dev.rpc.get_route_information({'format': 'json'})
self.assertEqual(mock_json_loads.call_count, 2)
@patch('jnpr.junos.Device.execute')
def test_device_cli_to_rpc_string(self, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli_to_rpc_string('show system uptime')
self.assertEqual("rpc.get_system_uptime_information()", data)
@patch('jnpr.junos.Device.execute')
def test_device_cli_to_rpc_string_strip_pipes(self, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli_to_rpc_string(
'show system uptime | match foo | count')
self.assertEqual("rpc.get_system_uptime_information()", data)
@patch('jnpr.junos.Device.execute')
def test_device_cli_to_rpc_string_complex(self, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli_to_rpc_string(
'show interfaces ge-0/0/0.0 routing-instance all media')
self.assertEqual("rpc.get_interface_information("
"routing_instance='all', media=True, "
"interface_name='ge-0/0/0.0')", data)
@patch('jnpr.junos.Device.execute')
def test_device_cli_to_rpc_string_invalid(self, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli_to_rpc_string('foo')
self.assertEqual(None, data)
@patch('jnpr.junos.Device.execute')
def test_device_cli_format_json(self, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli('show interface terse',
warning=False, format='json')
self.assertEqual(type(data), dict)
self.assertEqual(data['interface-information'][0]
['physical-interface'][0]['oper-status'][0]['data'],
'up')
@patch('jnpr.junos.Device.execute')
def test_device_cli_conf_info(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertTrue('ge-0/0/0' in self.dev.cli('show configuration',
warning=False))
@patch('jnpr.junos.Device.execute')
def test_device_cli_output(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertTrue('Alarm' | |
<reponame>Jaye-yi/MPoL
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + nbsphinx="hidden"
# %matplotlib inline
# + nbsphinx="hidden"
# %run notebook_setup
# -
# # HD143006 Tutorial Part 1
#
# This tutorial is the first of an eventual three-part series covering a real-world application of RML imaging to the HD 143006 protoplanetary disk dataset observed as part of the DSHARP survey ([Andrews et al. 2018](https://ui.adsabs.harvard.edu/abs/2018ApJ...869L..41A/abstract)). In this tutorial (Part 1), we'll briefly introduce the dataset itself and the MPoL package, and perform some diagnostic imaging to make sure we have the basics configured properly.
#
# [Part 2](HD143006_Part_2.html) of the tutorial will demonstrate how to set up the optimization loop and to create an RML image.
#
#
# ## Viewing the CLEAN image
#
# Before we dig into RML imaging or the MPoL package, let's get our bearings by looking at the fiducial image provided by the DSHARP survey, synthesized using the CLEAN algorithm. We can download the FITS file directly from the DSHARP survey page
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from astropy.utils.data import download_file
fname = download_file(
"https://almascience.eso.org/almadata/lp/DSHARP/images/HD143006_continuum.fits",
cache=True,
pkgname="mpol",
)
# The following commands load the FITS file and calculate the RA and DEC axes using information from the FITS header. For more information on reading FITS files in Python, please consult the [astropy documentation](https://docs.astropy.org/en/stable/io/fits/index.html).
hdul = fits.open(fname)
hdu = hdul[0]
clean_img = np.squeeze(hdu.data)
header = hdu.header
hdul.close()
# +
# get the number of pixels in each direction
nx = header["NAXIS1"]
ny = header["NAXIS2"]
# RA coordinates
CDELT1 = 3600 * header["CDELT1"] # Convert from units of degrees to arcsec
# DEC coordinates
CDELT2 = 3600 * header["CDELT2"] # [arcsec]
# calculate the RA and DEC pixel centers
RA = (np.arange(nx) - nx / 2) * CDELT1 # [arcsec]
DEC = (np.arange(ny) - ny / 2) * CDELT2 # [arcsec]
# matplotlib imshow extent needs to include extra half-pixels.
ext = (
RA[0] - CDELT1 / 2,
RA[-1] + CDELT1 / 2,
DEC[0] - CDELT2 / 2,
DEC[-1] + CDELT2 / 2,
) # [arcsec]
# -
# Here is the CLEAN image produced by the DSHARP team ([Andrews et al. 2018](https://ui.adsabs.harvard.edu/abs/2018ApJ...869L..41A/abstract)).
plt.imshow(clean_img, origin="lower", extent=ext)
# zooming in a little to focus on the disk
r = 0.8
plt.xlim(left=r, right=-r)
plt.ylim(top=r, bottom=-r)
# axis labels
plt.xlabel(r"$\Delta \alpha \cos \delta$ [${}^{\prime\prime}$]")
plt.ylabel(r"$\Delta \delta$ [${}^{\prime\prime}$]")
# ## Plotting the Baselines
#
# Now that we have an idea of what we're looking to image, let's load up the actual dataset. First, we'll download the raw visibilities. To speed things along for the purpose of this tutorial (and avoid introducing CASA as a package dependency), we've already extracted the visibility data as a `.npz` file, which we've made available on Zenodo.
#
# More information on how to do this extraction in general is available on the [visread](https://mpol-dev.github.io/visread/) documentation page, and the specific commands used to produce this `.npz` file are available in the [mpoldatasets](https://github.com/MPoL-dev/mpoldatasets/tree/main/products/HD143006-DSHARP-continuum) package.
fname = download_file(
"https://zenodo.org/record/4904794/files/HD143006_continuum.npz",
cache=True,
pkgname="mpol",
)
# load extracted visibilities from npz file
d = np.load(fname)
uu = d["uu"]
vv = d["vv"]
weight = d["weight"]
data = d["data"]
# To get an idea of the $u,v$ coverage of the dataset, let's plot the $u,v$ coordinates of the visibilities, as we've done in the [cross-validation tutorial](https://mpol-dev.github.io/MPoL/ci-tutorials/crossvalidation.html) and the [visread documentation](https://mpol-dev.github.io/visread/tutorials/introduction_to_casatools.html#Get-the-baselines).
fig, ax = plt.subplots(nrows=1)
ax.scatter(uu, vv, s=0.5, rasterized=True, linewidths=0.0, c="k")
ax.scatter(-uu, -vv, s=0.5, rasterized=True, linewidths=0.0, c="k")
ax.set_xlabel(r"$u$ [k$\lambda$]")
ax.set_ylabel(r"$v$ [k$\lambda$]")
ax.set_aspect("equal")
ax.set_title("Baselines")
# As you can see, there is a very dense grouping of visibilities with $q < 2000\, \mathrm{k}\lambda$, where $q=\sqrt{u^2 + v^2}$, which primarily consists of data taken in more compact ALMA configurations. There are also several visibilities with baselines $> 7000\,\mathrm{k}\lambda$, which correspond to the extended ALMA configurations and are the reason why the DSHARP data can produce some of the highest spatial resolution images of protoplanetary disks to date.
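# As a quick check on the numbers quoted above (a minimal sketch, assuming the `uu` and `vv` arrays loaded earlier are in units of k$\lambda$), we can compute the baseline lengths $q$ directly
q = np.hypot(uu, vv)  # q = sqrt(u^2 + v^2) [klambda]
print("Longest baseline: {:.0f} klambda".format(q.max()))
print("Fraction of visibilities with q < 2000 klambda: {:.2f}".format(np.mean(q < 2000)))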
# ## The MPoL Gridder Object
#
# Though we have lofty ambitions to make RML images with these DSHARP visibilities, our first goals are much humbler (and pragmatic). Basically, we want to make sure that we are loading the visibilities into MPoL correctly with the right units and in the right format.
#
# One of the simplest ways to do that is by instantiating an MPoL [Gridder](https://mpol-dev.github.io/MPoL/api.html#mpol.gridding.Gridder) object, which can perform basic visibility averaging and inverse Fourier transforms as implemented in CASA. This allows us to check that we're starting from the same basic understanding of the data by creating our own version of a dirty image.
#
# To instantiate an MPoL Gridder object, we need to make a choice for the size of the pixels `cell_size` and the number of pixels per side of our image, `npix`. You can read more about these properties in the [GridCoords](https://mpol-dev.github.io/MPoL/api.html#mpol.coordinates.GridCoords) API Documentation.
#
# We'll follow the CLEAN example and use the same `cell_size` provided in the FITS image.
cell_size = np.abs(header["CDELT1"] * 3600) # [arcsec]
print("cell_size is {:.4f} arcseconds".format(cell_size))
# The FITS image was a full 3000x3000 pixels. In general, it is good practice to synthesize an image the full size of the primary beam to avoid potential aliasing from bright sources at the edge of your field, or at least to do so once to check that no bright sources exist in the field of view. That appears to be the rationale behind the FITS images provided by the DSHARP team.
#
# Since the DSHARP team has already checked there are no bright sub-mm sources in the FOV, we can save time and just make a smaller image corresponding to the protoplanetary emission. If `cell_size` is 0.003 arcseconds, `npix=512` pixels should be sufficient to make an image approximately 1.5 arcseconds on a side. Now, let's import the relevant MPoL routines and instantiate the Gridder.
# +
from mpol import gridding
gridder = gridding.Gridder(
cell_size=cell_size,
npix=512,
uu=uu,
vv=vv,
weight=weight,
data_re=data.real, # separating the real and imaginary values of our data
data_im=data.imag,
)
# -
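# As a quick arithmetic check of the `cell_size` and `npix` choices above (a small sketch; `npix` is simply the value we passed to the Gridder), the image width is `npix * cell_size`
npix = 512
print("Image width ~ {:.2f} arcsec".format(npix * cell_size))  # roughly 1.5 arcsec on a side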
# ## Making diagnostic dirty images
#
# Now, we'll use the [Gridder.get_dirty_image()](../api.rst#mpol.gridding.Gridder.get_dirty_image) routine to average the visibilities to the grid defined by gridder, and perform an inverse Fourier transform to get the dirty image.
#
# There are different ways to weight the visibilities during the averaging process to promote certain image characteristics. More info on the weighting can be found in the [CASA documentation](https://casa.nrao.edu/casadocs-devel/stable/imaging/synthesis-imaging/data-weighting). The MPoL gridder is capable of averaging visibilities using uniform, natural, and Briggs robust weighting. We'll demonstrate this functionality by making several different dirty images under different averaging schemes.
# We'll write a function to calculate the dirty image and then plot it.
def image_and_plot(weighting, robust=None):
img, beam = gridder.get_dirty_image(
weighting=weighting, robust=robust, unit="Jy/arcsec^2"
)
kw = {"origin": "lower", "extent": gridder.coords.img_ext}
fig, ax = plt.subplots(ncols=1)
im = ax.imshow(np.squeeze(img), **kw)
cbar = plt.colorbar(im)
cbar.set_label(r"$\mathrm{Jy}/\mathrm{arcsec}^2$")
title = weighting
if robust is not None:
title += " r={:.1f}".format(robust)
ax.set_title(title)
ax.set_xlabel(r"$\Delta \alpha \cos \delta$ [${}^{\prime\prime}$]")
ax.set_ylabel(r"$\Delta \delta$ [${}^{\prime\prime}$]")
r = 0.7
plt.xlim(left=r, right=-r)
plt.ylim(bottom=-r, top=r)
return np.squeeze(img)
# Uniform weighting frequently produces images with the best spatial resolution, but at the expense of sensitivity.
_ = image_and_plot(weighting="uniform")
# Natural weighting frequently produces images with the best sensitivity to point sources, but at the expense of spatial resolution.
_ = image_and_plot(weighting="natural")
# Robust weighting provides a (nonlinear) tradeoff between these two regimes, and some form of robust weighting is typically chosen for ALMA imaging.
_ = image_and_plot(weighting="briggs", robust=-1.0)
img_robust_0 = image_and_plot(weighting="briggs", robust=0.0)
# ## Comparing Dirty and CLEANed Images
#
# All in all, it looks like our diagnostic dirty images produced by the MPoL gridder look similar enough in scale and orientation to the DSHARP CLEAN image that we have some confidence that we're reading in the visibility data correctly.
#
# To wrap things up, let's directly compare a dirty image with the DSHARP CLEAN image, which was produced using robust=0.0. (A slight $u,v$ taper was also used, but we'll ignore that detail here).
# +
fig, ax = plt.subplots(ncols=2, figsize=(5.5, 3))
ax[0].imshow(clean_img, origin="lower", extent=ext)
ax[0].set_title("DSHARP CLEAN")
ax[1].imshow(img_robust_0, origin="lower", extent=gridder.coords.img_ext)
ax[1].set_title("MPoL Dirty")
for a in ax:
r = 0.7
a.set_xlim(left=r, right=-r)
a.set_ylim(bottom=-r, top=r)
a.xaxis.set_visible(False)
a.yaxis.set_visible(False)
fig.subplots_adjust()
# -
# All in all, it seems like we're on the right track. The diagnostic dirty image that we've produced with the MPoL gridder object is | |
"""Driver class
A Driver provides an interface to an Entity and its Attributes.
"""
import re
import itertools
import logging
import clusto
from clusto.schema import *
from clusto.exceptions import *
from clusto.drivers.base.clustodriver import *
class Driver(object):
"""Base Driver.
The Driver class provides a proxy interface for managing an Entity and
its Attributes. It provides many helper functions including attribute
setters and accessors, attribute querying, and a handful of conventions.
Every driver defines a _clusto_type and a _driver_name member variable.
Upon creation these become the type and driver for the Entity and provide
a mechanism for choosing the correct driver for a given Entity.
A Driver can be created by passing either the name (a string) for a new
Entity you'd like to create, an already instantiated Entity object, or a
Driver object (which has already been instantiated and is managing an
Entity).
If a _properties member dictionary is defined, its entries will be treated as
default values for the given Entity attributes and are also exposed via a
simpler mydriver.key access pattern. So for:
>>> class MyDriver(Driver):
>>> ...
>>> _properties = {'propA': 10, 'propB': "default1"}
>>> ...
>>> d = MyDriver('foo')
>>> d.propA == 10
True
>>> d.propB == "default1"
True
Only properties with non-None default values are set in the clusto db at
initial instantiation time (when creating a brand new entity).
>>> d.propA = 54
>>> d.propA == 54
True
Several conventions are also exposed via the Driver interface.
"""
__metaclass__ = ClustoDriver
_clusto_type = "generic"
_driver_name = "entity"
_properties = dict()
@property
def type(self):
return self.entity.type
@property
def driver(self):
return self.entity.driver
def __new__(cls, name_driver_entity, **kwargs):
if isinstance(name_driver_entity, Driver):
return name_driver_entity
else:
return object.__new__(cls)
def __init__(self, name_driver_entity, **kwargs):
if not isinstance(name_driver_entity, (str, unicode, Entity, Driver)):
raise TypeError("First argument must be a string, "
"Driver, or Entity.")
if isinstance(name_driver_entity, Driver):
return
if isinstance(name_driver_entity, Entity):
self.entity = name_driver_entity
self._choose_best_driver()
return
elif isinstance(name_driver_entity, (str, unicode)):
try:
existing = clusto.get_by_name(name_driver_entity)
except LookupError, x:
existing = None
if existing:
raise NameException("Driver with the name %s already exists."
% (name_driver_entity))
self.entity = Entity(name_driver_entity,
driver=self._driver_name,
clustotype=self._clusto_type)
else:
raise TypeError("Could not create driver from given arguments.")
for key, val in self._properties.iteritems():
if key in kwargs:
val = kwargs[key]
if val is None:
continue
setattr(self, key, val)
def __eq__(self, other):
if isinstance(other, Entity):
return self.entity.name == other.name
elif isinstance(other, Driver):
return self.entity.name == other.entity.name
else:
return False
def __repr__(self):
s = "%s(name=%s, type=%s, driver=%s)"
return s % (self.__class__.__name__, self.entity.name,
self.entity.type, self.entity.driver)
def __cmp__(self, other):
if hasattr(other, 'name'):
return cmp(self.name, other.name)
elif other is None:
return 1
else:
raise TypeError("Cannot compare %s with %s", type(self), type(other))
def __hash__(self):
return hash(self.entity.name)
def __contains__(self, other):
return self.has_attr(key="_contains", value=other)
def _choose_best_driver(self):
"""
Examine the attributes of our entity and set the best driver class and
mixins.
"""
self.__class__ = DRIVERLIST[self.entity.driver]
name = property(lambda x: x.entity.name)
def _check_attr_name(self, key):
"""
Check that the key does not contain invalid characters.
Raise NameException on failure.
"""
if not isinstance(key, basestring):
raise TypeError("An attribute name must be a string.")
if not re.match('^[A-Za-z_]+[0-9A-Za-z_-]*$', key):
raise NameException("Attribute name %s is invalid. "
"Attribute names may not contain periods or "
"comas." % key)
def __getattr__(self, name):
if name in self._properties:
attr = self.attr_query(name, subkey='property')
if not attr:
return self._properties[name]
else:
return attr[0].value
else:
raise AttributeError("Attribute %s does not exist." % name)
def __setattr__(self, name, value):
if name in self._properties:
self.set_attr(name, value, subkey='property')
else:
object.__setattr__(self, name, value)
@classmethod
def ensure_driver(self, obj, msg=None):
"""Ensure that the given argument is a Driver.
If the object is an Entity it will be turned into a Driver and then
returned. If it's a Driver it will be returned unaffected. Otherwise
a TypeError is raised with either a generic or given message.
"""
if isinstance(obj, Entity):
d = Driver(obj)
elif isinstance(obj, Driver):
d = obj
else:
if not msg:
msg = "Not a Driver."
raise TypeError(msg)
return d
@classmethod
def do_attr_query(cls, key=(), value=(), number=(),
subkey=(), ignore_hidden=True, sort_by_keys=False,
glob=False, count=False, querybase=None, return_query=False,
entity=None):
"""Does queries against all Attributes using the DB."""
clusto.flush()
if querybase:
query = querybase
else:
query = Attribute.query()
### This is bunk, gotta fix it
if isinstance(cls, Driver):
query = query.filter(and_(Attribute.entity_id==Entity.entity_id,
Entity.driver == cls._driver_name,
Entity.type == cls._clusto_type))
if entity:
query = query.filter_by(entity_id=entity.entity_id)
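# Note: an empty tuple () is the sentinel for "not specified"; with glob=True,
# '*' wildcards in key/subkey are translated to SQL LIKE's '%'.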
if key is not ():
if glob:
query = query.filter(Attribute.key.like(key.replace('*', '%')))
else:
query = query.filter_by(key=key)
if subkey is not ():
if glob and subkey:
query = query.filter(Attribute.subkey.like(subkey.replace('*', '%')))
else:
query = query.filter_by(subkey=subkey)
if value is not ():
typename = Attribute.get_type(value)
if typename == 'relation':
if isinstance(value, Driver):
value = value.entity.entity_id
query = query.filter_by(relation_id=value)
else:
query = query.filter_by(**{typename+'_value':value})
if number is not ():
if isinstance(number, bool) or number is None:
if number == True:
query = query.filter(Attribute.number != None)
else:
query = query.filter(Attribute.number == None)
elif isinstance(number, (int, long)):
query = query.filter_by(number=number)
else:
raise TypeError("number must be either a boolean or an integer.")
if ignore_hidden and ((key and not key.startswith('_')) or key is ()):
query = query.filter(not_(Attribute.key.like('\\_%', escape='\\')))
if sort_by_keys:
query = query.order_by(Attribute.key)
if count:
return query.count()
if return_query:
return query
return query.all()
def attr_query(self, *args, **kwargs):
"""Queries all attributes of *this* entity using the DB."""
kwargs['entity'] = self.entity
return self.do_attr_query(*args, **kwargs)
@classmethod
def attr_filter(cls, attrlist, key=(), value=(), number=(),
subkey=(), ignore_hidden=True,
sort_by_keys=True,
regex=False,
clusto_types=None,
clusto_drivers=None,
):
"""Filter attribute lists. (Uses generator comprehension)
Given a list of Attributes filter them based on exact matches of key,
number, subkey, value.
There are some special cases:
if number is True then the number variable must be non-null; if
number is False then the number variable must be null.
if ignore_hidden is True (the default) then filter out keys that begin
with an underscore; if False, don't filter out such keys. If you
specify a key that begins with an underscore as one of the arguments
then ignore_hidden is assumed to be False.
if sort_by_keys is True then attributes are returned sorted by keys,
otherwise their order is undefined.
if regex is True then treat the key, subkey, and value query
parameters as regular expressions.
clusto_types is a list of types that the entities referenced by
relation attributes must match.
clusto_drivers is a list of drivers that the entities referenced by
relation attributes must match.
"""
result = attrlist
def subfilter(attrs, val, name):
if regex:
testregex = re.compile(val)
result = (attr for attr in attrs
if testregex.match(getattr(attr, name)))
else:
result = (attr for attr in attrs
if getattr(attr, name) == val)
return result
parts = ((key, 'key'), (subkey, 'subkey'), (value, 'value'))
argattr = ((val,name) for val,name in parts if val is not ())
for v, n in argattr:
result = subfilter(result, v, n)
if number is not ():
if isinstance(number, bool) or number is None:
if number:
result = (attr for attr in result if attr.number is not None)
else:
result = (attr for attr in result if attr.number is None)
elif isinstance(number, (int, long)):
result = (attr for attr in result if attr.number == number)
else:
raise TypeError("number must be either a boolean or an integer.")
if value:
result = (attr for attr in result if attr.value == value)
if key and key.startswith('_'):
ignore_hidden = False
if ignore_hidden:
result = (attr for attr in result if not attr.key.startswith('_'))
if clusto_drivers:
cdl = [clusto.get_driver_name(n) for n in clusto_drivers]
result = (attr for attr in result if attr.is_relation and attr.value.entity.driver in cdl)
if clusto_types:
ctl = [clusto.get_type_name(n) for n in clusto_types]
result = (attr for attr in result if attr.is_relation and attr.value.entity.type in ctl)
if sort_by_keys:
result = sorted(result)
return list(result)
def _itemize_attrs(self, attrlist):
return [(x.keytuple, x.value) for x in attrlist]
def attrs(self, *args, **kwargs):
"""Return attributes for this entity.
(filters whole attribute list as opposed to querying the db directly)
"""
if 'merge_container_attrs' in kwargs:
merge_container_attrs = kwargs.pop('merge_container_attrs')
else:
merge_container_attrs = False
ignore_memcache = False
if 'ignore_memcache' in kwargs:
ignore_memcache = kwargs.pop('ignore_memcache')
if clusto.SESSION.memcache and not ignore_memcache:
logging.debug('Pulling info from memcache when | |
<reponame>oliverwatts/snickery
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Project:
## Author: <NAME> - <EMAIL>
import sys
import os
import glob
from argparse import ArgumentParser
import h5py
import numpy as np
from const import target_rep_widths
from speech_manip import get_speech
from util import safe_makedir, readlist
from file_naming import get_data_dump_name
from data_manipulation import locate_stream_directories, get_mean_std, compose_speech, standardise
DODEBUG = False
def debug(msg):
if DODEBUG:
print msg
def main_work(config, overwrite_existing_data=False):
assert config['target_representation'] == 'epoch'
database_fname = get_data_dump_name(config)
if os.path.isfile(database_fname):
if not overwrite_existing_data:
sys.exit('Data already exists at %s -- run with -X to overwrite it'%(database_fname))
else:
os.system('rm '+database_fname)
n_train_utts = config.get('n_train_utts', 0) ## default (0): use all sentences
target_feat_dirs = config['target_datadirs']
datadims_target = config['datadims_target']
stream_list_target = config['stream_list_target']
## get dicts mapping e.g. 'mgc': '/path/to/mgc/' : -
target_stream_dirs = locate_stream_directories(target_feat_dirs, stream_list_target)
join_feat_dirs = config['join_datadirs']
datadims_join = config['datadims_join']
stream_list_join = config['stream_list_join']
## get dicts mapping e.g. 'mgc': '/path/to/mgc/' : -
join_stream_dirs = locate_stream_directories(join_feat_dirs, stream_list_join)
## First, work out initial list of training utterances based on files present in first stream subdir:
first_stream = stream_list_target[0] ## <-- typically, mgc, but not really important
utt_list = sorted(glob.glob(target_stream_dirs[first_stream] +'/*.' + first_stream))
flist = [os.path.split(fname)[-1].replace('.'+first_stream,'') for fname in utt_list]
## Next, limit training utterances by number or by pattern:
if type(n_train_utts) == int:
if (n_train_utts == 0 or n_train_utts > len(flist)):
n_train_utts = len(flist)
flist = flist[:n_train_utts]
elif type(n_train_utts) == str:
match_expression = n_train_utts
flist = [name for name in flist if match_expression in name]
print 'Selected %s utts with pattern %s'%(len(flist), match_expression)
## Also filter for test material, in case they are in same directory:
if 'test_patterns' in config:
test_flist = []
for fname in flist:
for pattern in config['test_patterns']:
if pattern in fname:
test_flist.append(fname)
flist = [name for name in flist if name not in test_flist]
## Finally, only take utterances which occur in train_list, if it is given in config:
if 'train_list' in config:
assert os.path.isfile(config['train_list']), 'File %s does not exist'%(config['train_list'])
train_list = readlist(config['train_list'])
train_list = dict(zip(train_list, train_list))
flist = [name for name in flist if name in train_list]
assert len(flist) > 0
## 1A) First pass: get mean and std per stream for each of {target,join}
(mean_vec_target, std_vec_target) = get_mean_std(target_stream_dirs, stream_list_target, datadims_target, flist)
(mean_vec_join, std_vec_join) = get_mean_std(join_stream_dirs, stream_list_join, datadims_join, flist)
## 1B) Initialise HDF5; store mean and std in HDF5:
f = h5py.File(database_fname, "w")
mean_target_dset = f.create_dataset("mean_target", np.shape(mean_vec_target), dtype='f', track_times=False)
std_target_dset = f.create_dataset("std_target", np.shape(std_vec_target), dtype='f', track_times=False)
mean_join_dset = f.create_dataset("mean_join", np.shape(mean_vec_join), dtype='f', track_times=False)
std_join_dset = f.create_dataset("std_join", np.shape(std_vec_join), dtype='f', track_times=False)
mean_target_dset[:] = mean_vec_target[:]
std_target_dset[:] = std_vec_target[:]
mean_join_dset[:] = mean_vec_join[:]
std_join_dset[:] = std_vec_join[:]
## Set some values....
target_dim = mean_vec_target.shape[0]
join_dim = mean_vec_join.shape[0]
target_rep_size = target_dim * target_rep_widths[config.get('target_representation', 'epoch')]
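## Convert the configured frame shift from milliseconds to seconds, then to a shift in waveform samples: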
fshift_seconds = (0.001 * config['frameshift_ms'])
fshift = int(config['sample_rate'] * fshift_seconds)
samples_per_frame = fshift
print 'Go through data to find number of units:- '
n_units = 0
new_flist = []
first_stream, first_streamdir = sorted(target_stream_dirs.items())[0]
for base in flist:
featfile = os.path.join(first_streamdir, base + '.' + first_stream)
if not os.path.exists(featfile):
print 'skipping %s'%(featfile)
continue
speech = get_speech(featfile, datadims_target[first_stream])
npoint, _ = speech.shape
n_units += npoint
new_flist.append(base)
flist = new_flist
print '%s units (%s)'%(n_units, config.get('target_representation', 'epoch'))
## 2) Get ready to store data in HDF5:
total_target_dim = target_rep_size
## maxshape makes a dataset resizable
train_dset = f.create_dataset("train_unit_features", (n_units, total_target_dim), maxshape=(n_units, total_target_dim), dtype='f', track_times=False)
phones_dset = f.create_dataset("train_unit_names", (n_units,), maxshape=(n_units,), dtype='|S50', track_times=False)
filenames_dset = f.create_dataset("filenames", (n_units,), maxshape=(n_units,), dtype='|S50', track_times=False)
unit_index_within_sentence_dset = f.create_dataset("unit_index_within_sentence_dset", (n_units,), maxshape=(n_units,), dtype='i', track_times=False)
join_contexts_dset = f.create_dataset("join_contexts", (n_units+1, join_dim), maxshape=(n_units+1, join_dim), dtype='f', track_times=False)
### TODO: use?
if config.get('store_full_magphase', False):
mp_mag_dset = f.create_dataset("mp_mag", (n_units, 513), maxshape=(n_units, 513), dtype='f', track_times=False)
mp_imag_dset = f.create_dataset("mp_imag", (n_units, 513), maxshape=(n_units, 513), dtype='f', track_times=False)
mp_real_dset = f.create_dataset("mp_real", (n_units, 513), maxshape=(n_units, 513), dtype='f', track_times=False)
mp_fz_dset = f.create_dataset("mp_fz", (n_units, 1), maxshape=(n_units, 1), dtype='f', track_times=False)
## Standardise data (within streams), compose, add VUV, fill F0 gaps with utterance mean voiced value:
start = 0
print 'Composing ....'
print flist
new_flist = []
for base in flist:
print base
#! pm_file = os.path.join(config['pm_datadir'], base + '.pm')
# if not(os.path.isfile(pm_file)):
# print 'Warning: no pm -- skip!'
# continue
#! ## Get pitchmarks (to join halfphones on detected GCIs):-
# pms_seconds = read_pm(pm_file)
# if pms_seconds.shape == (1,1):
# print 'Warning: trouble reading pm file -- skip!'
# continue
### Get speech params for target cost (i.e. probably re-generated speech for consistency):
t_speech = compose_speech(target_stream_dirs, base, stream_list_target, datadims_target)
if t_speech.shape == (1,1): ## bad return value
continue
t_speech = standardise(t_speech, mean_vec_target, std_vec_target)
### Get speech params for join cost (i.e. probably natural speech).
### These are expected to have already been resampled so that they are pitch-synchronous.
j_speech = compose_speech(join_stream_dirs, base, stream_list_join, datadims_join)
if j_speech.size == 1: ## bad return value
continue
j_speech = standardise(j_speech, mean_vec_join, std_vec_join)
j_frames, j_dim = j_speech.shape
# if j_frames != len(pms_seconds):
# print (j_frames, len(pms_seconds))
# print 'Warning: number of rows in join cost features not same as number of pitchmarks:'
# print 'these features should be pitch synchronous. Skipping utterance!'
# continue
t_frames, t_dim = t_speech.shape
if j_frames != t_frames:
print (j_frames, t_frames)
print 'Warning: number of rows in target cost features not same as number in join cost features:'
print ' Skipping utterance!'
continue
first_sentence_in_corpus = base==flist[0]
if config.get('REPLICATE_IS2018_EXP', False):
unit_features = t_speech[1:-1, :] ## Representations for target cost
if first_sentence_in_corpus:
context_data = j_speech[:-1, :]
else:
context_data = j_speech[1:-1, :]
else: ## this should be consistent with how hi-dim frames are selected and remove a bug
unit_features = t_speech ## do not trim frames
if first_sentence_in_corpus:
initial_history = j_speech[0,:].reshape((1,-1)) ### assume that first frame is silence
context_data = np.vstack([initial_history, j_speech])
else:
context_data = j_speech
## TODO: reinstate this?:--
ADD_PHONETIC_EPOCH = False
if ADD_PHONETIC_EPOCH:
labfile = os.path.join(config['label_datadir'], base + '.' + config['lab_extension'])
labs = read_label(labfile, config['quinphone_regex'])
unit_names = resample_labels.pitch_synchronous_resample_label(48000, 0.005, pms_samples, labs)
else:
unit_names = ['_']*(t_speech.shape[0])
unit_names = np.array(unit_names)
#
#
if config.get('REPLICATE_IS2018_EXP', False):
unit_names = unit_names[1:-1]
m,n = unit_features.shape
filenames = [base] * m
unit_index_within_sentence = np.arange(m)
## TODO: reinstate this as hi-dim writer?:--
CHECK_MAGPHASE_SIZES = False
if CHECK_MAGPHASE_SIZES: # config.get('store_full_magphase', False):
print 'CHECK_MAGPHASE_SIZES'
for extn in ['mag','imag','real','f0']:
direc = extn + '_full'
if extn == 'f0':
sdim = 1
else:
sdim = 513
fname = os.path.join(config['full_magphase_dir'], direc, base+'.'+extn)
full_stream = get_speech(fname, sdim)
#full_stream = full_stream[1:-1,:]
print direc
print full_stream.shape
## TODO: reinstate this as hi-dim writer?:--
if config.get('store_full_magphase', False):
mp_data = []
for extn in ['mag','imag','real','f0']:
direc = extn + '_full'
if extn == 'f0':
sdim = 1
else:
sdim = 513
fname = os.path.join(config['full_magphase_dir'], direc, base+'.'+extn)
full_stream = get_speech(fname, sdim)
full_stream = full_stream[1:-1,:]
print direc
print full_stream.shape
mp_data.append(full_stream)
## Add everything to database:
train_dset[start:start+m, :] = unit_features
phones_dset[start:start+m] = unit_names
filenames_dset[start:start+m] = filenames
unit_index_within_sentence_dset[start:start+m] = unit_index_within_sentence
#! cutpoints_dset[start:start+m,:] = cutpoints
### join_contexts has extra initial frame of history -- deal with it:
if first_sentence_in_corpus:
join_contexts_dset[start:start+m+1, :] = context_data
else:
join_contexts_dset[start+1:start+m+1, :] = context_data
### TODO: use?
if config.get('store_full_magphase', False):
(mp_mag, mp_imag, mp_real, mp_fz) = mp_data
mp_mag_dset[start:start+m, :] = mp_mag
mp_imag_dset[start:start+m, :] = mp_imag
mp_real_dset[start:start+m, :] = mp_real
mp_fz_dset[start:start+m, :] = mp_fz
start += m
new_flist.append(base)
## Number of units was computed before without considering dropped utterances, actual number
## will be smaller. Resize the data:
actual_nframes = start
print '\n\n\nNumber of units actually written:'
print actual_nframes
print
train_dset.resize(actual_nframes, axis=0)
phones_dset.resize(actual_nframes, axis=0)
filenames_dset.resize(actual_nframes, axis=0)
unit_index_within_sentence_dset.resize(actual_nframes, axis=0)
join_contexts_dset.resize(actual_nframes+1, axis=0)
### TODO
if config.get('store_full_magphase', False):
mp_mag_dset.resize(actual_nframes, axis=0)
mp_imag_dset.resize(actual_nframes, axis=0)
mp_real_dset.resize(actual_nframes, axis=0)
mp_fz_dset.resize(actual_nframes, axis=0)
print
print 'Storing hybrid voice data:'
for thing in f.values():
print thing
f.close()
print 'Stored training data for %s sentences to %s'%(n_train_utts, database_fname)
if __name__ == '__main__':
#################################################
# ======== process command line ==========
a = ArgumentParser()
a.add_argument('-c', dest='config_fname', required=True)
a.add_argument('-X', dest='overwrite_existing_data', | |
makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_post(datacenter, async_req=True)
>>> result = thread.get()
:param datacenter: Datacenter to be created (required)
:type datacenter: Datacenter
:param pretty: Controls whether response is pretty-printed (with indentation and new lines)
:type pretty: bool
:param depth: Controls the details depth of response objects. Eg. GET /datacenters/[ID] - depth=0: only direct properties are included. Children (servers etc.) are not included - depth=1: direct properties and children references are included - depth=2: direct properties and children properties are included - depth=3: direct properties and children properties and children's children are included - depth=... and so on
:type depth: int
:param x_contract_number: Users having more than 1 contract need to provide contract number, against which all API requests should be executed
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Datacenter
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_post_with_http_info(datacenter, **kwargs) # noqa: E501
def datacenters_post_with_http_info(self, datacenter, **kwargs): # noqa: E501
"""Create a Data Center # noqa: E501
Virtual data centers are the foundation of the platform. They act as logical containers for all other objects you will be creating, e.g. servers. You can provision as many data centers as you want. Datacenters have their own private network and are logically segmented from each other to create isolation. You can use this POST method to create a simple datacenter or to create a datacenter with multiple objects under it such as servers and storage volumes. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_post_with_http_info(datacenter, async_req=True)
>>> result = thread.get()
:param datacenter: Datacenter to be created (required)
:type datacenter: Datacenter
:param pretty: Controls whether response is pretty-printed (with indentation and new lines)
:type pretty: bool
:param depth: Controls the details depth of response objects. Eg. GET /datacenters/[ID] - depth=0: only direct properties are included. Children (servers etc.) are not included - depth=1: direct properties and children references are included - depth=2: direct properties and children properties are included - depth=3: direct properties and children properties and children's children are included - depth=... and so on
:type depth: int
:param x_contract_number: Users having more than 1 contract need to provide contract number, against which all API requests should be executed
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Datacenter, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_post" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter' is set
if self.api_client.client_side_validation and ('datacenter' not in local_var_params or # noqa: E501
local_var_params['datacenter'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter` when calling `datacenters_post`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_post`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_post`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'datacenter' in local_var_params:
body_params = local_var_params['datacenter']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'Datacenter'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_put(self, datacenter_id, datacenter, **kwargs): # noqa: E501
"""Modify a Data Center # noqa: E501
You can use update datacenter to re-name the datacenter or update its description # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_put(datacenter_id, datacenter, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the datacenter (required)
:type datacenter_id: str
:param datacenter: Modified Data Center (required)
:type datacenter: Datacenter
:param pretty: Controls whether response is pretty-printed (with indentation and new lines)
:type pretty: bool
:param depth: Controls the details depth of response objects. Eg. GET /datacenters/[ID] - depth=0: only direct properties are included. Children (servers etc.) are not included - depth=1: direct properties and children references are included - depth=2: direct properties and children properties are included - depth=3: direct properties and children properties and children's children are included - depth=... and so on
:type depth: int
:param x_contract_number: Users having more than 1 contract need to provide contract number, against which all API requests should be executed
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Datacenter
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_put_with_http_info(datacenter_id, datacenter, **kwargs) # noqa: E501
def datacenters_put_with_http_info(self, datacenter_id, datacenter, **kwargs): # noqa: E501
"""Modify a Data Center # noqa: E501
You can use update datacenter to re-name the datacenter or update its description # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_put_with_http_info(datacenter_id, datacenter, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the datacenter (required)
:type datacenter_id: str
:param datacenter: Modified Data Center (required)
:type datacenter: Datacenter
:param pretty: Controls whether response is pretty-printed (with indentation and new lines)
:type pretty: bool
:param depth: Controls the details depth of response objects. Eg. GET /datacenters/[ID] - depth=0: only direct properties are included. Children (servers etc.) are not included - depth=1: direct properties and children | |
may be obtained by inspecting the return value's
`value` attribute.
Simple increment::
rv = cb.counter("key")
rv.value
# 42
Increment by 10::
rv = cb.counter("key", delta=10)
Decrement by 5::
rv = cb.counter("key", delta=-5)
Increment by 20, set initial value to 5 if it does not exist::
rv = cb.counter("key", delta=20, initial=5)
Increment three keys::
kv = cb.counter_multi(["foo", "bar", "baz"])
for key, result in kv.items():
print "Key %s has value %d now" % (key, result.value)
.. seealso:: :meth:`counter_multi`
"""
return _Base.counter(self, key, delta=delta, initial=initial, ttl=ttl)
def lookup_in(self, key, *specs, **kwargs):
"""Atomically retrieve one or more paths from a document.
:param key: The key of the document to lookup
:param spec: A list of specs (see :mod:`.couchbase_core.subdocument`)
:return: A :class:`.couchbase_core.result.SubdocResult` object.
This object contains the results and any errors of the
operation.
Example::
import couchbase_core.subdocument as SD
rv = cb.lookup_in('user',
SD.get('email'),
SD.get('name'),
SD.exists('friends.therock'))
email = rv[0]
name = rv[1]
friend_exists = rv.exists(2)
.. seealso:: :meth:`retrieve_in` which acts as a convenience wrapper
"""
return super(Bucket, self).lookup_in(key,specs,**kwargs)
def mutate_in(self, key, *specs, **kwargs):
"""Perform multiple atomic modifications within a document.
:param key: The key of the document to modify
:param specs: A list of specs (See :mod:`.couchbase_core.subdocument`)
:param bool create_doc:
Whether the document should be create if it doesn't exist
:param bool insert_doc: If the document should be created anew, and the
operations performed *only* if it does not exist.
:param bool upsert_doc: If the document should be created anew if it
does not exist. If it does exist the commands are still executed.
:param kwargs: CAS, etc.
:return: A :class:`~.couchbase_core.result.SubdocResult` object.
Here's an example of adding a new tag to a "user" document
and incrementing a modification counter::
import couchbase_core.subdocument as SD
# ....
cb.mutate_in('user',
SD.array_addunique('tags', 'dog'),
SD.counter('updates', 1))
.. note::
The `insert_doc` and `upsert_doc` options are mutually exclusive.
Use `insert_doc` when you wish to create a new document with
extended attributes (xattrs).
.. seealso:: :mod:`.couchbase_core.subdocument`
"""
return super(Bucket, self).mutate_in(key, specs, **kwargs)
def retrieve_in(self, key, *paths, **kwargs):
"""Atomically fetch one or more paths from a document.
Convenience method for retrieval operations. This functions
identically to :meth:`lookup_in`. As such, the following two
forms are equivalent:
.. code-block:: python
import couchbase_v2.subdocument as SD
rv = cb.lookup_in(key,
SD.get('email'),
SD.get('name'),
SD.get('friends.therock')
email, name, friend = rv
.. code-block:: python
rv = cb.retrieve_in(key, 'email', 'name', 'friends.therock')
email, name, friend = rv
.. seealso:: :meth:`lookup_in`
"""
import couchbase_core.subdocument as SD
return self.lookup_in(key, *tuple(SD.get(x) for x in paths), **kwargs)
def incr(self, key, amount=1, **kwargs):
_depr('incr', 'counter')
return self.counter(key, delta=amount, **kwargs)
def incr_multi(self, keys, amount=1, **kwargs):
_depr('incr_multi', 'counter_multi')
return self.counter_multi(keys, delta=amount, **kwargs)
def decr(self, key, amount=1, **kwargs):
_depr('decr', 'counter')
return self.counter(key, delta=-amount, **kwargs)
def decr_multi(self, keys, amount=1, **kwargs):
_depr('decr_multi', 'counter_multi')
return self.counter_multi(keys, delta=-amount, **kwargs)
def stats(self, keys=None, keystats=False):
"""Request server statistics.
Fetches stats from each node in the cluster. Without a key
specified the server will respond with a default set of
statistical information. It returns a `dict` whose keys are stat keys
and whose values are host-value pairs.
:param keys: One or several stats to query
:type keys: string or list of string
:raise: :exc:`.CouchbaseNetworkException`
:return: `dict` where keys are stat keys and values are
host-value pairs
Find out how many items are in the bucket::
total = 0
for key, value in cb.stats()['total_items'].items():
total += value
Get memory stats (works on couchbase buckets)::
cb.stats('memory')
# {'mem_used': {...}, ...}
"""
if keys and not isinstance(keys, (tuple, list)):
keys = (keys,)
return self._stats(keys, keystats=keystats)
def observe(self, key, master_only=False):
"""Return storage information for a key.
It returns a :class:`.ValueResult` object with the ``value``
field set to a list of :class:`~.ObserveInfo` objects. Each
element in the list responds to the storage status for the key
on the given node. The length of the list (and thus the number
of :class:`~.ObserveInfo` objects) are equal to the number of
online replicas plus the master for the given key.
:param string key: The key to inspect
:param bool master_only: Whether to only retrieve information
from the master node.
.. seealso:: :ref:`observe_info`
"""
return _Base.observe(self, key, master_only=master_only)
def endure(self, key, persist_to=-1, replicate_to=-1, cas=0,
check_removed=False, timeout=5.0, interval=0.010):
"""Wait until a key has been distributed to one or more nodes
By default, when items are stored to Couchbase, the operation is
considered successful if the vBucket master (i.e. the "primary"
node) for the key has successfully stored the item in its
memory.
In most situations, this is sufficient to assume that the item
has successfully been stored. However the possibility remains
that the "master" server will go offline as soon as it sends
back the successful response and the data is lost.
The ``endure`` function allows you to provide stricter criteria
for success. The criteria may be expressed in terms of number of
nodes for which the item must exist in that node's RAM and/or on
that node's disk. Ensuring that an item exists in more than one
place is a safer way to guarantee against possible data loss.
We call these requirements `Durability Constraints`, and thus
the method is called `endure`.
:param string key: The key to endure.
:param int persist_to: The minimum number of nodes which must
contain this item on their disk before this function
returns. Ensure that you do not specify too many nodes;
otherwise this function will fail. Use the
:attr:`server_nodes` to determine how many nodes exist in
the cluster.
The maximum number of nodes an item can reside on is
currently fixed to 4 (i.e. the "master" node, and up to
three "replica" nodes). This limitation is current as of
Couchbase Server version 2.1.0.
If this parameter is set to a negative value, the maximum
number of possible nodes the key can reside on will be used.
:param int replicate_to: The minimum number of replicas which
must contain this item in their memory for this method to
succeed. As with ``persist_to``, you may specify a negative
value in which case the requirement will be set to the
maximum number possible.
:param float timeout: A timeout value in seconds before this
function fails with an exception. Typically it should take
no longer than several milliseconds on a functioning cluster
for durability requirements to be satisfied (unless
something has gone wrong).
:param float interval: The polling interval in seconds to use
for checking the key status on the respective nodes.
Internally, ``endure`` is implemented by polling each server
individually to see if the key exists on that server's disk
and memory. Once the status request is sent to all servers,
the client will check if their replies are satisfactory; if
they are then this function succeeds, otherwise the client
will wait a short amount of time and try again. This
parameter sets this "wait time".
:param bool check_removed: This flag inverts the check. Instead
of checking that a given key *exists* on the nodes, this
changes the behavior to check that the key is *removed* from
the nodes.
:param long cas: The CAS value to check against. It is possible
for an item to exist on a node but have a CAS value from a
prior operation. Passing the CAS ensures that only replies
from servers with a CAS matching this parameter are accepted.
:return: A :class:`~.OperationResult`
:raise: see :meth:`upsert` and :meth:`get` for possible errors
.. seealso:: :meth:`upsert`, :meth:`endure_multi`
"""
# We really just wrap 'endure_multi'
kv = {key: cas}
rvs = self.endure_multi(keys=kv, persist_to=persist_to,
replicate_to=replicate_to,
check_removed=check_removed, timeout=timeout,
interval=interval)
return rvs[key]
def durability(self, persist_to=-1, replicate_to=-1, timeout=0.0):
"""Returns a context manager which will apply the given
persistence/replication settings to all mutation operations when
active
:param int persist_to:
:param int replicate_to:
See :meth:`endure` for the meaning of these two values
Thus, something like::
with cb.durability(persist_to=3):
cb.upsert("foo", "foo_value")
cb.upsert("bar", "bar_value")
cb.upsert("baz", "baz_value")
is equivalent to::
cb.upsert("foo", "foo_value", persist_to=3)
cb.upsert("bar", "bar_value", persist_to=3)
cb.upsert("baz", "baz_value", persist_to=3)
.. versionadded:: 1.2.0
.. seealso:: :meth:`endure`
"""
return | |
"""
True if paramstyle is "numeric". This paramstyle is trickier than
all the others.
"""
insert_single_values_expr = None
"""When an INSERT is compiled with a single set of parameters inside
a VALUES expression, the string is assigned here, where it can be
used for insert batching schemes to rewrite the VALUES expression.
.. versionadded:: 1.3.8
"""
insert_prefetch = update_prefetch = ()
def __init__(
self, dialect, statement, column_keys=None, inline=False, **kwargs
):
"""Construct a new :class:`.SQLCompiler` object.
:param dialect: :class:`.Dialect` to be used
:param statement: :class:`.ClauseElement` to be compiled
:param column_keys: a list of column names to be compiled into an
INSERT or UPDATE statement.
:param inline: whether to generate INSERT statements as "inline", e.g.
not formatted to return any generated defaults
:param kwargs: additional keyword arguments to be consumed by the
superclass.
"""
self.column_keys = column_keys
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, "inline", False)
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. ResultProxy uses this for type processing and
# column targeting
self._result_columns = []
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self._numeric_binds = dialect.paramstyle == "numeric"
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
self.label_length = (
dialect.label_length or dialect.max_identifier_length
)
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = prefix_anon_map()
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if (
self.isinsert or self.isupdate or self.isdelete
) and statement._returning:
self.returning = statement._returning
if self.positional and self._numeric_binds:
self._apply_numbered_params()
@property
def prefetch(self):
return list(self.insert_prefetch + self.update_prefetch)
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
self.ctes = util.OrderedDict()
self.ctes_by_name = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
"""special API to support the use case of 'nested result sets'"""
result_columns, ordered_columns = (
self._result_columns,
self._ordered_columns,
)
self._result_columns, self._ordered_columns = [], False
try:
if self.stack:
entry = self.stack[-1]
entry["need_result_map_for_nested"] = True
else:
entry = None
yield self._result_columns, self._ordered_columns
finally:
if entry:
entry.pop("need_result_map_for_nested")
self._result_columns, self._ordered_columns = (
result_columns,
ordered_columns,
)
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r"\[_POSITION\]", lambda m: str(util.next(poscount)), self.string
)
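# Illustrative sketch (the statement text is hypothetical): with the "numeric"
# paramstyle the bind template renders ":[_POSITION]" placeholders, which this
# method rewrites into 1-based positions, e.g.
#
#     "SELECT * FROM t WHERE a = :[_POSITION] AND b = :[_POSITION]"
# becomes
#     "SELECT * FROM t WHERE a = :1 AND b = :2"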
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value)
for key, value in (
(
self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect),
)
for bindparam in self.bind_names
)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam in self.bind_names:
name = self.bind_names[bindparam]
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
elif bindparam.callable:
pd[name] = bindparam.effective_value
else:
pd[name] = bindparam.value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
if bindparam.callable:
pd[self.bind_names[bindparam]] = bindparam.effective_value
else:
pd[self.bind_names[bindparam]] = bindparam.value
return pd
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present."""
return self.construct_params(_check=False)
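# A hedged usage sketch (names are illustrative): given a statement compiled
# with an explicit bindparam, construct_params maps runtime values onto the
# compiled bind names, while .params falls back to the parameters' own values
# without the "required" check, e.g.
#
#     stmt = table.insert().values(x=bindparam("x"))
#     compiled = stmt.compile(dialect=some_dialect)
#     compiled.construct_params({"x": 5})   # -> {"x": 5}
#     compiled.params                       # no _check, uses stored values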
@util.dependencies("sqlalchemy.engine.result")
def _create_result_map(self, result):
"""utility method used for unit tests only."""
return result.ResultMetaData._create_result_map(self._result_columns)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label_reference(
self, element, within_columns_clause=False, **kwargs
):
if self.stack and self.dialect.supports_simple_order_by_label:
selectable = self.stack[-1]["selectable"]
with_cols, only_froms, only_cols = selectable._label_resolve_dict
if within_columns_clause:
resolve_dict = only_froms
else:
resolve_dict = only_cols
# this can be None in the case that a _label_reference()
# were subject to a replacement operation, in which case
# the replacement of the Label element may have changed
# to something else like a ColumnClause expression.
order_by_elem = element.element._order_by_label_element
if (
order_by_elem is not None
and order_by_elem.name in resolve_dict
and order_by_elem.shares_lineage(
resolve_dict[order_by_elem.name]
)
):
kwargs[
"render_label_as_label"
] = element.element._order_by_label_element
return self.process(
element.element,
within_columns_clause=within_columns_clause,
**kwargs
)
def visit_textual_label_reference(
self, element, within_columns_clause=False, **kwargs
):
if not self.stack:
# compiling the element outside of the context of a SELECT
return self.process(element._text_clause)
selectable = self.stack[-1]["selectable"]
with_cols, only_froms, only_cols = selectable._label_resolve_dict
try:
if within_columns_clause:
col = only_froms[element.element]
else:
col = with_cols[element.element]
except KeyError as ke:
elements._no_text_coercion(
element.element,
exc.CompileError,
"Can't resolve label reference for ORDER BY / "
"GROUP BY / DISTINCT etc.",
err=ke,
)
else:
kwargs["render_label_as_label"] = col
return self.process(
col, within_columns_clause=within_columns_clause, **kwargs
)
def visit_label(
self,
label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
**kw
):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = (
within_columns_clause and not within_label_clause
)
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname) + label._alt_names,
label.type,
)
return (
label.element._compiler_dispatch(
self,
within_columns_clause=True,
within_label_clause=True,
**kw
)
+ OPERATORS[operators.as_]
+ self.preparer.format_label(label, labelname)
)
elif render_label_only:
return self.preparer.format_label(label, labelname)
else:
return label.element._compiler_dispatch(
self, within_columns_clause=False, **kw
)
def _fallback_column_name(self, column):
raise exc.CompileError(
"Cannot compile Column object until " "its 'name' is assigned."
)
def visit_column(
self, column, add_to_result_map=None, include_table=True, **kwargs
):
name = orig_name = column.name
if name is None:
name = self._fallback_column_name(column)
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
add_to_result_map(
name, orig_name, (column, name, column.key), column.type
)
if is_literal:
# note we are not currently accommodating for
# literal_column(quoted_name('ident', True)) here
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
effective_schema = self.preparer.schema_for_object(table)
if effective_schema:
schema_prefix = (
self.preparer.quote_schema(effective_schema) + "."
)
else:
schema_prefix = ""
tablename = table.name
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + self.preparer.quote(tablename) + "." + name
def visit_collation(self, element, **kw):
return self.preparer.format_collation(element.collation)
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw["type_expression"] = typeclause
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def escape_literal_column(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def visit_textclause(self, textclause, **kw):
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
if not self.stack:
self.isplaintext = True
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
BIND_PARAMS.sub(
do_bindparam, self.post_process_text(textclause.text)
),
)
def visit_text_as_from(
self, taf, compound_index=None, asfrom=False, parens=True, **kw
):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = (
toplevel
or (
compound_index == 0
and entry.get("need_result_map_for_compound", False)
)
or entry.get("need_result_map_for_nested", False)
)
if populate_result_map:
self._ordered_columns = (
self._textual_ordered_columns
) = taf.positional
for c in taf.column_args:
self.process(
c,
within_columns_clause=True,
add_to_result_map=self._add_to_result_map,
)
text = self.process(taf.element, **kw)
if asfrom and parens:
text = "(%s)" % text
return text
# Repository: JoanAzpeitia/lp_sg
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import sys
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .ui.activity_stream_widget import Ui_ActivityStreamWidget
from .widget_new_item import NewItemWidget, SimpleNewItemWidget
from .widget_note import NoteWidget
from .widget_value_update import ValueUpdateWidget
from .dialog_reply import ReplyDialog
from .data_manager import ActivityStreamDataHandler
from .overlaywidget import SmallOverlayWidget
note_input_widget = sgtk.platform.current_bundle().import_module("note_input_widget")
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
utils = sgtk.platform.import_framework("tk-framework-shotgunutils", "utils")
class ActivityStreamWidget(QtGui.QWidget):
"""
QT Widget that displays the Shotgun activity stream for an entity.
:signal entity_requested(str, int): Fires when someone clicks an entity inside
the activity stream. The returned parameters are entity type and entity id.
:signal playback_requested(dict): Fires when someone clicks the playback url
on a version. Returns a shotgun dictionary with information
about the version.
:signal entity_created(object): Fires when a Note or Reply entity is created by
an underlying widget within the activity stream. Returns a Shotgun dictionary
with information about the new Entity.
:ivar reply_dialog: When a ReplyDialog is active it can be accessed here. If there
is no ReplyDialog active, then this will be set to None.
:vartype reply_dialog: .dialog_reply.ReplyDialog or None
"""
# max number of items to show in the activity stream.
MAX_STREAM_LENGTH = 20
# Activity attributes that we do not want displayed.
_SKIP_ACTIVITY_ATTRIBUTES = ["viewed_by_current_user"]
entity_requested = QtCore.Signal(str, int)
playback_requested = QtCore.Signal(dict)
# The int is the Note entity id that was selected or deselected.
note_selected = QtCore.Signal(int)
note_deselected = QtCore.Signal(int)
note_arrived = QtCore.Signal(int)
# Emitted when a Note or Reply entity is created. The
# entity type as a string and id as an int will be
# provided.
#
# dict(entity_type="Note", id=1234)
entity_created = QtCore.Signal(object)
def __init__(self, parent):
"""
:param parent: QT parent object
:type parent: :class:`~PySide.QtGui.QWidget`
"""
# first, call the base class and let it do its thing.
QtGui.QWidget.__init__(self, parent)
self._bundle = sgtk.platform.current_bundle()
# now load in the UI that was created in the UI designer
self.ui = Ui_ActivityStreamWidget()
self.ui.setupUi(self)
# The note widget will be turned on when an entity is loaded
# if the entity is of an appropriate type.
self.ui.note_widget.hide()
# customizations
self._allow_screenshots = True
self._show_sg_stream_button = True
self._version_items_playable = True
self._clickable_user_icons = True
self._show_note_links = True
self._highlight_new_arrivals = True
self._notes_are_selectable = False
self._attachments_filter = None
# apply styling
self._load_stylesheet()
# keep an overlay for loading
overlay_module = self._bundle.import_module("overlay_widget")
self.__overlay = overlay_module.ShotgunOverlayWidget(self)
self.__small_overlay = SmallOverlayWidget(self)
# set insertion order into list to be bottom-up
self.ui.activity_stream_layout.setDirection(QtGui.QBoxLayout.BottomToTop)
# create a data manager to handle backend
self._data_manager = ActivityStreamDataHandler(self)
# set up signals
self._data_manager.note_arrived.connect(self._process_new_note)
self._data_manager.update_arrived.connect(self._process_new_data)
self._data_manager.thumbnail_arrived.connect(self._process_thumbnail)
self.ui.note_widget.entity_created.connect(self._on_entity_created)
self.ui.note_widget.data_updated.connect(self.rescan)
# keep handles to all widgets to be nice to the GC
self._loading_widget = None
self._activity_stream_static_widgets = []
self._activity_stream_data_widgets = {}
# state management
self._task_manager = None
self._sg_entity_dict = None
self._entity_type = None
self._entity_id = None
self._select_on_arrival = dict()
# We'll be keeping a persistent reply dialog available because
# we need to connect to a signal that it's emitting. It's easiest
# to do that if we're dealing with an object that persists.
self.reply_dialog = ReplyDialog(
self,
self._task_manager,
note_id=None,
allow_screenshots=self._allow_screenshots,
)
# We'll allow for a pre-note-creation callback. This is for additional
# pre-processing that needs to occur before a Note or Reply is created
# in Shotgun. This makes sure that the activity stream data coming down
# during the rescan after submission contains anything like additional
# attachments that this widget didn't explicitly handle itself prior to
# submission.
self._pre_submit_callback = None
self.reply_dialog.note_widget.entity_created.connect(self._on_entity_created)
def set_bg_task_manager(self, task_manager):
"""
Specify the background task manager to use to pull
data in the background. Data calls
to Shotgun will be dispatched via this object.
:param task_manager: Background task manager to use
:type task_manager: :class:`~tk-framework-shotgunutils:task_manager.BackgroundTaskManager`
"""
self._task_manager = task_manager
self._data_manager.set_bg_task_manager(task_manager)
self.ui.note_widget.set_bg_task_manager(task_manager)
def destroy(self):
"""
Should be called before the widget is closed
"""
self._data_manager.destroy()
self._task_manager = None
############################################################################
# properties
@property
def note_threads(self):
"""
The currently loaded note threads, keyed by Note entity id and
containing a list of Shotgun entity dictionaries. All note threads
currently displayed by the activity stream widget will be returned.
Example structure containing a Note, a Reply, and an attachment::
6040: [
{
'addressings_cc': [],
'addressings_to': [],
'client_note': False,
'content': 'This is a test note.',
'created_at': 1466477744.0,
'created_by': {
'id': 39,
'name': '<NAME>',
'type': 'HumanUser'
},
'id': 6040,
'note_links': [
{
'id': 1167,
'name': '123',
'type': 'Shot'
},
{
'id': 6023,
'name': 'Scene_v030_123',
'type': 'Version'
}
],
'read_by_current_user': 'read',
'subject': "Jeff's Note on Scene_v030_123, 123",
'tasks': [
{
'id': 2118,
'name': 'Comp',
'type': 'Task'
}
],
'type': 'Note',
'user': {
'id': 39,
'name': '<NAME>',
'type': 'HumanUser'
},
'user.ApiUser.image': None,
'user.ClientUser.image': None,
'user.HumanUser.image': 'https://url_to_file'
},
{
'content': 'test reply',
'created_at': 1469221928.0,
'id': 23,
'type': 'Reply',
'user': {
'id': 39,
'image': 'https://url_to_file',
'name': '<NAME>',
'type': 'HumanUser'
}
},
{
'attachment_links': [
{
'id': 6051,
'name': "Jeff's Note on Scene_v030_123, 123 - testing.",
'type': 'Note'
}
],
'created_at': 1469484693.0,
'created_by': {
'id': 39,
'name': '<NAME>',
'type': 'HumanUser'
},
'id': 601,
'image': 'https://url_to_file',
'this_file': {
'content_type': 'image/png',
'id': 601,
'link_type': 'upload',
'name': 'screencapture_vrviim.png',
'type': 'Attachment',
'url': 'https://url_to_file'
},
'type': 'Attachment'
},
]
"""
return self._data_manager.note_threads
@property
def note_widget(self):
"""
Returns the :class:`~note_input_widget.NoteInputWidget` contained within
the ActivityStreamWidget. Note that this is the widget used for NEW note
input and not Note replies. The NoteInputWidget used for Note
replies is available via :meth:`ReplyDialog.note_widget`.
"""
return self.ui.note_widget
def _get_clickable_user_icons(self):
"""
Whether user icons in the activity stream display as clickable.
If True, a pointing hand cursor will be shown when the mouse is
hovered over the icons, otherwise the default arrow cursor will be
used.
"""
return self._clickable_user_icons
def _set_clickable_user_icons(self, state):
self._clickable_user_icons = bool(state)
for widget in self._activity_stream_data_widgets.values():
if isinstance(widget, NoteWidget):
if state:
widget.set_user_thumb_cursor(QtCore.Qt.PointingHandCursor)
else:
widget.set_user_thumb_cursor(QtCore.Qt.ArrowCursor)
clickable_user_icons = property(
_get_clickable_user_icons,
_set_clickable_user_icons,
)
def _get_pre_submit_callback(self):
"""
The pre-submit callback. This is None if one is not set, or a Python
callable if it is. This callable is run prior to submission of a new
Note or Reply. Note that the first (and only) argument passed to the
callback will be the calling :class:`NoteInputWidget`.
:returns: Python callable or None
"""
return self._pre_submit_callback
def _set_pre_submit_callback(self, callback):
self._pre_submit_callback = callback
self.reply_dialog.note_widget.pre_submit_callback = callback
self.note_widget.pre_submit_callback = callback
pre_submit_callback = property(
_get_pre_submit_callback,
_set_pre_submit_callback,
)
def _get_allow_screenshots(self):
"""
Whether this activity stream is allowed to give the user access to a
button that performs screenshot operations.
"""
return self._allow_screenshots
def _set_allow_screenshots(self, state):
self._allow_screenshots = bool(state)
self.ui.note_widget.allow_screenshots(self._allow_screenshots)
allow_screenshots = property(
_get_allow_screenshots,
_set_allow_screenshots,
)
def _get_show_sg_stream_button(self):
"""
Whether the button to navigate to Shotgun is shown in the stream.
"""
return self._show_sg_stream_button
def _set_show_sg_stream_button(self, state):
"""
Sets whether to show the button to navigate to Shotgun.
:param state: True or False
"""
self._show_sg_stream_button = bool(state)
show_sg_stream_button = property(
_get_show_sg_stream_button,
_set_show_sg_stream_button,
)
def _get_version_items_playable(self):
"""
Whether the label representing a created Version entity is shown
as being "playable" within the UI. If True, then a play icon is
visible over the thumbnail image, and no icon overlay is shown
when False.
"""
return self._version_items_playable
def _set_version_items_playable(self, state):
self._version_items_playable = bool(state)
version_items_playable = property(
_get_version_items_playable,
_set_version_items_playable,
)
def _get_show_note_links(self):
"""
If True, lists out the parent entity as a list of clickable
items for each Note entity that is represented in the activity
stream.
"""
return self._show_note_links
def _set_show_note_links(self, state):
self._show_note_links = bool(state)
show_note_links = property(
_get_show_note_links,
_set_show_note_links,
)
def _get_highlight_new_arrivals(self):
"""
If True, highlights items in the activity stream that are new
since the last time data was loaded.
"""
return self._highlight_new_arrivals
def _set_highlight_new_arrivals(self, state):
self._highlight_new_arrivals = bool(state)
highlight_new_arrivals = property(
_get_highlight_new_arrivals,
_set_highlight_new_arrivals,
)
# Repository: gmweir/QuasiOptics
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 17:01:28 2021
@author: gawe
Functions dealing with rectangular patch antenna.
"""
import math
import numpy as np
from math import cos, sin, sqrt, pi, log10, atan2, acos, radians
from scipy import integrate
import scipy.integrate
import json
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# from pybaseutils.utils import sph2cart as sph2cart1
# from pybaseutils.utils import cart2sph as cart2sph1
# constants
light_velocity = 299792458
impedance = 50
# ======================================== #
# import plotly
# from plotly.offline import iplot
# import plotly.graph_objs as go
# plotly.offline.init_notebook_mode(connected=True)
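# PatchFunction and patch_function below rely on sph2cart1/cart2sph1, whose
# pybaseutils imports are commented out above. A minimal sketch of those
# helpers is provided here, assuming the usual physics convention (theta
# measured from the +z axis, phi in the x-y plane); the pybaseutils originals
# may differ in argument order or convention.
def sph2cart1(r, th, phi):
    # spherical (r, theta, phi) -> cartesian (x, y, z)
    x = r * sin(th) * cos(phi)
    y = r * sin(th) * sin(phi)
    z = r * cos(th)
    return x, y, z

def cart2sph1(x, y, z):
    # cartesian (x, y, z) -> spherical (r, theta, phi)
    r = sqrt(x**2 + y**2 + z**2)
    th = acos(z / r)
    phi = atan2(y, x)
    return r, th, phi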
def S_i(a):
temp = scipy.integrate.quad(lambda x:sin(x)/x,0,a)
return temp[0]
def J0(s):
temp = scipy.integrate.quad(lambda x:cos(s*sin(x)),0,pi)
temp = (1/pi)*temp[0]
return temp
def get_k(f):
lamda_0 = light_velocity/f
k0 = (2*pi)/lamda_0
return k0
def getG1 (W, f):
k0 = get_k (f)
X = k0 * W
I1 = -2 + cos(X) + X*S_i(X) + sin(X)/X
G1 = I1 / ( 120 * pi**2 )
return G1
def getG12 (W, k0, L):
temp = scipy.integrate.quad(lambda x: (((sin(k0*W*cos(x)/2)/cos(x))**2)*J0(k0*L*sin(x))*sin(x)**3), 0, pi)
G12 = (1/(120*pi**2))*temp[0]
return G12
def getGs(f, W, L):
G1 = getG1(W, f)
k0 = get_k(f)
G12 = getG12(W, k0, L)
return G1, G12
def input_impedance (f, W, L):
k0 = get_k (f)
G1, G12 = getGs(f, W, L)
Rin = 1/(2*(G1+G12))
print("Input Impedance:", Rin, "ohms")
return Rin
def inset_feed_position(Rin, L):
# R = 50.0
R = impedance
y0 = (L/pi)*(math.acos(sqrt(R/Rin)))
return y0
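# A hedged usage sketch (frequency and patch dimensions are illustrative
# assumptions, not taken from a real design): compute the edge resistance and
# the 50-ohm inset feed position for a rectangular patch.
def example_inset_feed(f=2.4e9, W=0.038, L=0.029):
    Rin = input_impedance(f, W, L)      # edge resistance in ohms
    y0 = inset_feed_position(Rin, L)    # inset depth in metres
    print("Inset feed position:", y0, "m from the radiating edge")
    return y0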
def get_directivity(G1, G12, W, f, I1, I2):
lamda_0 = light_velocity/f
g_12 = G12/G1
D_AF = 2/(1+g_12)
D0 = ((2*pi*W)/lamda_0)**2*(1/I1)
D2 = D0 * D_AF
DIR_1 = 10*log10(D2)
D_2 = ((2*pi*W)/lamda_0) ** 2 * (pi/I2)
DIR_2 = 10 * log10(D_2)
return DIR_1, DIR_2
# ======================================== #
def PatchFunction(thetaInDeg, phiInDeg, Freq, W, L, h, Er):
"""
Taken from Design_patchr
Calculates total E-field pattern for patch as a function of theta and phi
Patch is assumed to be resonating in the (TMx 010) mode.
E-field is parallel to x-axis
W......Width of patch (m)
L......Length of patch (m)
h......Substrate thickness (m)
Er.....Dielectric constant of substrate
Reference C.A. Balanis 2nd Edition Page 745
"""
lamba = light_velocity / Freq
theta_in = math.radians(thetaInDeg)
phi_in = math.radians(phiInDeg)
ko = 2 * math.pi / lamba
xff, yff, zff = sph2cart1(999, theta_in, phi_in) # Rotate coords 90 deg about x-axis to match array_utils coord system with coord system used in the model.
xffd = zff
yffd = xff
zffd = yff
r, thp, php = cart2sph1(xffd, yffd, zffd)
phi = php
theta = thp
if theta == 0:
theta = 1e-9 # Trap potential division by zero warning
if phi == 0:
phi = 1e-9
Ereff = ((Er + 1) / 2) + ((Er - 1) / 2) * (1 + 12 * (h / W)) ** -0.5 # Calculate effective dielectric constant for microstrip line of width W on dielectric material of constant Er
F1 = (Ereff + 0.3) * (W / h + 0.264) # Calculate increase length dL of patch length L due to fringing fields at each end, giving total effective length Leff = L + 2*dL
F2 = (Ereff - 0.258) * (W / h + 0.8)
dL = h * 0.412 * (F1 / F2)
Leff = L + 2 * dL
Weff = W # Calculate effective width Weff for patch, uses standard Er value.
heff = h * sqrt(Er)
# Patch pattern function of theta and phi, note the theta and phi for the function are defined differently to theta_in and phi_in
Numtr2 = sin(ko * heff * cos(phi) / 2)
Demtr2 = (ko * heff * cos(phi)) / 2
Fphi = (Numtr2 / Demtr2) * cos((ko * Leff / 2) * sin(phi))
Numtr1 = sin((ko * heff / 2) * sin(theta))
Demtr1 = ((ko * heff / 2) * sin(theta))
Numtr1a = sin((ko * Weff / 2) * cos(theta))
Demtr1a = ((ko * Weff / 2) * cos(theta))
Ftheta = ((Numtr1 * Numtr1a) / (Demtr1 * Demtr1a)) * sin(theta)
# Due to groundplane, function is only valid for theta values : 0 < theta < 90 for all phi
# Modify pattern for theta values close to 90 to give smooth roll-off, standard model truncates H-plane at theta=90.
# PatEdgeSF has value=1 except at theta close to 90 where it drops (proportional to 1/x^2) to 0
rolloff_factor = 0.5 # 1=sharp, 0=softer
theta_in_deg = theta_in * 180 / math.pi # theta_in in Deg
F1 = 1 / (((rolloff_factor * (abs(theta_in_deg) - 90)) ** 2) + 0.001) # intermediate calc
PatEdgeSF = 1 / (F1 + 1) # Pattern scaling factor
UNF = 1.0006 # Unity normalisation factor for element pattern
if theta_in <= math.pi / 2:
Etot = Ftheta * Fphi * PatEdgeSF * UNF # Total pattern by pattern multiplication
else:
Etot = 0
return Etot
def patch_function(theta_in_deg, phi_in_deg, freq, w, l, h, er):
"""
Taken from Design_patchr
Calculates total E-field pattern for patch as a function of theta and phi
Patch is assumed to be resonating in the (TMx 010) mode.
E-field is parallel to x-axis
W......Width of patch (m)
L......Length of patch (m)
h......Substrate thickness (m)
Er.....Dielectric constant of substrate
Reference C.A. Balanis 2nd Edition Page 745
"""
lambda_ = light_velocity / freq
theta_in = math.radians(theta_in_deg)
phi_in = math.radians(phi_in_deg)
ko = 2 * math.pi / lambda_
xff, yff, zff = sph2cart1(999, theta_in, phi_in) # Rotate coords 90 deg about x-axis to match array_utils coord system with coord system used in the model.
xffd = zff
yffd = xff
zffd = yff
r, thp, php = cart2sph1(xffd, yffd, zffd)
phi = php
theta = thp
if theta == 0:
# Trap potential division by zero warning
theta = 1e-9
if phi == 0:
phi = 1e-9
# Calculate effective dielectric constant for microstrip line of width W on dielectric material of constant Er
e_ref = ((er + 1) / 2) + ((er - 1) / 2) * (1 + 12 * (h / w)) ** -0.5
# Calculate increase length dL of patch length L due to fringing fields at each end,
# giving total effective length Leff = L + 2*dL
f1 = (e_ref + 0.3) * (w / h + 0.264)
f2 = (e_ref - 0.258) * (w / h + 0.8)
d_l = h * 0.412 * (f1 / f2)
l_eff = l + 2 * d_l
# Calculate effective width Weff for patch, uses standard Er value.
w_eff = w
h_eff = h * sqrt(er)
# Patch pattern function of theta and phi,
# Note the theta and phi for the function are defined differently to theta_in and phi_in
num_tr_2 = sin(ko * h_eff * cos(phi) / 2)
dem_tr_2 = (ko * h_eff * cos(phi)) / 2
f_phi = (num_tr_2 / dem_tr_2) * cos((ko * l_eff / 2) * sin(phi))
num_tr_1 = sin((ko * h_eff / 2) * sin(theta))
dem_tr_1 = ((ko * h_eff / 2) * sin(theta))
num_tr_1a = sin((ko * w_eff / 2) * cos(theta))
dem_tr_1a = ((ko * w_eff / 2) * cos(theta))
f_theta = ((num_tr_1 * num_tr_1a) / (dem_tr_1 * dem_tr_1a)) * sin(theta)
# Due to groundplane, function is only valid for theta values : 0 < theta < 90 for all phi
# Modify pattern for theta values close to 90 to give smooth roll-off, standard model truncates H-plane at theta=90.
# PatEdgeSF has value=1 except at theta close to 90 where it drops (proportional to 1/x^2) to 0
# 1=sharp, 0=softer
roll_off_factor = 0.5
# theta_in in Deg
theta_in_deg = theta_in * 180 / math.pi
# intermediate calc
f1 = 1 / (((roll_off_factor * (abs(theta_in_deg) - 90)) ** 2) + 0.001)
# Pattern scaling factor
pat_edge_sf = 1 / (f1 + 1)
# Unity normalisation factor for element pattern
UNF = 1.0006
# Total pattern by pattern multiplication
if theta_in <= math.pi / 2:
e_tot = f_theta * f_phi * pat_edge_sf * UNF
else:
e_tot = 0
return e_tot
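# A hedged usage sketch (substrate and patch values are illustrative
# assumptions): evaluate the element pattern over a 0-90 degree theta cut
# at phi = 0 using the helpers defined above.
def example_pattern_cut(freq=2.4e9, w=0.038, l=0.029, h=0.0016, er=4.4):
    thetas = range(0, 91, 5)
    return [(t, patch_function(t, 0.0, freq, w, l, h, er)) for t in thetas]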
def GetPatchFields(PhiStart, PhiStop, ThetaStart, ThetaStop, Freq, W, L, h, Er):
""""
Calculates the E-field | |
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict
from string import ascii_uppercase
from .. import Provider as AddressProvider
class Provider(AddressProvider):
"""
Provider for addresses for en_PH locale
Like many things in the Philippines, even addresses are more complicated than necessary. This provider is already
a gross oversimplification, and it is still a lot more complicated than providers from other locales despite taking
shortcuts. Below are some tidbits of information that, as a whole, shaped the design decisions of this provider.
- There are many levels of geopolitical division, thus many levels of local government:
* There are three major island groups - Luzon, Visayas, Mindanao
* Those major groups are divided into 17 different regions.
* Each region is divided into provinces with the exception of the National Capital Region aka Metro Manila.
* Each province is composed of multiple cities/municipalities.
* Metro Manila, like a province, is composed of multiple cities/municipalities, but it is a region.
* Each city/municipality is composed of multiple smaller local government units called barangays.
* In some places, some barangays are divided further, and as of 2019, there are 42,045 barangays on record.
- Metro Manila is part of Luzon geographically, but it is almost always treated as a separate entity politically,
economically, statistically, and so on, since it is home to around 13% of the population despite being only around
0.2% of the country's total land area.
- Names of cities, municipalities, and barangays vary a lot. Furthermore, if a place has a non-English name, there
will almost always be no English translation and vice-versa. It is essentially impossible to generate fake city,
municipality, and barangay names in a similar manner used in the other "en" locales while being locale specific.
- Subdivisions and other higher density housing (like high-rise condominiums) are popular in real estate.
- The 13th floor is omitted in buildings, as it is in many parts of the world.
- The floor number distribution is partly based on the tallest buildings in the Philippines and partly anecdotal,
but the general idea is that the higher the floor number is, the lower probability of it appearing. Furthermore,
as the floor number approaches the highest floors of the tallest buildings, the probability plummets further.
- The address distribution is based on the official 2015 population census.
- Addresses should include a barangay, but it has been dropped to keep things sane, all things considered.
- In addition to numbered floors, buildings have ground floors and may have lower ground, upper ground, mezzanine,
and basement floors. Buildings may also have units on any of those floors, but the naming scheme varies, so they
have been dropped, again to keep things sane.
Sources:
- https://en.wikipedia.org/wiki/Provinces_of_the_Philippines
- https://en.wikipedia.org/wiki/List_of_cities_and_municipalities_in_the_Philippines
- https://en.wikipedia.org/wiki/Barangay
- https://en.wikipedia.org/wiki/Postal_addresses_in_the_Philippines
- https://en.wikipedia.org/wiki/List_of_ZIP_codes_in_the_Philippines
- https://www.phlpost.gov.ph/
- http://en.wikipedia.org/wiki/List_of_tallest_buildings_in_the_Philippines
- https://psa.gov.ph/sites/default/files/attachments/hsd/pressrelease/2015%20population%20counts%20Summary_0.xlsx
"""
metro_manila_postcodes = tuple(x for x in range(400, 1849))
luzon_province_postcodes = (
tuple(x for x in range(1850, 5000))
+ tuple(x for x in range(5100, 5600))
)
visayas_province_postcodes = (
tuple(x for x in range(5000, 5100))
+ tuple(x for x in range(5600, 5800))
+ tuple(x for x in range(6000, 6900))
)
mindanao_province_postcodes = (
tuple(x for x in range(7000, 7600))
+ tuple(x for x in range(8000, 8900))
+ tuple(x for x in range(9000, 9900))
)
postcodes = (
metro_manila_postcodes
+ luzon_province_postcodes
+ visayas_province_postcodes
+ mindanao_province_postcodes
)
metro_manila_lgus = (
'Caloocan', 'Las Piñas', 'Makati', 'Malabon', 'Mandaluyong', 'Manila', 'Marikina', 'Muntinlupa', 'Navotas',
'Parañaque', 'Pasay', 'Pasig', 'Pateros', 'Quezon City', 'San Juan', 'Taguig', 'Valenzuela',
)
province_lgus = (
'Aborlan', 'Abra de Ilog', 'Abucay', 'Abulug', 'Abuyog', 'Adams', 'Agdangan', 'Aglipay', 'Agno', 'Agoncillo',
'Agoo', 'Aguilar', 'Aguinaldo', 'Agutaya', 'Ajuy', 'Akbar', 'Al-Barka', 'Alabat', 'Alabel', 'Alamada',
'Alaminos', 'Alangalang', 'Albuera', 'Alburquerque', 'Alcala', 'Alcantara', 'Alcoy', 'Alegria', 'Aleosan',
'Alfonso Castañeda', 'Alfonso Lista', 'Alfonso', 'Aliaga', 'Alicia', 'Alilem', 'Alimodian', 'Alitagtag',
'Allacapan', 'Allen', 'Almagro', 'Almeria', 'Aloguinsan', 'Aloran', 'Altavas', 'Alubijid', 'Amadeo',
'Amai Manabilang', 'Ambaguio', 'Amlan', 'Ampatuan', 'Amulung', 'Anahawan', 'Anao', 'Anda', 'Angadanan', 'Angat',
'Angeles', 'Angono', 'Anilao', 'Anini-y', 'Antequera', 'Antipas', 'Antipolo', 'Apalit', 'Aparri', 'Araceli',
'Arakan', 'Arayat', 'Argao', 'Aringay', 'Aritao', 'Aroroy', 'Arteche', 'Asingan', 'Asipulo', 'Asturias',
'Asuncion', 'Atimonan', 'Atok', 'Aurora', 'Ayungon', 'Baao', 'Babatngon', 'Bacacay', 'Bacarra', 'Baclayon',
'Bacnotan', 'Baco', 'Bacolod-Kalawi', 'Bacolod', 'Bacolor', 'Bacong', 'Bacoor', 'Bacuag', 'Badian', 'Badiangan',
'Badoc', 'Bagabag', 'Bagac', 'Bagamanoc', 'Baganga', 'Baggao', 'Bago', 'Baguio', 'Bagulin', 'Bagumbayan',
'Bais', 'Bakun', 'Balabac', 'Balabagan', 'Balagtas', 'Balamban', 'Balanga', 'Balangiga', 'Balangkayan',
'Balaoan', 'Balasan', 'Balatan', 'Balayan', 'Balbalan', 'Baleno', 'Baler', 'Balete', 'Baliangao', 'Baliguian',
'Balilihan', 'Balindong', 'Balingasag', 'Balingoan', 'Baliuag', 'Ballesteros', 'Baloi', 'Balud', 'Balungao',
'Bamban', 'Bambang', 'Banate', 'Banaue', 'Banaybanay', 'Banayoyo', 'Banga', 'Bangar', 'Bangued', 'Bangui',
'Banguingui', 'Bani', 'Banisilan', 'Banna', 'Bansalan', 'Bansud', 'Bantay', 'Bantayan', 'Banton', 'Baras',
'Barbaza', 'Barcelona', 'Barili', 'Barira', 'Barlig', 'Barobo', 'Barotac Nuevo', 'Barotac Viejo', 'Baroy',
'Barugo', 'Basay', 'Basco', 'Basey', 'Basilisa', 'Basista', 'Basud', 'Batac', 'Batad', 'Batan', 'Batangas City',
'Bataraza', 'Bato', 'Batuan', 'Bauan', 'Bauang', 'Bauko', 'Baungon', 'Bautista', 'Bay', 'Bayabas', 'Bayambang',
'Bayang', 'Bayawan', 'Baybay', 'Bayog', 'Bayombong', 'Bayugan', 'Belison', 'Benito Soliven', 'Besao',
'Bien Unido', 'Bilar', 'Biliran', 'Binalbagan', 'Binalonan', 'Biñan', 'Binangonan', 'Bindoy', 'Bingawan',
'Binidayan', 'Binmaley', 'Binuangan', 'Biri', 'Bislig', 'Boac', 'Bobon', 'Bocaue', 'Bogo', 'Bokod', 'Bolinao',
'Boliney', 'Boljoon', 'Bombon', 'Bongabon', 'Bongabong', 'Bongao', 'Bonifacio', 'Bontoc', 'Borbon', 'Borongan',
'Boston', 'Botolan', 'Braulio E. Dujali', "Brooke's Point", 'Buadiposo-Buntong', 'Bubong', 'Bucay', 'Bucloc',
'Buenavista', 'Bugallon', 'Bugasong', 'Buguey', 'Buguias', 'Buhi', 'Bula', 'Bulakan', 'Bulalacao', 'Bulan',
'Buldon', 'Buluan', 'Bulusan', 'Bunawan', 'Burauen', 'Burdeos', 'Burgos', 'Buruanga', 'Bustos', 'Busuanga',
'Butig', 'Butuan', 'Buug', 'Caba', 'Cabadbaran', 'Cabagan', 'Cabanatuan', 'Cabangan', 'Cabanglasan',
'Cabarroguis', 'Cabatuan', 'Cabiao', 'Cabucgayan', 'Cabugao', 'Cabusao', 'Cabuyao', 'Cadiz', 'Cagayan de Oro',
'Cagayancillo', 'Cagdianao', 'Cagwait', 'Caibiran', 'Cainta', 'Cajidiocan', 'Calabanga', 'Calaca', 'Calamba',
'Calanasan', 'Calanogas', 'Calapan', 'Calape', 'Calasiao', 'Calatagan', 'Calatrava', 'Calauag', 'Calauan',
'Calayan', 'Calbayog', 'Calbiga', 'Calinog', 'Calintaan', 'Calubian', 'Calumpit', 'Caluya', 'Camalaniugan',
'Camalig', 'Camaligan', 'Camiling', 'Can-avid', 'Canaman', 'Candaba', 'Candelaria', 'Candijay', 'Candon',
'Candoni', 'Canlaon', 'Cantilan', 'Caoayan', 'Capalonga', 'Capas', 'Capoocan', 'Capul', 'Caraga', 'Caramoan',
'Caramoran', 'Carasi', 'Carcar', 'Cardona', 'Carigara', 'Carles', 'Carmen', 'Carmona', 'Carranglan',
'Carrascal', 'Casiguran', 'Castilla', 'Castillejos', 'Cataingan', 'Catanauan', 'Catarman', 'Catbalogan',
'Cateel', 'Catigbian', 'Catmon', 'Catubig', 'Cauayan', 'Cavinti', 'Cavite City', 'Cawayan', 'Cebu City',
'Cervantes', 'Clarin', 'Claver', 'Claveria', 'Columbio', 'Compostela', 'Concepcion', 'Conner', 'Consolacion',
'Corcuera', 'Cordon', 'Cordova', 'Corella', 'Coron', 'Cortes', 'Cotabato City', 'Cuartero', 'Cuenca', 'Culaba',
'Culasi', 'Culion', 'Currimao', 'Cuyapo', 'Cuyo', 'Daanbantayan', 'Daet', 'Dagami', 'Dagohoy', 'Daguioman',
'Dagupan', 'Dalaguete', 'Damulog', 'Danao', 'Dangcagan', 'Danglas', 'Dao', 'Dapa', 'Dapitan', 'Daraga', 'Daram',
'Dasmariñas', 'Dasol', 'Datu Abdullah Sangki', 'Datu Anggal Midtimbang', 'Datu Blah T. Sinsuat',
'Datu Hoffer Ampatuan', 'Datu Montawal', 'Datu Odin Sinsuat', 'Datu Paglas', 'Datu Piang', 'Datu Salibo',
'Datu Saudi-Ampatuan', 'Datu Unsay', 'Dauin', 'Dauis', 'Davao City', 'Del Carmen', 'Del Gallego',
'Delfin Albano', 'Diadi', 'Diffun', 'Digos', 'Dilasag', 'Dimasalang', 'Dimataling', 'Dimiao', 'Dinagat',
'Dinalungan', 'Dinalupihan', 'Dinapigue', 'Dinas', 'Dingalan', 'Dingle', 'Dingras', 'Dipaculao', 'Diplahan',
'Dipolog', 'Ditsaan-Ramain', 'Divilacan', 'Dolores', '<NAME>', '<NAME>', '<NAME>',
'Doña Remedios Trinidad', 'Donsol', 'Dueñas', 'Duero', 'Dulag', 'Dumaguete', 'Dumalag', 'Dumalinao', 'Dumalneg',
'Dumangas', 'Dumanjug', 'Dumaran', 'Dumarao', 'Dumingag', 'Dupax del Norte', 'Dupax del Sur', 'Echague',
'El Nido', 'El Salvador', 'Enrile', '<NAME>', '<NAME>', 'Escalante', 'Esperanza',
'Estancia', 'Famy', 'Ferrol', 'Flora', 'Floridablanca', 'Gabaldon', 'Gainza', 'Galimuyod', 'Gamay', 'Gamu',
'Ganassi', 'Gandara', 'Gapan', 'Garchitorena', '<NAME>', 'Gasan', 'Gattaran',
'General Emilio Aguinaldo', 'General Luna', 'General MacArthur', 'General Mamerto Natividad',
'General Mariano Alvarez', 'General Nakar', 'General Salipada K. Pendatun', 'General Santos', 'General Tinio',
'General Trias', 'Gerona', 'Getafe', 'Gigaquit', 'Gigmoto', 'Ginatilan', 'Gingoog', 'Giporlos', 'Gitagum',
'Glan', 'Gloria', 'Goa', 'Godod', 'Gonzaga', 'Governor Generoso', 'Gregorio del Pilar', 'Guagua', 'Gubat',
'Guiguinto', 'Guihulngan', 'Guimba', 'Guimbal', 'Guinayangan', 'Guindulman', 'Guindulungan', 'Guinobatan',
'Guinsiliban', 'Guipos', 'Guiuan', 'Gumaca', 'Gutalac', '<NAME>', '<NAME>',
'Hadji Pang<NAME>ahil', 'Hagonoy', 'Hamtic', 'Hermosa', 'Hernani', 'Hilongos', 'Himamaylan', 'Hinabangan',
'Hinatuan', 'Hindang', 'Hingyon', 'Hinigaran', 'Hinoba-an', 'Hinunangan', 'Hinundayan', 'Hungduan', 'Iba',
'Ibaan', 'Ibajay', 'Igbaras', 'Iguig', 'Ilagan', 'Iligan', 'Ilog', 'Iloilo City', 'Imelda', 'Impasugong',
'Imus', 'Inabanga', 'Indanan', 'Indang', 'Infanta', 'Initao', 'Inopacan', 'Ipil', 'Iriga', 'Irosin', 'Isabel',
'Isabela City', 'Isabela', 'Isulan', 'Itbayat', 'Itogon', 'Ivana', 'Ivisan', 'Jabonga', 'Jaen', 'Jagna',
'Jalajala', 'Jamindan', 'Janiuay', 'Jaro', 'Jasaan', 'Javier', 'Jiabong', 'Jimalalud', 'Jimenez', 'Jipapad',
'Jolo', 'Jomalig', 'Jones', 'Jordan', '<NAME>', '<NAME>', '<NAME>', 'Josefina',
'Jovellar', 'Juban', 'Julita', 'Kabacan', 'Kabankalan', 'Kabasalan', 'Kabayan', 'Kabugao', 'Kabuntalan',
'Kadingilan', 'Kalamansig', 'Kalawit', 'Kalayaan', 'Kalibo', 'Kalilangan', 'Kalingalan Caluang', 'Kananga',
'Kapai', 'Kapalong', 'Kapangan', 'Kapatagan', 'Kasibu', 'Katipunan', 'Kauswagan', 'Kawayan', 'Kawit', 'Kayapa',
'Kiamba', 'Kiangan', 'Kibawe', 'Kiblawan', 'Kibungan', 'Kidapawan', 'Kinoguitan', 'Kitaotao', 'Kitcharao',
'Kolambugan', 'Koronadal', 'Kumalarang', 'La Carlota', 'La Castellana', 'La Libertad', 'La Paz', 'La Trinidad',
'Laak', 'Labangan', 'Labason', 'Labo', 'Labrador', 'Lacub', 'Lagangilang', 'Lagawe', 'Lagayan', 'Lagonglong',
'Lagonoy', 'Laguindingan', | |
config = self.GetMethodConfig('SetAccelerator')
return self._RunMethod(
config, request, global_params=global_params)
SetAccelerator.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:setAccelerator',
http_method='PATCH',
method_id='notebooks.projects.locations.instances.setAccelerator',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:setAccelerator',
request_field='setInstanceAcceleratorRequest',
request_type_name='NotebooksProjectsLocationsInstancesSetAcceleratorRequest',
response_type_name='Operation',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Args:
request: (NotebooksProjectsLocationsInstancesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:setIamPolicy',
http_method='POST',
method_id='notebooks.projects.locations.instances.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='NotebooksProjectsLocationsInstancesSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def SetLabels(self, request, global_params=None):
r"""Replaces all the labels of an Instance.
Args:
request: (NotebooksProjectsLocationsInstancesSetLabelsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('SetLabels')
return self._RunMethod(
config, request, global_params=global_params)
SetLabels.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:setLabels',
http_method='PATCH',
method_id='notebooks.projects.locations.instances.setLabels',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:setLabels',
request_field='setInstanceLabelsRequest',
request_type_name='NotebooksProjectsLocationsInstancesSetLabelsRequest',
response_type_name='Operation',
supports_download=False,
)
def SetMachineType(self, request, global_params=None):
r"""Updates the machine type of a single Instance.
Args:
request: (NotebooksProjectsLocationsInstancesSetMachineTypeRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('SetMachineType')
return self._RunMethod(
config, request, global_params=global_params)
SetMachineType.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:setMachineType',
http_method='PATCH',
method_id='notebooks.projects.locations.instances.setMachineType',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:setMachineType',
request_field='setInstanceMachineTypeRequest',
request_type_name='NotebooksProjectsLocationsInstancesSetMachineTypeRequest',
response_type_name='Operation',
supports_download=False,
)
def Start(self, request, global_params=None):
r"""Starts a notebook instance.
Args:
request: (NotebooksProjectsLocationsInstancesStartRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Start')
return self._RunMethod(
config, request, global_params=global_params)
Start.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:start',
http_method='POST',
method_id='notebooks.projects.locations.instances.start',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:start',
request_field='startInstanceRequest',
request_type_name='NotebooksProjectsLocationsInstancesStartRequest',
response_type_name='Operation',
supports_download=False,
)
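# A hedged usage sketch (client construction and the messages module name are
# assumptions based on common apitools conventions, not verified against the
# generated package):
#
#     client = NotebooksV1()
#     request = messages.NotebooksProjectsLocationsInstancesStartRequest(
#         name='projects/my-project/locations/us-central1-a/instances/my-instance')
#     operation = client.projects_locations_instances.Start(request)
#
# Start returns a long-running Operation that can be polled via the
# projects_locations_operations service defined further below.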
def Stop(self, request, global_params=None):
r"""Stops a notebook instance.
Args:
request: (NotebooksProjectsLocationsInstancesStopRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Stop')
return self._RunMethod(
config, request, global_params=global_params)
Stop.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:stop',
http_method='POST',
method_id='notebooks.projects.locations.instances.stop',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:stop',
request_field='stopInstanceRequest',
request_type_name='NotebooksProjectsLocationsInstancesStopRequest',
response_type_name='Operation',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (NotebooksProjectsLocationsInstancesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:testIamPermissions',
http_method='POST',
method_id='notebooks.projects.locations.instances.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='NotebooksProjectsLocationsInstancesTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
def UpdateConfig(self, request, global_params=None):
r"""Update Notebook Instance configurations.
Args:
request: (NotebooksProjectsLocationsInstancesUpdateConfigRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('UpdateConfig')
return self._RunMethod(
config, request, global_params=global_params)
UpdateConfig.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:updateConfig',
http_method='PATCH',
method_id='notebooks.projects.locations.instances.updateConfig',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:updateConfig',
request_field='updateInstanceConfigRequest',
request_type_name='NotebooksProjectsLocationsInstancesUpdateConfigRequest',
response_type_name='Operation',
supports_download=False,
)
def UpdateMetadataItems(self, request, global_params=None):
r"""Add/update metadata items for an instance.
Args:
request: (NotebooksProjectsLocationsInstancesUpdateMetadataItemsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(UpdateInstanceMetadataItemsResponse) The response message.
"""
config = self.GetMethodConfig('UpdateMetadataItems')
return self._RunMethod(
config, request, global_params=global_params)
UpdateMetadataItems.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:updateMetadataItems',
http_method='PATCH',
method_id='notebooks.projects.locations.instances.updateMetadataItems',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:updateMetadataItems',
request_field='updateInstanceMetadataItemsRequest',
request_type_name='NotebooksProjectsLocationsInstancesUpdateMetadataItemsRequest',
response_type_name='UpdateInstanceMetadataItemsResponse',
supports_download=False,
)
def UpdateShieldedInstanceConfig(self, request, global_params=None):
r"""Updates the Shielded instance configuration of a single Instance.
Args:
request: (NotebooksProjectsLocationsInstancesUpdateShieldedInstanceConfigRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('UpdateShieldedInstanceConfig')
return self._RunMethod(
config, request, global_params=global_params)
UpdateShieldedInstanceConfig.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:updateShieldedInstanceConfig',
http_method='PATCH',
method_id='notebooks.projects.locations.instances.updateShieldedInstanceConfig',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:updateShieldedInstanceConfig',
request_field='updateShieldedInstanceConfigRequest',
request_type_name='NotebooksProjectsLocationsInstancesUpdateShieldedInstanceConfigRequest',
response_type_name='Operation',
supports_download=False,
)
def Upgrade(self, request, global_params=None):
r"""Upgrades a notebook instance to the latest version.
Args:
request: (NotebooksProjectsLocationsInstancesUpgradeRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Upgrade')
return self._RunMethod(
config, request, global_params=global_params)
Upgrade.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:upgrade',
http_method='POST',
method_id='notebooks.projects.locations.instances.upgrade',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:upgrade',
request_field='upgradeInstanceRequest',
request_type_name='NotebooksProjectsLocationsInstancesUpgradeRequest',
response_type_name='Operation',
supports_download=False,
)
def UpgradeInternal(self, request, global_params=None):
r"""Allows notebook instances to call this endpoint to upgrade themselves. Do not use this method directly.
Args:
request: (NotebooksProjectsLocationsInstancesUpgradeInternalRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('UpgradeInternal')
return self._RunMethod(
config, request, global_params=global_params)
UpgradeInternal.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:upgradeInternal',
http_method='POST',
method_id='notebooks.projects.locations.instances.upgradeInternal',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:upgradeInternal',
request_field='upgradeInstanceInternalRequest',
request_type_name='NotebooksProjectsLocationsInstancesUpgradeInternalRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsOperationsService(base_api.BaseApiService):
"""Service class for the projects_locations_operations resource."""
_NAME = 'projects_locations_operations'
def __init__(self, client):
super(NotebooksV1.ProjectsLocationsOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
r"""Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
Args:
request: (NotebooksProjectsLocationsOperationsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel',
http_method='POST',
method_id='notebooks.projects.locations.operations.cancel',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:cancel',
request_field='cancelOperationRequest',
request_type_name='NotebooksProjectsLocationsOperationsCancelRequest',
response_type_name='Empty',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
Args:
request: (NotebooksProjectsLocationsOperationsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='DELETE',
method_id='notebooks.projects.locations.operations.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='NotebooksProjectsLocationsOperationsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
Args:
request: (NotebooksProjectsLocationsOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='GET',
method_id='notebooks.projects.locations.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='NotebooksProjectsLocationsOperationsGetRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `"/v1/{name=users/*}/operations"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
Args:
request: (NotebooksProjectsLocationsOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations',
http_method='GET',
method_id='notebooks.projects.locations.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}/operations',
request_field='',
request_type_name='NotebooksProjectsLocationsOperationsListRequest',
response_type_name='ListOperationsResponse',
supports_download=False,
)
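# Hypothetical usage sketch (not part of the generated client): how a caller might
# poll a long-running operation with the Get method defined above. The client
# construction and the service attribute name are assumptions; the request type name
# is taken from the method config.
#
# client = NotebooksV1()
# request = NotebooksProjectsLocationsOperationsGetRequest(
#     name='projects/my-project/locations/us-central1/operations/operation-123')
# operation = client.projects_locations_operations.Get(request)
# if not operation.done:
#     pass  # wait and poll again, as recommended by the Get docstring above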
class ProjectsLocationsRuntimesService(base_api.BaseApiService):
"""Service class for the projects_locations_runtimes resource."""
_NAME = 'projects_locations_runtimes'
def __init__(self, client):
super(NotebooksV1.ProjectsLocationsRuntimesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new Runtime in a given project and location.
Args:
request: (NotebooksProjectsLocationsRuntimesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/runtimes',
http_method='POST',
method_id='notebooks.projects.locations.runtimes.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['runtimeId'],
relative_path='v1/{+parent}/runtimes',
request_field='runtime',
request_type_name='NotebooksProjectsLocationsRuntimesCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a single Runtime.
Args:
request: (NotebooksProjectsLocationsRuntimesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/runtimes/{runtimesId}',
http_method='DELETE',
method_id='notebooks.projects.locations.runtimes.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='NotebooksProjectsLocationsRuntimesDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets details of a single Runtime. The location must be a regional endpoint rather than zonal.
Args:
request: (NotebooksProjectsLocationsRuntimesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Runtime) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/runtimes/{runtimesId}',
http_method='GET',
method_id='notebooks.projects.locations.runtimes.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='NotebooksProjectsLocationsRuntimesGetRequest',
response_type_name='Runtime',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. | |
p.x = 42
assert p.x == 42
assert typeof(p.y) is BArray
assert len(p.y) == 0
assert p.y == cast(BIntP, p) + 1
#
p = newp(new_pointer_type(BStruct), [100])
assert p.x == 100
assert len(p.y) == 0
#
# Tests for
# ffi.new("struct_with_var_array *", [field.., [the_array_items..]])
# ffi.new("struct_with_var_array *", [field.., array_size])
plist = []
for i in range(20):
if i % 2 == 0:
p = newp(new_pointer_type(BStruct), [100, [200, i, 400]])
else:
p = newp(new_pointer_type(BStruct), [100, 3])
p.y[1] = i
p.y[0] = 200
assert p.y[2] == 0
p.y[2] = 400
assert len(p.y) == 3
assert len(p[0].y) == 3
assert len(buffer(p)) == sizeof(BInt) * 4
assert sizeof(p[0]) == sizeof(BInt) * 4
plist.append(p)
for i in range(20):
p = plist[i]
assert p.x == 100
assert p.y[0] == 200
assert p.y[1] == i
assert p.y[2] == 400
assert list(p.y) == [200, i, 400]
#
# the following assignment works, as it normally would, for any array field
p.y = [501, 601]
assert list(p.y) == [501, 601, 400]
p[0].y = [500, 600]
assert list(p[0].y) == [500, 600, 400]
assert repr(p) == "<cdata 'foo *' owning %d bytes>" % (
sizeof(BStruct) + 3 * sizeof(BInt),)
assert repr(p[0]) == "<cdata 'foo' owning %d bytes>" % (
sizeof(BStruct) + 3 * sizeof(BInt),)
assert sizeof(p[0]) == sizeof(BStruct) + 3 * sizeof(BInt)
#
# from a non-owning pointer, we can't get the length
q = cast(new_pointer_type(BStruct), p)
assert q.y[0] == 500
assert q[0].y[0] == 500
py.test.raises(TypeError, len, q.y)
py.test.raises(TypeError, len, q[0].y)
assert typeof(q.y) is BIntP
assert typeof(q[0].y) is BIntP
assert sizeof(q[0]) == sizeof(BStruct)
#
# error cases
with pytest.raises(IndexError):
p.y[4]
with pytest.raises(TypeError):
p.y = cast(BIntP, 0)
with pytest.raises(TypeError):
p.y = 15
with pytest.raises(TypeError):
p.y = None
#
# accepting this may be specified by the C99 standard,
# or a GCC strangeness...
BStruct2 = new_struct_type("bar")
complete_struct_or_union(BStruct2, [('f', BStruct),
('n', BInt)])
p = newp(new_pointer_type(BStruct2), {'n': 42})
assert p.n == 42
#
# more error cases
py.test.raises(TypeError, newp, new_pointer_type(BStruct), [100, None])
BArray4 = new_array_type(BIntP, 4)
BStruct4 = new_struct_type("test4")
complete_struct_or_union(BStruct4, [('a', BArray4)]) # not varsized
py.test.raises(TypeError, newp, new_pointer_type(BStruct4), [None])
py.test.raises(TypeError, newp, new_pointer_type(BStruct4), [4])
p = newp(new_pointer_type(BStruct4), [[10, 20, 30]])
assert p.a[0] == 10
assert p.a[1] == 20
assert p.a[2] == 30
assert p.a[3] == 0
#
# struct of struct of varsized array
BStruct2 = new_struct_type("bar")
complete_struct_or_union(BStruct2, [('head', BInt),
('tail', BStruct)])
for i in range(2): # try to detect heap overwrites
p = newp(new_pointer_type(BStruct2), [100, [200, list(range(50))]])
assert p.tail.y[49] == 49
def test_struct_array_no_length_explicit_position():
BInt = new_primitive_type("int")
BIntP = new_pointer_type(BInt)
BArray = new_array_type(BIntP, None)
BStruct = new_struct_type("foo")
complete_struct_or_union(BStruct, [('x', BArray, -1, 0), # actually 3 items
('y', BInt, -1, 12)])
p = newp(new_pointer_type(BStruct), [[10, 20], 30])
assert p.x[0] == 10
assert p.x[1] == 20
assert p.x[2] == 0
assert p.y == 30
p = newp(new_pointer_type(BStruct), {'x': [40], 'y': 50})
assert p.x[0] == 40
assert p.x[1] == 0
assert p.x[2] == 0
assert p.y == 50
p = newp(new_pointer_type(BStruct), {'y': 60})
assert p.x[0] == 0
assert p.x[1] == 0
assert p.x[2] == 0
assert p.y == 60
#
# This "should" work too, allocating a larger structure
# (a bit strange in this case, but useful in general)
plist = []
for i in range(20):
p = newp(new_pointer_type(BStruct), [[10, 20, 30, 40, 50, 60, 70]])
plist.append(p)
for i in range(20):
p = plist[i]
assert p.x[0] == 10
assert p.x[1] == 20
assert p.x[2] == 30
assert p.x[3] == 40 == p.y
assert p.x[4] == 50
assert p.x[5] == 60
assert p.x[6] == 70
def test_struct_array_not_aligned():
# struct a { int x; char y; char z[]; };
# ends up of size 8, but 'z' is at offset 5
BChar = new_primitive_type("char")
BInt = new_primitive_type("int")
BCharP = new_pointer_type(BChar)
BArray = new_array_type(BCharP, None)
BStruct = new_struct_type("foo")
complete_struct_or_union(BStruct, [('x', BInt),
('y', BChar),
('z', BArray)])
assert sizeof(BStruct) == 2 * size_of_int()
def offsetof(BType, fieldname):
return typeoffsetof(BType, fieldname)[1]
base = offsetof(BStruct, 'z')
assert base == size_of_int() + 1
#
p = newp(new_pointer_type(BStruct), {'z': 3})
assert sizeof(p[0]) == base + 3
q = newp(new_pointer_type(BStruct), {'z': size_of_int()})
assert sizeof(q) == size_of_ptr()
assert sizeof(q[0]) == base + size_of_int()
assert len(p.z) == 3
assert len(p[0].z) == 3
assert len(q.z) == size_of_int()
assert len(q[0].z) == size_of_int()
def test_ass_slice():
BChar = new_primitive_type("char")
BArray = new_array_type(new_pointer_type(BChar), None)
p = newp(BArray, b"foobar")
p[2:5] = [b"*", b"Z", b"T"]
p[1:3] = b"XY"
assert list(p) == [b"f", b"X", b"Y", b"Z", b"T", b"r", b"\x00"]
with pytest.raises(TypeError):
p[1:5] = u+'XYZT'
with pytest.raises(TypeError):
p[1:5] = [1, 2, 3, 4]
#
for typename in ["wchar_t", "char16_t", "char32_t"]:
BUniChar = new_primitive_type(typename)
BArray = new_array_type(new_pointer_type(BUniChar), None)
p = newp(BArray, u+"foobar")
p[2:5] = [u+"*", u+"Z", u+"T"]
p[1:3] = u+"XY"
assert list(p) == [u+"f", u+"X", u+"Y", u+"Z", u+"T", u+"r", u+"\x00"]
with pytest.raises(TypeError):
p[1:5] = b'XYZT'
with pytest.raises(TypeError):
p[1:5] = [1, 2, 3, 4]
def test_void_p_arithmetic():
BVoid = new_void_type()
BInt = new_primitive_type("intptr_t")
p = cast(new_pointer_type(BVoid), 100000)
assert int(cast(BInt, p)) == 100000
assert int(cast(BInt, p + 42)) == 100042
assert int(cast(BInt, p - (-42))) == 100042
assert (p + 42) - p == 42
q = cast(new_pointer_type(new_primitive_type("char")), 100000)
with pytest.raises(TypeError):
p - q
with pytest.raises(TypeError):
q - p
with pytest.raises(TypeError):
p + cast(new_primitive_type('int'), 42)
with pytest.raises(TypeError):
p - cast(new_primitive_type('int'), 42)
def test_sizeof_sliced_array():
BInt = new_primitive_type("int")
BArray = new_array_type(new_pointer_type(BInt), 10)
p = newp(BArray, None)
assert sizeof(p[2:9]) == 7 * sizeof(BInt)
def test_packed():
BLong = new_primitive_type("long")
BChar = new_primitive_type("char")
BShort = new_primitive_type("short")
for extra_args in [(SF_PACKED,), (0, 1)]:
BStruct = new_struct_type("struct foo")
complete_struct_or_union(BStruct, [('a1', BLong, -1),
('a2', BChar, -1),
('a3', BShort, -1)],
None, -1, -1, *extra_args)
d = BStruct.fields
assert len(d) == 3
assert d[0][0] == 'a1'
assert d[0][1].type is BLong
assert d[0][1].offset == 0
assert d[0][1].bitshift == -1
assert d[0][1].bitsize == -1
assert d[1][0] == 'a2'
assert d[1][1].type is BChar
assert d[1][1].offset == sizeof(BLong)
assert d[1][1].bitshift == -1
assert d[1][1].bitsize == -1
assert d[2][0] == 'a3'
assert d[2][1].type is BShort
assert d[2][1].offset == sizeof(BLong) + sizeof(BChar)
assert d[2][1].bitshift == -1
assert d[2][1].bitsize == -1
assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort)
assert alignof(BStruct) == 1
#
BStruct2 = new_struct_type("struct foo")
complete_struct_or_union(BStruct2, [('b1', BChar, -1),
('b2', BLong, -1)],
None, -1, -1, 0, 2)
d = BStruct2.fields
assert len(d) == 2
assert d[0][0] == 'b1'
assert d[0][1].type is BChar
assert d[0][1].offset == 0
assert d[0][1].bitshift == -1
assert d[0][1].bitsize == -1
assert d[1][0] == 'b2'
assert d[1][1].type is BLong
assert d[1][1].offset == 2
assert d[1][1].bitshift == -1
assert d[1][1].bitsize == -1
assert sizeof(BStruct2) == 2 + sizeof(BLong)
assert alignof(BStruct2) == 2
def test_packed_with_bitfields():
if sys.platform == "win32":
py.test.skip("testing gcc behavior")
BLong = new_primitive_type("long")
BChar = new_primitive_type("char")
BStruct = new_struct_type("struct foo")
py.test.raises(NotImplementedError,
complete_struct_or_union,
BStruct, [('a1', BLong, 30),
('a2', BChar, 5)],
None, -1, -1, SF_PACKED)
def test_from_buffer():
import array
a = array.array('H', [10000, 20000, 30000])
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
c = from_buffer(BCharA, a)
assert typeof(c) is BCharA
assert len(c) == 6
assert repr(c) == "<cdata 'char[]' buffer len 6 from 'array.array' object>"
p = new_pointer_type(new_primitive_type("unsigned short"))
cast(p, c)[1] += 500
assert list(a) == [10000, 20500, 30000]
def test_from_buffer_not_str_unicode():
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
p1 = from_buffer(BCharA, b"foo")
assert p1 == from_buffer(BCharA, b"foo")
import gc; gc.collect()
assert p1 == from_buffer(BCharA, b"foo")
py.test.raises(TypeError, from_buffer, BCharA, u+"foo")
try:
from __builtin__ import buffer
except ImportError:
pass
else:
# Python 2 only
contents = from_buffer(BCharA, buffer(b"foo"))
assert len(contents) == len(p1)
for i in range(len(contents)):
assert contents[i] == p1[i]
p4 = buffer(u+"foo")
contents = from_buffer(BCharA, buffer(u+"foo"))
assert len(contents) == len(p4)
for i in range(len(contents)):
assert contents[i] == p4[i]
try:
from __builtin__ import memoryview
except ImportError:
pass
else:
contents = from_buffer(BCharA, memoryview(b"foo"))
assert len(contents) == len(p1)
for i in range(len(contents)):
assert contents[i] == p1[i]
def test_from_buffer_bytearray():
a = bytearray(b"xyz")
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
p = from_buffer(BCharA, a)
assert typeof(p) is BCharA
assert len(p) == 3
assert repr(p) == "<cdata 'char[]' buffer len 3 from 'bytearray' object>"
assert p[2] == b"z"
p[2] = b"."
assert a[2] == ord(".")
# Repository: CavallucciMartina/wav2vec2-sprint
from audiomentations import (
Compose,
AddGaussianNoise,
AddGaussianSNR,
ClippingDistortion,
FrequencyMask,
Gain,
LoudnessNormalization,
Normalize,
PitchShift,
PolarityInversion,
Shift,
TimeMask,
TimeStretch,
)
import time
import torchaudio
from torch import nn
import json
import re
from transformers import (
Trainer,
TrainingArguments,
Wav2Vec2Processor,
Wav2Vec2FeatureExtractor,
Wav2Vec2CTCTokenizer,
Wav2Vec2ForCTC
)
# Imports required by CTCTrainer below but missing from the original file
# (the sampler classes live in transformers.trainer_pt_utils; autocast is used
# when mixed-precision training is enabled):
from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler
from torch.cuda.amp import autocast
from datasets import ClassLabel
import random
import pandas as pd
from IPython.display import display, HTML
import collections
from datasets import Dataset, load_dataset, load_metric, concatenate_datasets
import argparse
from os import listdir, walk
from os.path import isfile, join
import IPython.display as ipd
import numpy as np
import librosa
import torch
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
from torch.utils.data import Dataset, DataLoader
@dataclass
class DataCollatorCTCWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor (:class:`~transformers.Wav2Vec2Processor`)
The processor used for processing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
max_length_labels (:obj:`int`, `optional`):
Maximum length of the ``labels`` returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.0 (Volta).
"""
processor: Wav2Vec2Processor
padding: Union[bool, str] = True
max_length: Optional[int] = None
max_length_labels: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
pad_to_multiple_of_labels: Optional[int] = None
def __init__(self, processor, padding=True, apply_gaussian_noise_with_p=0.5, apply_gain_with_p=0.5, apply_pitch_shift_with_p=0.5,
apply_time_stretch_with_p=0.5, sample_rate=16_000):
self.processor = processor
self.padding = padding
self.apply_gaussian_noise_with_p = apply_gaussian_noise_with_p
self.apply_gain_with_p = apply_gain_with_p
self.apply_pitch_shift_with_p = apply_pitch_shift_with_p
self.apply_time_stretch_with_p = apply_time_stretch_with_p
self.sample_rate = sample_rate
self.augmentator = None
if self.apply_gaussian_noise_with_p + self.apply_gain_with_p + self.apply_pitch_shift_with_p + self.apply_time_stretch_with_p > 0:
self.augmentator = Compose([
TimeStretch(min_rate=0.8, max_rate=1.2, leave_length_unchanged=False, p=self.apply_time_stretch_with_p),
PitchShift(min_semitones=-1, max_semitones=1, p=self.apply_pitch_shift_with_p),
Gain(min_gain_in_db=-1, max_gain_in_db=1, p=self.apply_gain_with_p),
AddGaussianNoise(min_amplitude=0.0001, max_amplitude=0.001, p=self.apply_gaussian_noise_with_p),
])
def _apply_augmentation(self, input_values: List[float]):
"""apply some audio augmentations in the given input_values"""
if self.augmentator is not None:
return self.augmentator(samples=np.array(input_values), sample_rate=self.sample_rate).tolist()
else:
return input_values
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
input_features = [{"input_values": self._apply_augmentation(feature["input_values"])} for feature in features]
label_features = [{"input_ids": feature["labels"]} for feature in features]
batch = self.processor.pad(
input_features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
with self.processor.as_target_processor():
labels_batch = self.processor.pad(
label_features,
padding=self.padding,
max_length=self.max_length_labels,
pad_to_multiple_of=self.pad_to_multiple_of_labels,
return_tensors="pt",
)
# replace padding with -100 to ignore loss correctly
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
batch["labels"] = labels
return batch
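# Illustration (assumed example, not from the original file): given
# features = [{"input_values": [...440 floats...], "labels": [12, 5, 7]},
#             {"input_values": [...620 floats...], "labels": [3, 9]}]
# the collator above returns a dict whose "input_values" tensor is padded to the
# longest sequence in the batch and whose "labels" tensor is padded with -100 so
# the padded label positions are ignored by the CTC loss.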
class CTCTrainer(Trainer):
def __init__(self, length_field_name="length", upload_model_to_wandb_each_step=None, lr_warmup_ratio=0.1,
lr_constant_ratio=0.4, sampling_rate=16_000, **kwargs):
super().__init__(**kwargs)
self.length_field_name = length_field_name
self.upload_model_to_wandb_each_step = upload_model_to_wandb_each_step
self.lr_warmup_ratio = lr_warmup_ratio
self.lr_constant_ratio = lr_constant_ratio
self.sampling_rate = sampling_rate
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Build the sampler.
if self.args.group_by_length:
lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not None else None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, lengths=lengths, model_input_name=model_input_name
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=lengths,
model_input_name=model_input_name,
)
else:
return super()._get_train_sampler()
def create_scheduler(self, num_training_steps: int):
"""
Setup the scheduler. The optimizer of the trainer must have been set up before this method is called.
This method was built based on https://arxiv.org/pdf/2006.13979 :
"The learning rate schedule has three phases: warm up for the first 10% of updates,
keep constant for 40% and then linearly decay for the remainder"
Args:
num_training_steps (int): The number of training steps to do.
"""
def lr_lambda(current_step):
warmup_steps = int(num_training_steps * self.lr_warmup_ratio)
constant_steps = int(num_training_steps * self.lr_constant_ratio)
if current_step < warmup_steps:
return float(current_step) / float(max(1, warmup_steps))
elif (self.lr_warmup_ratio + self.lr_constant_ratio) == 1.0 or current_step < (warmup_steps + constant_steps):
return 1
else:
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - (warmup_steps + constant_steps)))
)
self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda)
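# Worked illustration (assumed numbers) of the schedule implemented by lr_lambda:
# with num_training_steps=1000, lr_warmup_ratio=0.1 and lr_constant_ratio=0.4,
# warmup_steps=100 and constant_steps=400, so the multiplier is
#   step 50  -> 50/100 = 0.5                    (linear warm-up, first 10%)
#   step 300 -> 1.0                             (constant, next 40%)
#   step 750 -> (1000-750)/(1000-500) = 0.5     (linear decay over the remainder)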
def _apply_some_audio_transformations(self, inputs):
"""Perform some audio transformations"""
# adding an extra dimension for the channels as our data is mono audio and
# the expected shape of input for torch_audiomentations is (batch_size, num_channels, num_samples)
transformed_inputs = inputs["input_values"].unsqueeze(1)
transformed_inputs = self.augmentator(transformed_inputs, sample_rate=self.sampling_rate)
# returning the inputs to the original shape
transformed_inputs = torch.squeeze(transformed_inputs, 1)
inputs["input_values"] = transformed_inputs
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
loss = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
loss = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def show_random_elements(dataset, num_examples=1):
assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset)-1)
while pick in picks:
pick = random.randint(0, len(dataset)-1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
print(df)
def compute_metrics(pred):
pred_logits = pred.predictions
pred_ids = np.argmax(pred_logits, axis=-1)
pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
pred_str = processor.batch_decode(pred_ids)
label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
wer = wer_metric.compute(predictions=pred_str, references=label_str)
return {"wer": wer}
print("#### Loading dataset")
print("####MLLS")
main_dir = "../../mls_italian_opus"
train_dir = join(main_dir, 'train')
file_transcripts = 'transcripts.txt'
MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-italian"
CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ";", ":", '""', "%", '"', "?", "?", "·", "?", "~", "?",
"?", "?", "?", "?", "«", "»", "„", "“", "”", "?", "?", "‘", "’", "«", "»", "(", ")", "[", "]",
"{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "?", "‹", "›", "©", "®", "—", "?", "?",
"?", "?", "?", "?", "~", "?", ",", "{", "}", "(", ")", "[", "]", "?", "?", "?", "?",
"?", "?", "?", "?", "?", "?", "?", ":", "!", "?", "?", "?", "/", "\\", "º", "-", "^", "?", "ˆ"]
chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"
def remove_special_characters_mlls(sentence):
sentence = re.sub(chars_to_ignore_regex, "", sentence).strip().upper() + " "
return sentence
def create_hug_dataset(split_directory, split_train):
list_opus = []
labels_dict = {}
for (dirpath, dirnames, filenames) in walk(split_directory):
list_opus += [join(dirpath, file) for file in filenames if file.endswith(".opus")]
with open(join(split_directory, file_transcripts), 'r') as f:
content = f.read()
sentences = content.split(sep="\n")
for sent in sentences:
if(sent != ''):
sent = re.sub(' +', ' ', sent)
sent = sent.split("\t", maxsplit=1)
labels_dict[sent[0]] = sent[1]
audio_dict = {opus.split("/")[-1].split(".")[0]: opus for opus in list_opus}
print("#### Removing special characters from labels mlls")
labels_dict = {k: remove_special_characters_mlls(v) for k, v in labels_dict.items()}
dict_dataset = {'path': [], 'sentence': []}
for k, v in audio_dict.items():
dict_dataset['path'].append(v)
dict_dataset['sentence'].append(labels_dict[k])
tot_len = len(dict_dataset["path"])
n_data_train = int((tot_len*split_train)/100)
print(f"N DATA TRAIN mls: {n_data_train}, N DATA VAL mls: {tot_len-n_data_train}")
hug_dataset = Dataset.from_dict(dict_dataset)
return Dataset.from_dict(hug_dataset[:n_data_train]), Dataset.from_dict(hug_dataset[n_data_train:])
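# Illustration (assumed numbers): with 1000 audio/transcript pairs and split_train=80,
# n_data_train = int((1000*80)/100) = 800, so the call below yields an 800-example
# training Dataset and a 200-example validation Dataset.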
hug_dataset_train, hug_dataset_val = create_hug_dataset(train_dir, split_train=80)
show_random_elements(hug_dataset_train, 4)
processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
data_collator = DataCollatorCTCWithPadding(
processor=processor,
padding=True,
apply_gaussian_noise_with_p=0.5,
apply_gain_with_p=0.5,
apply_pitch_shift_with_p=0.5,
apply_time_stretch_with_p=0.5,
sample_rate=16_000,
)
wer_metric = load_metric("wer")
print("#### Creating model")
model = Wav2Vec2ForCTC.from_pretrained(
MODEL_ID,
attention_dropout=0.1,
activation_dropout=0.1,
hidden_dropout=0.1,
feat_proj_dropout=0.0,
mask_time_prob=0.05,
layerdrop=0.1,
gradient_checkpointing=True,
ctc_loss_reduction="mean",
pad_token_id=processor.tokenizer.pad_token_id,
vocab_size=len(processor.tokenizer),
ctc_zero_infinity=True
)
model.freeze_feature_extractor()
class CustomDataset(Dataset):
def __init__(self, dataset, processor):
self.dataset = dataset
self.processor = processor
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
item = self.dataset[idx]
return self.preprocessing(item)
def preprocessing(self, item):
speech_array, sampling_rate = torchaudio.load(item["path"])
item["speech"] = speech_array[0].numpy()
item["sampling_rate"] = sampling_rate
item["target_text"] = item["sentence"]
del item["path"], item["sentence"]
item["speech"] = | |
#
#
# Copyright (C) University of Melbourne 2012
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
import numpy
import time
import logging
import copy
import json
from os import path
from tools import mureilbuilder, mureilexception, mureiloutput, globalconfig
from tools import configurablebase, mureilbase
from generator import singlepassgenerator
logger = logging.getLogger(__name__)
class GeMureilMaster(mureilbase.MasterInterface, configurablebase.ConfigurableBase):
def get_full_config(self):
if not self.is_configured:
return None
# Will return configs collected from all objects, assembled into full_config.
full_conf = {}
full_conf['Master'] = self.config
full_conf[self.config['data']] = self.data.get_config()
full_conf[self.config['global']] = self.global_config
for gen_type in self.dispatch_order:
gen = getattr(self, gen_type)
full_conf[self.config[gen_type]] = gen.get_config()
return full_conf
def set_config(self, full_config, extra_data):
# Master explicitly does not copy in the global variables. It is too confusing
# to combine those with flags, defaults and values defined in the config files.
self.load_initial_config(full_config['Master'])
# Get the global variables
mureilbuilder.check_section_exists(full_config, self.config['global'])
if 'model' not in full_config[self.config['global']]:
full_config[self.config['global']]['model'] = 'tools.globalconfig.GlobalBase'
self.global_calc = mureilbuilder.create_instance(full_config, None, self.config['global'],
mureilbase.ConfigurableInterface)
self.global_config = self.global_calc.get_config()
# Now check the dispatch_order, to get a list of the generators
for gen in self.config['dispatch_order']:
self.config_spec += [(gen, None, None)]
self.update_from_config_spec()
self.check_config()
self.dispatch_order = self.config['dispatch_order']
# Set up the data class and get the data
self.data = mureilbuilder.create_instance(full_config, self.global_config, self.config['data'],
mureilbase.DataSinglePassInterface)
self.global_calc.update_config({'data_ts_length': self.data.get_ts_length()})
self.global_calc.post_data_global_calcs()
self.global_config = self.global_calc.get_config()
# Instantiate the generator objects, set their data, determine their param requirements
param_count = 0
self.gen_list = {}
self.gen_params = {}
for i in range(len(self.dispatch_order)):
gen_type = self.dispatch_order[i]
# Build the generator instances
gen = mureilbuilder.create_instance(full_config, self.global_config,
self.config[gen_type], singlepassgenerator.SinglePassGeneratorBase)
self.gen_list[gen_type] = gen
# Supply data as requested by the generator
mureilbuilder.supply_single_pass_data(gen, self.data, gen_type)
# Determine how many parameters this generator requires and
# allocate the slots in the params list
params_req = gen.get_param_count()
if (params_req == 0):
self.gen_params[gen_type] = (0, 0)
else:
self.gen_params[gen_type] = (param_count,
param_count + params_req)
param_count += params_req
self.param_count = param_count
self.is_configured = True
def get_config_spec(self):
"""Return a list of tuples of format (name, conversion function, default),
e.g. ('capex', float, 2.0). Put None if no conversion required, or if no
default value, e.g. ('name', None, None)
Configuration:
data: The name of the configuration file section specifying the data class to use and its
configuration parameters. Defaults to 'Data'.
global: The name of the configuration file section specifying the global configuration parameters.
Defaults to 'Global'.
dispatch_order: a list of strings specifying the names of the generator models to dispatch, in order,
to meet the demand. All of these models then require a parameter defining the configuration file
section where they are configured. e.g. dispatch_order: solar wind gas. This requires additional
parameters, for example solar: Solar, wind: Wind and gas: Instant_Gas to be defined, and corresponding
sections Solar, Wind and Instant_Gas to configure those models.
iterations: The number of iterations of the algorithm to execute. Defaults to 100.
output_file: The filename to write the final output data to. Defaults to 'ge.pkl'.
do_plots: Defaults to False. If True, output plots at the end of the run.
year_list: A list of years specifying the start year of the periods to run, e.g.
year_list: 2010 2020 2030 2040 2050
carbon_price_list: A list of integer carbon prices, matching in length the year_list.
discount_rate: The discount rate in percent.
"""
return [
('data', None, 'Data'),
('global', None, 'Global'),
('output_file', None, 'ge.pkl'),
('dispatch_order', mureilbuilder.make_string_list, None),
('do_plots', mureilbuilder.string_to_bool, False),
('year_list', mureilbuilder.make_string_list, None),
('carbon_price_list', mureilbuilder.make_int_list, None),
('discount_rate', float, 0.0)
]
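# Hypothetical configuration fragment (illustration only, not from the project docs)
# matching the spec described in the docstring above:
#
# [Master]
# dispatch_order: solar wind gas
# solar: Solar
# wind: Wind
# gas: Instant_Gas
# year_list: 2010 2020 2030 2040 2050
# carbon_price_list: 20 30 40 50 60
# discount_rate: 5.0
# [Solar]
# ... model-specific parameters ...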
def run(self, extra_data):
if (not self.is_configured):
msg = 'run requested, but GeMureilMaster is not configured'
logger.critical(msg)
raise mureilexception.ConfigException(msg, {})
# Read in the json data for generator capacity
self.load_js(extra_data)
all_years_out = {}
# Compute an annual total for generation
output_multiplier = (self.global_config['variable_cost_mult'] /
float(self.global_config['time_period_yrs']))
cuml_cost = 0.0
for year_index in range(len(self.config['year_list'])):
## MG - this is a hack. The config should be set with all
## of the values at the start, and then be passed the year,
## not have them updated each time. This is ok here as it's
## only evaluated once anyway.
results = self.evaluate_results(year_index)
year = self.config['year_list'][year_index]
# print results['gen_desc']
all_years_out[str(year)] = year_out = {}
# Output, in MWh
year_out['output'] = output_section = {}
# Cost, in $M
year_out['cost'] = cost_section = {}
# Total carbon emissions
year_out['co2_tonnes'] = 0.0
# Total demand, in MWh per annum
for generator_type, value in results['other'].iteritems():
if value is not None:
if 'ts_demand' in value:
year_out['demand'] = '{:.2f}'.format(
abs(sum(value['ts_demand'])) * self.global_config['timestep_hrs'] *
output_multiplier)
# Total output, in MWh per annum
for gen_type, vals in results['output'].iteritems():
output_section[gen_type] = '{:.2f}'.format(
sum(vals) * self.global_config['timestep_hrs'] *
output_multiplier)
# Total cost, per decade
this_period_cost = 0.0
for gen_type, value in results['cost'].iteritems():
cost_section[gen_type] = value
this_period_cost += value
# or as a string:
# cost_section[generator_type] = '{:.2f}'.format(value)
# Total cumulative cost, with discounting
# This assumes the costs are all incurred at the beginning of
# each period (a simplification)
year_out['period_cost'] = this_period_cost
cuml_cost += this_period_cost / ((1 + (self.config['discount_rate'] / 100)) **
(float(self.global_config['time_period_yrs']) * year_index))
year_out['discounted_cumulative_cost'] = cuml_cost
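# Worked illustration (assumed values): with discount_rate = 5 (%),
# time_period_yrs = 10 and a period cost of 100 ($M) in the second period
# (year_index = 1), the discounted contribution is 100 / 1.05**10 ≈ 61.4 ($M).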
for gen_type, value in results['other'].iteritems():
if value is not None:
if 'reliability' in value:
year_out['reliability'] = value['reliability']
if 'carbon' in value:
year_out['co2_tonnes'] += value['carbon']
if 'reliability' not in year_out:
year_out['reliability'] = 100
return all_years_out
def load_js(self, json_data):
""" Input: JSON data structure with info on generators and demand management
at different time periods.
Output: None
Reads in the data and computes the params for each time period.
"""
generators = json.loads(json_data)['selections']['generators']
## Only coal, gas, wind and solar are handled, and only one of each.
## hydro is awaiting a rainfall-based model.
self.total_params = {}
self.inc_params = {}
gen_total_table = {}
gen_inc_table = {}
year_list = self.config['year_list']
gen_type_list = ['coal', 'gas', 'wind', 'solar']
gen_param_counts = {}
# Initialise the tables of capacity
for gen_type in gen_type_list:
gen_param_counts[gen_type] = self.gen_list[gen_type].get_param_count()
gen_total_table[gen_type] = numpy.zeros((len(self.config['year_list']),
gen_param_counts[gen_type]))
gen_inc_table[gen_type] = numpy.zeros((len(self.config['year_list']),
gen_param_counts[gen_type]))
# Fill in the tables of capacity
for gen in generators:
gen_type = gen['type']
if gen_type not in gen_type_list:
msg = 'Generator ' + str(gen_type) + ' ignored'
logger.warning(msg)
else:
this_total_table = gen_total_table[gen_type]
this_inc_table = gen_inc_table[gen_type]
loc_index = self.find_loc_index(gen)
if (loc_index >= gen_param_counts[gen_type]):
msg = ('Generator ' + gen['id'] + ' looked up index as ' + str(loc_index) +
' but the ' + gen_type + ' has data for ' + str(gen_param_counts[gen_type]) +
' sites.')
raise mureilexception.ConfigException(msg, {})
# build date could be specified as earlier, so capex is not paid.
build_index = numpy.where(numpy.array(year_list) == str(gen['decade']))
if len(build_index[0]) > 0:
build_index = build_index[0][0]
else:
build_index = -1
decommission_index = numpy.where(numpy.array(year_list) == str(gen['decomission']))
if len(decommission_index[0]) > 0:
decommission_index = decommission_index[0][0]
else:
decommission_index = len(year_list) - 1
# accumulate new capacity in the incremental list
if build_index >= 0:
this_inc_table[build_index][loc_index] += gen['capacity']
# and add the new capacity to the total across all years until decommissioning
start_fill = build_index
if (build_index == -1):
start_fill = 0
for i in range(start_fill, decommission_index + 1):
this_total_table[i][loc_index] += gen['capacity']
# Convert the tables of capacity to params for
'83271005',
'83412009',
'83707009',
'83774001',
'83883001',
'8414002',
'84194006',
'84224006',
'84233008',
'84261000119106',
'84414000',
'84490005',
'84681008',
'84724007',
'84753008',
'84889008',
'85051008',
'8519009',
'85224001',
'8549006',
'85495007',
'8555001',
'85857008',
'85884009',
'85904008',
'86028001',
'86070006',
'86279000',
'8628002',
'86443005',
'86479002',
'86615009',
'86709000',
'87073000',
'87117006',
'8725005',
'87282003',
'87318008',
'87415007',
'87665008',
'87696004',
'8771003',
'87728001',
'8776008',
'87815001',
'88027004',
'88157006',
'88220006',
'88361008',
'8838005',
'88547002',
'8872002',
'88813005',
'88850006',
'88943008',
'88981003',
'8912009',
'89194009',
'8954007',
'89637003',
'89933001',
'90206003',
'90271007',
'9091006',
'90979004',
'91038008',
'91155009',
'91195006',
'91538002',
'91541006',
'91554004',
'91669008',
'91862002',
'9241004',
'9516007',
'95423008',
'95424002',
'95425001',
'9557000',
'95593001',
'95679004',
'95746006',
'95811009',
'95886009',
'95889002',
'96171000119103',
'9713002',
'987007',
'9941009'
}
class ComplicationsOfPregnancyChildbirthAndThePuerperium(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent complications related to pregnancy or childbirth.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
**Inclusion Criteria:** Includes only relevant concepts associated with complications of pregnancy or childbirth. This is a grouping of ICD-10-CM, ICD-9-CM, and SNOMED CT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.111.12.1012'
VALUE_SET_NAME = 'Complications of Pregnancy, Childbirth and the Puerperium'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
ICD10CM = {
'N96',
'O000',
'O0000',
'O0001',
'O001',
'O0010',
'O00101',
'O00102',
'O00109',
'O0011',
'O00111',
'O00112',
'O00119',
'O002',
'O0020',
'O00201',
'O00202',
'O00209',
'O0021',
'O00211',
'O00212',
'O00219',
'O008',
'O0080',
'O0081',
'O009',
'O0090',
'O0091',
'O010',
'O011',
'O019',
'O020',
'O021',
'O0281',
'O0289',
'O029',
'O030',
'O031',
'O032',
'O0330',
'O0331',
'O0332',
'O0333',
'O0334',
'O0335',
'O0336',
'O0337',
'O0338',
'O0339',
'O034',
'O035',
'O036',
'O037',
'O0380',
'O0381',
'O0382',
'O0383',
'O0384',
'O0385',
'O0386',
'O0387',
'O0388',
'O0389',
'O039',
'O045',
'O046',
'O047',
'O0480',
'O0481',
'O0482',
'O0483',
'O0484',
'O0485',
'O0486',
'O0487',
'O0488',
'O0489',
'O070',
'O071',
'O072',
'O0730',
'O0731',
'O0732',
'O0733',
'O0734',
'O0735',
'O0736',
'O0737',
'O0738',
'O0739',
'O074',
'O080',
'O081',
'O082',
'O083',
'O084',
'O085',
'O086',
'O087',
'O0881',
'O0882',
'O0883',
'O0889',
'O089',
'O0900',
'O0901',
'O0902',
'O0903',
'O0910',
'O0911',
'O0912',
'O0913',
'O09211',
'O09212',
'O09213',
'O09219',
'O09291',
'O09292',
'O09293',
'O09299',
'O0930',
'O0931',
'O0932',
'O0933',
'O0940',
'O0941',
'O0942',
'O0943',
'O09511',
'O09512',
'O09513',
'O09519',
'O09521',
'O09522',
'O09523',
'O09529',
'O09611',
'O09612',
'O09613',
'O09619',
'O09621',
'O09622',
'O09623',
'O09629',
'O0970',
'O0971',
'O0972',
'O0973',
'O09811',
'O09812',
'O09813',
'O09819',
'O09821',
'O09822',
'O09823',
'O09829',
'O09891',
'O09892',
'O09893',
'O09899',
'O0990',
'O0991',
'O0992',
'O0993',
'O09A',
'O09A0',
'O09A1',
'O09A2',
'O09A3',
'O10011',
'O10012',
'O10013',
'O10019',
'O1002',
'O1003',
'O10111',
'O10112',
'O10113',
'O10119',
'O1012',
'O1013',
'O10211',
'O10212',
'O10213',
'O10219',
'O1022',
'O1023',
'O10311',
'O10312',
'O10313',
'O10319',
'O1032',
'O1033',
'O10411',
'O10412',
'O10413',
'O10419',
'O1042',
'O1043',
'O10911',
'O10912',
'O10913',
'O10919',
'O1092',
'O1093',
'O111',
'O112',
'O113',
'O114',
'O115',
'O119',
'O1200',
'O1201',
'O1202',
'O1203',
'O1204',
'O1205',
'O1210',
'O1211',
'O1212',
'O1213',
'O1214',
'O1215',
'O1220',
'O1221',
'O1222',
'O1223',
'O1224',
'O1225',
'O131',
'O132',
'O133',
'O134',
'O135',
'O139',
'O1400',
'O1402',
'O1403',
'O1404',
'O1405',
'O1410',
'O1412',
'O1413',
'O1414',
'O1415',
'O1420',
'O1422',
'O1423',
'O1424',
'O1425',
'O1490',
'O1492',
'O1493',
'O1494',
'O1495',
'O1500',
'O1502',
'O1503',
'O151',
'O152',
'O159',
'O161',
'O162',
'O163',
'O164',
'O165',
'O169',
'O200',
'O208',
'O209',
'O210',
'O211',
'O212',
'O218',
'O219',
'O2200',
'O2201',
'O2202',
'O2203',
'O2210',
'O2211',
'O2212',
'O2213',
'O2220',
'O2221',
'O2222',
'O2223',
'O2230',
'O2231',
'O2232',
'O2233',
'O2240',
'O2241',
'O2242',
'O2243',
'O2250',
'O2251',
'O2252',
'O2253',
'O228X1',
'O228X2',
'O228X3',
'O228X9',
'O2290',
'O2291',
'O2292',
'O2293',
'O2300',
'O2301',
'O2302',
'O2303',
'O2310',
'O2311',
'O2312',
'O2313',
'O2320',
'O2321',
'O2322',
'O2323',
'O2330',
'O2331',
'O2332',
'O2333',
'O2340',
'O2341',
'O2342',
'O2343',
'O23511',
'O23512',
'O23513',
'O23519',
'O23521',
'O23522',
'O23523',
'O23529',
'O23591',
'O23592',
'O23593',
'O23599',
'O2390',
'O2391',
'O2392',
'O2393',
'O24011',
'O24012',
'O24013',
'O24019',
'O2402',
'O2403',
'O24111',
'O24112',
'O24113',
'O24119',
'O2412',
'O2413',
'O24311',
'O24312',
'O24313',
'O24319',
'O2432',
'O2433',
'O24410',
'O24414',
'O24415',
'O24419',
'O24420',
'O24424',
'O24425',
'O24429',
'O24430',
'O24434',
'O24435',
'O24439',
'O24811',
'O24812',
'O24813',
'O24819',
'O2482',
'O2483',
'O24911',
'O24912',
'O24913',
'O24919',
'O2492',
'O2493',
'O2510',
'O2511',
'O2512',
'O2513',
'O252',
'O253',
'O2600',
'O2601',
'O2602',
'O2603',
'O2610',
'O2611',
'O2612',
'O2613',
'O2620',
'O2621',
'O2622',
'O2623',
'O2630',
'O2631',
'O2632',
'O2633',
'O2640',
'O2641',
'O2642',
'O2643',
'O2650',
'O2651',
'O2652',
'O2653',
'O26611',
'O26612',
'O26613',
'O26619',
'O2662',
'O2663',
'O26711',
'O26712',
'O26713',
'O26719',
'O2672',
'O2673',
'O26811',
'O26812',
'O26813',
'O26819',
'O26821',
'O26822',
'O26823',
'O26829',
'O26831',
'O26832',
'O26833',
'O26839',
'O26841',
'O26842',
'O26843',
'O26849',
'O26851',
'O26852',
'O26853',
'O26859',
'O2686',
'O26872',
'O26873',
'O26879',
'O26891',
'O26892',
'O26893',
'O26899',
'O2690',
'O2691',
'O2692',
'O2693',
'O280',
'O281',
'O282',
'O283',
'O284',
'O285',
'O288',
'O289',
'O29011',
'O29012',
'O29013',
'O29019',
'O29021',
'O29022',
'O29023',
'O29029',
'O29091',
'O29092',
'O29093',
'O29099',
'O29111',
'O29112',
'O29113',
'O29119',
'O29121',
'O29122',
'O29123',
'O29129',
'O29191',
'O29192',
'O29193',
'O29199',
'O29211',
'O29212',
'O29213',
'O29219',
'O29291',
'O29292',
'O29293',
'O29299',
'O293X1',
'O293X2',
'O293X3',
'O293X9',
'O2940',
'O2941',
'O2942',
'O2943',
'O295X1',
'O295X2',
'O295X3',
'O295X9',
'O2960',
'O2961',
'O2962',
'O2963',
'O298X1',
'O298X2',
'O298X3',
'O298X9',
'O2990',
'O2991',
'O2992',
'O2993',
'O30001',
'O30002',
'O30003',
'O30009',
'O30011',
'O30012',
'O30013',
'O30019',
'O30021',
'O30022',
'O30023',
'O30029',
'O30031',
'O30032',
'O30033',
'O30039',
'O30041',
'O30042',
'O30043',
'O30049',
'O30091',
'O30092',
'O30093',
'O30099',
'O30101',
'O30102',
'O30103',
'O30109',
'O30111',
'O30112',
'O30113',
'O30119',
'O30121',
'O30122',
'O30123',
'O30129',
'O30191',
'O30192',
'O30193',
'O30199',
'O30201',
'O30202',
'O30203',
'O30209',
'O30211',
'O30212',
'O30213',
'O30219',
'O30221',
'O30222',
'O30223',
'O30229',
'O30231',
'O30232',
'O30233',
'O30239',
'O30291',
'O30292',
'O30293',
'O30299',
'O30801',
'O30802',
'O30803',
'O30809',
'O30811',
'O30812',
'O30813',
'O30819',
'O30821',
'O30822',
'O30823',
'O30829',
'O30831',
'O30832',
'O30833',
'O30839',
'O30891',
'O30892',
'O30893',
'O30899',
'O3090',
'O3091',
'O3092',
'O3093',
'O3100X0',
'O3100X1',
'O3100X2',
'O3100X3',
'O3100X4',
'O3100X5',
'O3100X9',
'O3101X0',
'O3101X1',
'O3101X2',
'O3101X3',
'O3101X4',
'O3101X5',
'O3101X9',
'O3102X0',
'O3102X1',
'O3102X2',
'O3102X3',
'O3102X4',
'O3102X5',
'O3102X9',
'O3103X0',
'O3103X1',
'O3103X2',
'O3103X3',
'O3103X4',
'O3103X5',
'O3103X9',
'O3110X0',
'O3110X1',
'O3110X2',
'O3110X3',
'O3110X4',
'O3110X5',
'O3110X9',
'O3111X0',
'O3111X1',
'O3111X2',
'O3111X3',
'O3111X4',
'O3111X5',
'O3111X9',
'O3112X0',
'O3112X1',
'O3112X2',
'O3112X3',
'O3112X4',
'O3112X5',
'O3112X9',
'O3113X0',
'O3113X1',
'O3113X2',
'O3113X3',
'O3113X4',
'O3113X5',
'O3113X9',
'O3120X0',
'O3120X1',
'O3120X2',
'O3120X3',
'O3120X4',
'O3120X5',
'O3120X9',
'O3121X0',
'O3121X1',
'O3121X2',
'O3121X3',
'O3121X4',
'O3121X5',
'O3121X9',
'O3122X0',
'O3122X1',
'O3122X2',
'O3122X3',
'O3122X4',
'O3122X5',
'O3122X9',
'O3123X0',
'O3123X1',
'O3123X2',
'O3123X3',
'O3123X4',
'O3123X5',
'O3123X9',
'O3130X0',
'O3130X1',
'O3130X2',
'O3130X3',
'O3130X4',
'O3130X5',
'O3130X9',
'O3131X0',
'O3131X1',
'O3131X2',
'O3131X3',
'O3131X4',
'O3131X5',
'O3131X9',
'O3132X0',
'O3132X1',
'O3132X2',
'O3132X3',
'O3132X4',
'O3132X5',
'O3132X9',
'O3133X0',
'O3133X1',
'O3133X2',
'O3133X3',
'O3133X4',
'O3133X5',
'O3133X9',
'O318X10',
'O318X11',
'O318X12',
'O318X13',
'O318X14',
'O318X15',
'O318X19',
'O318X20',
'O318X21',
'O318X22',
'O318X23',
'O318X24',
'O318X25',
'O318X29',
'O318X30',
'O318X31',
'O318X32',
'O318X33',
'O318X34',
'O318X35',
'O318X39',
'O318X90',
'O318X91',
'O318X92',
'O318X93',
'O318X94',
'O318X95',
'O318X99',
'O320XX0',
'O320XX1',
'O320XX2',
'O320XX3',
'O320XX4',
'O320XX5',
'O320XX9',
'O321XX0',
'O321XX1',
'O321XX2',
'O321XX3',
'O321XX4',
'O321XX5',
'O321XX9',
'O322XX0',
'O322XX1',
'O322XX2',
'O322XX3',
'O322XX4',
'O322XX5',
'O322XX9',
'O323XX0',
'O323XX1',
'O323XX2',
'O323XX3',
'O323XX4',
'O323XX5',
'O323XX9',
'O324XX0',
'O324XX1',
'O324XX2',
'O324XX3',
'O324XX4',
'O324XX5',
'O324XX9',
'O326XX0',
'O326XX1',
'O326XX2',
'O326XX3',
'O326XX4',
'O326XX5',
'O326XX9',
'O328XX0',
'O328XX1',
'O328XX2',
'O328XX3',
'O328XX4',
'O328XX5',
'O328XX9',
'O329XX0',
'O329XX1',
'O329XX2',
'O329XX3',
'O329XX4',
'O329XX5',
'O329XX9',
'O330',
'O331',
'O332',
'O333XX0',
'O333XX1',
'O333XX2',
'O333XX3',
'O333XX4',
'O333XX5',
'O333XX9',
'O334XX0',
'O334XX1',
'O334XX2',
'O334XX3',
'O334XX4',
'O334XX5',
'O334XX9',
'O335XX0',
'O335XX1',
'O335XX2',
'O335XX3',
'O335XX4',
'O335XX5',
'O335XX9',
'O336XX0',
'O336XX1',
'O336XX2',
'O336XX3',
'O336XX4',
'O336XX5',
'O336XX9',
'O337',
'O337XX0',
'O337XX1',
'O337XX2',
'O337XX3',
'O337XX4',
'O337XX5',
'O337XX9',
'O338',
'O339',
'O3400',
'O3401',
'O3402',
'O3403',
'O3410',
'O3411',
'O3412',
'O3413',
'O3421',
'O34211',
'O34212',
'O34219',
'O3429',
'O3430',
'O3431',
'O3432',
'O3433',
'O3440',
'O3441',
'O3442',
'O3443',
'O34511',
'O34512',
'O34513',
'O34519',
'O34521',
'O34522',
'O34523',
'O34529',
'O34531',
'O34532',
'O34533',
'O34539',
'O34591',
'O34592',
'O34593',
'O34599',
'O3460',
'O3461',
'O3462',
'O3463',
'O3470',
'O3471',
'O3472',
'O3473',
'O3480',
'O3481',
'O3482',
'O3483',
'O3490',
'O3491',
'O3492',
'O3493',
'O350XX0',
'O350XX1',
'O350XX2',
'O350XX3',
'O350XX4',
'O350XX5',
'O350XX9',
'O351XX0',
'O351XX1',
'O351XX2',
'O351XX3',
'O351XX4',
'O351XX5',
'O351XX9',
'O352XX0',
'O352XX1',
'O352XX2',
'O352XX3',
'O352XX4',
'O352XX5',
'O352XX9',
'O353XX0',
'O353XX1',
'O353XX2',
'O353XX3',
'O353XX4',
'O353XX5',
'O353XX9',
'O354XX0',
'O354XX1',
'O354XX2',
'O354XX3',
'O354XX4',
'O354XX5',
'O354XX9',
'O355XX0',
'O355XX1',
'O355XX2',
'O355XX3',
'O355XX4',
'O355XX5',
'O355XX9',
'O356XX0',
'O356XX1',
'O356XX2',
'O356XX3',
'O356XX4',
'O356XX5',
'O356XX9',
'O357XX0',
'O357XX1',
'O357XX2',
'O357XX3',
'O357XX4',
'O357XX5',
'O357XX9',
'O358XX0',
'O358XX1',
'O358XX2',
'O358XX3',
'O358XX4',
'O358XX5',
'O358XX9',
'O359XX0',
'O359XX1',
'O359XX2',
'O359XX3',
'O359XX4',
'O359XX5',
'O359XX9',
'O360110',
'O360111',
'O360112',
'O360113',
'O360114',
'O360115',
'O360119',
'O360120',
'O360121',
'O360122',
'O360123',
'O360124',
'O360125',
'O360129',
'O360130',
'O360131',
'O360132',
'O360133',
'O360134',
'O360135',
'O360139',
'O360190',
'O360191',
'O360192',
'O360193',
'O360194',
'O360195',
'O360199',
'O360910',
'O360911',
'O360912',
'O360913',
'O360914',
'O360915',
'O360919',
'O360920',
'O360921',
'O360922',
'O360923',
'O360924',
'O360925',
'O360929',
'O360930',
'O360931',
'O360932',
'O360933',
'O360934',
'O360935',
'O360939',
'O360990',
'O360991',
'O360992',
'O360993',
'O360994',
'O360995',
'O360999',
'O361110',
'O361111',
'O361112',
'O361113',
'O361114',
'O361115',
'O361119',
'O361120',
'O361121',
'O361122',
'O361123',
'O361124',
'O361125',
'O361129',
'O361130',
'O361131',
'O361132',
'O361133',
'O361134',
'O361135',
'O361139',
'O361190',
'O361191',
'O361192',
'O361193',
'O361194',
'O361195',
'O361199',
'O361910',
'O361911',
'O361912',
'O361913',
'O361914',
'O361915',
'O361919',
'O361920',
'O361921',
'O361922',
'O361923',
'O361924',
'O361925',
'O361929',
'O361930',
'O361931',
'O361932',
'O361933',
'O361934',
'O361935',
'O361939',
'O361990',
'O361991',
'O361992',
'O361993',
'O361994',
'O361995',
'O361999',
'O3620X0',
'O3620X1',
'O3620X2',
'O3620X3',
'O3620X4',
'O3620X5',
'O3620X9',
'O3621X0',
'O3621X1',
'O3621X2',
'O3621X3',
'O3621X4',
'O3621X5',
'O3621X9',
'O3622X0',
'O3622X1',
'O3622X2',
'O3622X3',
'O3622X4',
'O3622X5',
'O3622X9',
'O3623X0',
'O3623X1',
'O3623X2',
'O3623X3',
'O3623X4',
'O3623X5',
'O3623X9',
# File: Dataset_Management.py
import torch
import numpy as np
class Artificial_DataLoader:
def __init__(self, world_size, rank, device, File, sampling_rate, number_of_concentrations, number_of_durations, number_of_diameters, window, length, batch_size,
max_num_of_pulses_in_a_wind=75):
assert window < length
self.world_size = world_size
self.rank = rank
self.rank_id = rank # the rank id will change according to the epoch
self.device = device
self.File = File
self.sampling_rate = sampling_rate
self.number_of_concentrations = number_of_concentrations
self.number_of_durations = number_of_durations
self.number_of_diameters = number_of_diameters
self.windows_per_signal = int(length / window)
# this is the shape of the structure of windows in the dataset
self.shape = (number_of_concentrations, number_of_durations, number_of_diameters, int(length / window))
# this is the total number of windows in the dataset
self.total_number_of_windows = self.number_of_concentrations * \
self.number_of_durations * \
self.number_of_diameters * \
self.windows_per_signal
# this is the size of the fragment from the total number of windows that corresponds to this rank
self.shard_size = self.total_number_of_windows // world_size
# if there is residue in the distribution of windows among ranks
# all shard sizes have to be incremented by one
# since all shard sizes have to be equal
if self.total_number_of_windows % world_size != 0:
self.shard_size += 1
self.window = window
self.length = length
self.batch_size = batch_size
self.max_num_of_pulses_in_a_wind = max_num_of_pulses_in_a_wind
self.avail_winds = self.get_avail_winds(self.shard_size)
# unravel indices in advance to avoid computational cost during execution
auxiliary = [i for i in range(self.total_number_of_windows)]
self.unraveled_indices = np.unravel_index(auxiliary, self.shape)
self.samples_indices = []
self.number_of_avail_windows = self.get_number_of_avail_windows()
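# Worked illustration (assumed arguments): with number_of_concentrations=5,
# number_of_durations=4, number_of_diameters=3, length=10 and window=2,
# windows_per_signal = 5 and total_number_of_windows = 5*4*3*5 = 300; with
# world_size=4 the shard_size is 300 // 4 = 75 (no residue, so no +1 adjustment).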
@staticmethod
def get_avail_winds(shard_size):
return torch.ones((shard_size), dtype=bool)
# determines the quota of any number of things among ranks including residues
# for instance if total is 10 and world_size is 3, then rank 0 will have a quota of 4
# rank 1 a quota of 3 and rank 2 a quota of 3 too.
def _get_quota(self, world_size, rank, total):
assert(total >= world_size)
quota = total // world_size
residue = total % world_size
if rank < residue:
quota += 1
return quota
# restart all the available windows as it is when the object is created
# it rotates the identity of ranks at each epoch in order to make each rank "see" all the samples
def reset_avail_winds(self, epoch):
self.rank_id = (self.rank + epoch) % self.world_size
# this is the fragment from the total number of windows that corresponds to this rank
self.shard_size = self._get_quota(self.world_size, self.rank_id, self.total_number_of_windows)
self.avail_winds = torch.ones((self.shard_size), dtype=bool)
self.number_of_avail_windows = self.get_number_of_avail_windows()
# make 100 random windows available
def _reset_random_winds(self):
i = 0
while i < 100:
window = torch.randint(0, self.shard_size, (1,1))[0].item()
if self.avail_winds[window] == False:
self.avail_winds[window] = True
i += 1
self.number_of_avail_windows += 100
# returns the number of available windows in the object
def get_number_of_avail_windows(self):
return sum(self.avail_winds==True).item()
# map from the local available resource in the rank to the global available resource in the world
# For instance, global resource is:
# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
# world_size = 3
#
# rank 0 rank 1 rank 2
# 0, 1, 2, 3 0, 1, 2 0, 1, 2
#
# the mapping formula is:
# (sample * world_size) + rank
#
# rank 0 rank 1 rank 2
# (0 * 3) + 0 = 0 (0 * 3) + 1 = 1 (0 * 3) + 2 = 2
# (1 * 3) + 0 = 3 (1 * 3) + 1 = 4 (1 * 3) + 2 = 5
# (2 * 3) + 0 = 6 (2 * 3) + 1 = 7 (2 * 3) + 2 = 8
# (3 * 3) + 0 = 9
#
# this situation is going to rotate in according to the epoch
def _map_from_rank_to_world(self, sample):
return (sample * self.world_size) + self.rank_id
# get a sample from the available windows and set the sample as unavailable
def _get_sample(self):
if (self.number_of_avail_windows == 0):
self._reset_random_winds()
availables = np.where(self.avail_winds)
idx = torch.randint(0, availables[0].size, (1,1))[0].item()
sampled_window = availables[0][idx]
# set window as unavailable
self.avail_winds[sampled_window] = False
# map the sample from the rank domain to the global resources
sampled_window = self._map_from_rank_to_world(sampled_window)
sampled_window = (self.unraveled_indices[0][sampled_window], \
self.unraveled_indices[1][sampled_window], \
self.unraveled_indices[2][sampled_window], \
self.unraveled_indices[3][sampled_window],)
#sampled_window = np.unravel_index(sampled_window, self.shape)
self.number_of_avail_windows -= 1
return sampled_window
def _get_labels(self, time_window, Cnp, Duration, Dnp):
"Returns classes and bboxes inside the signal window"
dset_p = self.File['Cnp_' + str(Cnp+1) + '/Duration_' + str(Duration+1) + '/Dnp_' + str(Dnp+1) + '/parameters']
pulses_inside_window = np.where((torch.from_numpy(dset_p[0,:]) > time_window[0].cpu()) & \
(torch.from_numpy(dset_p[0,:]) < time_window[-1].cpu()))[0]
pulses_inside_window = pulses_inside_window.tolist()
start_times = dset_p[0,pulses_inside_window]
#pulse_widths = dset_p[1,pulses_inside_window]
#pulse_categories = dset_p[2,pulses_inside_window]
pulse_widths = dset_p[2,pulses_inside_window]
pulse_amplitudes = dset_p[3,pulses_inside_window]
number_of_pulses = len(pulses_inside_window)
if number_of_pulses == 0:
average_width = 0.0
average_amplitude = 0.0
else:
average_width = np.average(pulse_widths)
average_amplitude = np.average(pulse_amplitudes)
starts = (torch.from_numpy(start_times) - time_window[0].cpu()) / self.window
widths = torch.from_numpy(pulse_widths) / self.window
amplitudes = torch.from_numpy(pulse_amplitudes)
starts = starts.tolist()
widths = widths.tolist()
amplitudes = amplitudes.tolist()
#categories = pulse_categories.tolist()
categories = np.zeros(len(pulses_inside_window)).tolist()
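# Pad every per-pulse list with the value 1.0 up to max_num_of_pulses_in_a_wind,
# so that every window yields fixed-size label tensors regardless of pulse count.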
starts = (starts + [1.0]*(self.max_num_of_pulses_in_a_wind - len(starts)))
widths = (widths + [1.0]*(self.max_num_of_pulses_in_a_wind - len(widths)))
amplitudes = (amplitudes + [1.0]*(self.max_num_of_pulses_in_a_wind - len(amplitudes)))
categories = (categories + [1.0]*(self.max_num_of_pulses_in_a_wind - len(categories)))
starts = torch.FloatTensor(starts)
widths = torch.FloatTensor(widths)
amplitudes = torch.FloatTensor(amplitudes)
categories = torch.FloatTensor(categories)
return starts, widths, amplitudes, categories, number_of_pulses, average_width, average_amplitude
def _get_signal_window(self, with_labels=False):
if len(self.samples_indices) == 0: # bring 100 samples
for i in range(100):
self.samples_indices.append(self._get_sample())
sample = self.samples_indices.pop(0)
Cnp = sample[0]
Duration = sample[1]
Dnp = sample[2]
window_number = sample[3]
dset = self.File['Cnp_' + str(Cnp+1) + '/Duration_' + str(Duration+1) + '/Dnp_' + str(Dnp+1) + '/data']
#assert dset.shape[1] % self.length == 0
samples_per_second = int(dset.shape[1] / self.length)
samples_per_window = int(samples_per_second * self.window)
begin = window_number * samples_per_window
end = begin + samples_per_window
time_window = torch.Tensor(dset[0,begin:end]).to(self.device)
clean_signal = torch.Tensor(dset[1,begin:end]).to(self.device)
noisy_signal = torch.Tensor(dset[2,begin:end]).to(self.device)
if with_labels:
starts, widths, amplitudes, categories, number_of_pulses, average_width, average_amplitude = self._get_labels(time_window, Cnp, Duration, Dnp)
return time_window, clean_signal, noisy_signal, starts, widths, amplitudes, categories, number_of_pulses, average_width, average_amplitude
else:
return time_window, clean_signal, noisy_signal
def get_batch(self, descart_empty_windows=True):
#assert sum(self.avail_winds == True) > self.batch_size
noisy_signals = torch.Tensor(self.batch_size, int(self.window*self.sampling_rate)).to(self.device)
clean_signals = torch.Tensor(self.batch_size, int(self.window*self.sampling_rate)).to(self.device)
times = torch.Tensor(self.batch_size, int(self.window*self.sampling_rate)).to(self.device)
pulse_labels = torch.Tensor(self.batch_size, 4, self.max_num_of_pulses_in_a_wind).to(self.device)
average_labels = torch.Tensor(self.batch_size, 3).to(self.device)
for i in range(self.batch_size):
number_of_pulses = 0
if descart_empty_windows:
while(number_of_pulses==0):
Time, Clean_signal, Noisy_signal, starts, widths, amplitudes, categories,\
number_of_pulses, average_width, average_amplitude = self._get_signal_window(with_labels=True)
else:
Time, Clean_signal, Noisy_signal, starts, widths, amplitudes, categories,\
number_of_pulses, average_width, average_amplitude = self._get_signal_window(with_labels=True)
times[i] = Time
clean_signals[i] = Clean_signal
noisy_signals[i] = Noisy_signal
pulse_labels[i][0] = starts
pulse_labels[i][1] = widths
pulse_labels[i][2] = amplitudes
pulse_labels[i][3] = categories
average_labels[i][0] = number_of_pulses
average_labels[i][1] = average_width
average_labels[i][2] = average_amplitude
return times, noisy_signals, clean_signals, pulse_labels, average_labels
def get_signal_window(self, Cnp, Duration, Dnp, window_number):
dset = self.File['Cnp_' + str(Cnp+1) + '/Duration_' + str(Duration+1) + '/Dnp_' + str(Dnp+1) + '/data']
#assert dset.shape[1] % self.length == 0
samples_per_second = int(dset.shape[1] / self.length)
samples_per_window = int(samples_per_second * self.window)
begin = window_number * samples_per_window
end = begin + samples_per_window
time_window = torch.Tensor(dset[0,begin:end]).to(self.device)
clean_signal = torch.Tensor(dset[1,begin:end]).to(self.device)
noisy_signal = torch.Tensor(dset[2,begin:end]).to(self.device)
starts, widths, amplitudes, categories, number_of_pulses, average_width, average_amplitude = self._get_labels(time_window, Cnp, Duration, Dnp)
pulse_labels = torch.Tensor(4, self.max_num_of_pulses_in_a_wind).to(self.device)
average_labels = torch.Tensor(3).to(self.device)
pulse_labels[0] = starts
pulse_labels[1] = widths
pulse_labels[2] = amplitudes
pulse_labels[3] = categories
average_labels[0] = number_of_pulses
average_labels[1] = average_width
average_labels[2] = average_amplitude
return time_window, noisy_signal, clean_signal, pulse_labels, average_labels
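# For reference: pulse_labels rows are [starts, widths, amplitudes, categories]
# (each padded to max_num_of_pulses_in_a_wind), while average_labels holds
# [number_of_pulses, average_width, average_amplitude] for the requested window.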
class Unlabeled_Real_DataLoader:
def __init__(self, device, File, num_of_traces, window, length):
assert window < length
self.device = device
self.File = File
self.num_of_traces = num_of_traces
self.windows_per_trace = int(length / window)
# this is the shape of the structure of windows in the dataset
self.shape = (self.num_of_traces, int(length / window))
# this is the total number of windows in the dataset
self.total_number_of_windows = self.num_of_traces * self.windows_per_trace
self.window = window
self.length = length
def get_signal_window(self, trace_number, window_number):
dset = self.File['Volt_' + str(trace_number+1) + '/data']
#assert dset.shape[1] % self.length == 0
samples_per_second = int(dset.shape[1] / self.length)
samples_per_window = int(samples_per_second * self.window)
begin = window_number * samples_per_window
end = begin + samples_per_window
time_window = torch.Tensor(dset[0,begin:end]).to(self.device)
signal = torch.Tensor(dset[1,begin:end]).to(self.device)
return time_window, signal
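# Minimal usage sketch for the class above (an assumption-laden illustration, not
# part of the original code): it presumes the HDF5 file is opened with h5py and
# contains groups 'Volt_1', 'Volt_2', ... each holding a 'data' dataset of shape
# (2, num_samples). The file name and all numbers below are illustrative only.
def _example_unlabeled_real_loader_usage(path='real_traces.h5'):
    import h5py
    import torch
    with h5py.File(path, 'r') as real_file:
        loader = Unlabeled_Real_DataLoader(device=torch.device('cpu'),
                                           File=real_file,
                                           num_of_traces=10,
                                           window=1.0,
                                           length=30.0)
        # fetch the fourth one-second window of the first trace
        time_window, signal = loader.get_signal_window(trace_number=0,
                                                       window_number=3)
        return time_window.shape, signal.shape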
class Labeled_Real_DataLoader:
def __init__(self, device, File, num_of_traces, window, length):
assert window < length
self.device = device
self.File = File
self.num_of_traces | |
first verifies all self-certificates and then
only considers successfully verified ones, hence we cannot modify the
certificate data, before passing it to _assign_certified_key_info
IMO the best solution is a better separation of concerns, e.g. separate
self-certificate verification and packet prioritization.
"""
# Test ambiguity resolution scheme with 3 User IDs
# :user ID packet: "Test Expiration I <<EMAIL>>"
# :user ID packet: "Test Expiration II <<EMAIL>>"
# :user ID packet: "Test Expiration III <<EMAIL>>"
# User ID packets are ordered by their creation time in ascending order.
# "Test Expiration II" has the primary user ID flag set and therefor has
# the highest priority.
key = _assign_certified_key_info(self.raw_expired_key_bundle)
self.assertTrue(key["validity_period"] == 87901) # ~ 1 day
# Test ambiguity resolution scheme with 2 User IDs
# :user ID packet: "Test Expiration III <<EMAIL>>"
# :user ID packet: "Test Expiration I <<EMAIL>>"
# User ID packets are ordered by their creation time in descending order.
# Neither packet has the primary user ID flag set.
# "Test Expiration III" has the highest priority.
raw_key_bundle = deepcopy(self.raw_expired_key_bundle)
user_id_items = list(reversed(raw_key_bundle[PACKET_TYPE_USER_ID].items()))
del user_id_items[1]
raw_key_bundle[PACKET_TYPE_USER_ID] = OrderedDict(user_id_items)
key = _assign_certified_key_info(raw_key_bundle)
self.assertTrue(key["validity_period"] == 87901) # ~ 1 day
def test_get_verified_subkeys_errors(self):
"""Test _get_verified_subkeys errors with manually crafted data based on
real gpg key data (see self.raw_key_bundle). """
# Tamper with subkey (change version number) to trigger key parsing error
bad_subkey_bundle = deepcopy(self.raw_key_bundle)
packet, packet_data = bad_subkey_bundle[PACKET_TYPE_SUB_KEY].popitem()
packet = bytes(packet[:packet_data["header_len"]] +
bytearray([0x03]) + packet[packet_data["header_len"]+1:])
bad_subkey_bundle[PACKET_TYPE_SUB_KEY][packet] = packet_data
# Add bogus sig to trigger sig parsing error
wrong_sig_bundle = deepcopy(self.raw_key_bundle)
packet, packet_data = wrong_sig_bundle[PACKET_TYPE_SUB_KEY].popitem()
# NOTE: We can't only pass the bogus sig, because that would also trigger
# the not enough sigs error (see not_enough_sigs_bundle) and mock only
# lets us assert for the most recent log statement
packet_data["signatures"].append(bytearray([0b01111111, 0]))
wrong_sig_bundle[PACKET_TYPE_SUB_KEY][packet] = packet_data
# Remove sigs to trigger not enough sigs error
not_enough_sigs_bundle = deepcopy(self.raw_key_bundle)
packet, packet_data = not_enough_sigs_bundle[PACKET_TYPE_SUB_KEY].popitem()
packet_data["signatures"] = []
not_enough_sigs_bundle[PACKET_TYPE_SUB_KEY][packet] = packet_data
# Duplicate sig to trigger wrong amount signatures
too_many_sigs_bundle = deepcopy(self.raw_key_bundle)
packet, packet_data = too_many_sigs_bundle[PACKET_TYPE_SUB_KEY].popitem()
packet_data["signatures"] = packet_data["signatures"] * 2
too_many_sigs_bundle[PACKET_TYPE_SUB_KEY][packet] = packet_data
# Tamper with primary key to trigger signature verification error
invalid_sig_bundle = deepcopy(self.raw_key_bundle)
invalid_sig_bundle[PACKET_TYPE_PRIMARY_KEY]["packet"] = \
invalid_sig_bundle[PACKET_TYPE_PRIMARY_KEY]["packet"][:-1]
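# Each entry below pairs a tampered key bundle with the log-message fragment we
# expect _get_verified_subkeys to emit for it.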
test_data = [
(bad_subkey_bundle, "Pubkey packet version '3' not supported"),
(wrong_sig_bundle, "Expected packet 2, but got 63 instead"),
(not_enough_sigs_bundle, "wrong amount of key binding signatures (0)"),
(too_many_sigs_bundle, "wrong amount of key binding signatures (2)"),
(invalid_sig_bundle, "invalid key binding signature"),
]
for bundle, expected_msg in test_data:
with patch("in_toto.gpg.common.LOG") as mock_log:
_get_verified_subkeys(bundle)
msg = str(mock_log.info.call_args[0][0])
self.assertTrue(expected_msg in msg,
"'{}' not in '{}'".format(expected_msg, msg))
def test_get_verified_subkeys(self):
"""Test correct assignment of subkey expiration date in
gpg.common._get_verified_subkeys using real gpg data. """
subkeys = _get_verified_subkeys(self.raw_expired_key_bundle)
# Test subkey with validity period 175451, i.e. ~ 2 days
self.assertTrue(subkeys["0ce427fa3f0f50bc83a4a760ed95e1581691db4d"].get(
"validity_period") == 175451)
# Test subkey without validity period, i.e. it does not expire
self.assertTrue(subkeys["<KEY>"].get(
"validity_period") is None)
def test_get_pubkey_bundle_errors(self):
"""Pass wrong keyid with valid gpg data to trigger KeyNotFoundError. """
not_associated_keyid = "8465A1E2E0FB2B40ADB2478E18FB3F537E0C8A17"
with self.assertRaises(KeyNotFoundError):
get_pubkey_bundle(self.raw_key_data, not_associated_keyid)
def test_parse_signature_packet_errors(self):
"""Test parse_signature_packet errors with manually crafted data. """
# passed data | expected error message
test_data = [
(bytearray([0b01000010, 1, 255]),
"Signature version '255' not supported"),
(bytearray([0b01000010, 2, 4, 255]),
"Signature type '255' not supported"),
(bytearray([0b01000010, 3, 4, 0, 255]),
"Signature algorithm '255' not supported"),
(bytearray([0b01000010, 4, 4, 0, 1, 255]),
"Hash algorithm '255' not supported"),
]
for data, expected_error_str in test_data:
with self.assertRaises(ValueError) as ctx:
parse_signature_packet(data)
self.assertTrue(expected_error_str in str(ctx.exception),
"'{}' not in '{}'".format(expected_error_str, str(ctx.exception)))
@unittest.skipIf(os.getenv("TEST_SKIP_GPG"), "gpg not found")
class TestGPGRSA(unittest.TestCase):
"""Test signature creation, verification and key export from the gpg
module"""
default_keyid = "8465A1E2E0FB2B40ADB2478E18FB3F537E0C8A17"
signing_subkey_keyid = "C5A0ABE6EC19D0D65F85E2C39BE9DF5131D924E9"
encryption_subkey_keyid = "6A112FD3390B2E53AFC2E57F8FC8E12099AECEEA"
unsupported_subkey_keyid = "611A9B648E16F54E8A7FAD5DA51E8CDF3B06524F"
expired_key_keyid = "<KEY>"
@classmethod
def setUpClass(self):
# Create directory to run the tests without having everything blow up
self.working_dir = os.getcwd()
# Find demo files
gpg_keyring_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gpg_keyrings", "rsa")
self.test_dir = os.path.realpath(tempfile.mkdtemp())
self.gnupg_home = os.path.join(self.test_dir, "rsa")
shutil.copytree(gpg_keyring_path, self.gnupg_home)
os.chdir(self.test_dir)
@classmethod
def tearDownClass(self):
"""Change back to initial working dir and remove temp test directory. """
os.chdir(self.working_dir)
shutil.rmtree(self.test_dir)
def test_gpg_export_pubkey(self):
""" export a public key and make sure the parameters are the right ones:
since there's very little we can do to check rsa key parameters are right
we pre-exported the public key to an ssh key, which we can load with
cryptography for the sake of comparison """
# export our gpg key, using our functions
key_data = gpg_export_pubkey(self.default_keyid, homedir=self.gnupg_home)
our_exported_key = rsa_create_pubkey(key_data)
# load the equivalent ssh key, and make sure that we get the same RSA key
# parameters
ssh_key_basename = "{}.ssh".format(self.default_keyid)
ssh_key_path = os.path.join(self.gnupg_home, ssh_key_basename)
with open(ssh_key_path, "rb") as fp:
keydata = fp.read()
ssh_key = serialization.load_ssh_public_key(keydata,
backends.default_backend())
self.assertEqual(ssh_key.public_numbers().n,
our_exported_key.public_numbers().n)
self.assertEqual(ssh_key.public_numbers().e,
our_exported_key.public_numbers().e)
subkey_keyids = list(key_data["subkeys"].keys())
# We export the whole master key bundle which must contain the subkeys
self.assertTrue(self.signing_subkey_keyid.lower() in subkey_keyids)
# Currently we do not exclude encryption subkeys
self.assertTrue(self.encryption_subkey_keyid.lower() in subkey_keyids)
# However, we do exclude subkeys whose algorithm we do not support
self.assertFalse(self.unsupported_subkey_keyid.lower() in subkey_keyids)
# When passing the subkey keyid we also export the whole keybundle
key_data2 = gpg_export_pubkey(self.signing_subkey_keyid,
homedir=self.gnupg_home)
self.assertDictEqual(key_data, key_data2)
def test_gpg_sign_and_verify_object_with_default_key(self):
"""Create a signature using the default key on the keyring """
test_data = b'test_data'
wrong_data = b'something malicious'
signature = gpg_sign_object(test_data, homedir=self.gnupg_home)
key_data = gpg_export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(gpg_verify_signature(signature, key_data, test_data))
self.assertFalse(gpg_verify_signature(signature, key_data, wrong_data))
def test_gpg_sign_and_verify_object(self):
"""Create a signature using a specific key on the keyring """
test_data = b'test_data'
wrong_data = b'something malicious'
signature = gpg_sign_object(test_data, keyid=self.default_keyid,
homedir=self.gnupg_home)
key_data = gpg_export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(gpg_verify_signature(signature, key_data, test_data))
self.assertFalse(gpg_verify_signature(signature, key_data, wrong_data))
def test_gpg_sign_and_verify_object_default_keyring(self):
"""Sign/verify using keyring from envvar. """
test_data = b'test_data'
gnupg_home_backup = os.environ.get("GNUPGHOME")
os.environ["GNUPGHOME"] = self.gnupg_home
signature = gpg_sign_object(test_data, keyid=self.default_keyid)
key_data = gpg_export_pubkey(self.default_keyid)
self.assertTrue(gpg_verify_signature(signature, key_data, test_data))
# Reset GNUPGHOME
if gnupg_home_backup:
os.environ["GNUPGHOME"] = gnupg_home_backup
else:
del os.environ["GNUPGHOME"]
def test_gpg_sign_object_with_expired_key(self):
"""Test signing with expired key raises gpg CommandError. """
with self.assertRaises(CommandError) as ctx:
gpg_sign_object(b"livestock", keyid=self.expired_key_keyid,
homedir=self.gnupg_home)
expected = "returned non-zero exit status '2'"
self.assertTrue(expected in str(ctx.exception), "{} not in {}".format(
expected, ctx.exception))
def test_gpg_verify_signature_with_expired_key(self):
"""Test sig verification with expired key raises KeyExpirationError. """
signature = {
"keyid": self.expired_key_keyid,
"other_headers": "deadbeef",
"signature": "deadbeef",
}
content = b"livestock"
key = gpg_export_pubkey(self.expired_key_keyid,
homedir=self.gnupg_home)
with self.assertRaises(KeyExpirationError) as ctx:
gpg_verify_signature(signature, key, content)
expected = ("GPG key '<KEY>' "
"created on '2019-03-25 12:46 UTC' with validity period '1 day, "
"0:25:01' expired on '2019-03-26 13:11 UTC'.")
self.assertTrue(expected == str(ctx.exception),
"\nexpected: {}"
"\ngot: {}".format(expected, ctx.exception))
@unittest.skipIf(os.getenv("TEST_SKIP_GPG"), "gpg not found")
class TestGPGDSA(unittest.TestCase):
""" Test signature creation, verification and key export from the gpg
module """
default_keyid = "<KEY>"
@classmethod
def setUpClass(self):
# Create directory to run the tests without having everything blow up
self.working_dir = os.getcwd()
self.test_dir = os.path.realpath(tempfile.mkdtemp())
self.gnupg_home = os.path.join(self.test_dir, "dsa")
# Find keyrings
keyrings = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gpg_keyrings", "dsa")
shutil.copytree(keyrings, self.gnupg_home)
os.chdir(self.test_dir)
@classmethod
def tearDownClass(self):
"""Change back to initial working dir and remove temp test directory. """
os.chdir(self.working_dir)
shutil.rmtree(self.test_dir)
def test_gpg_export_pubkey(self):
""" export a public key and make sure the parameters are the right ones:
since there's very little we can do to check dsa key parameters are right
we pre-exported the public key to an ssh key, which we can load with
cryptography for the sake of comparison """
# export our gpg key, using our functions
key_data = gpg_export_pubkey(self.default_keyid, homedir=self.gnupg_home)
our_exported_key = dsa_create_pubkey(key_data)
# load the equivalent ssh key, and make sure that we get the same DSA key
# parameters
ssh_key_basename = "{}.ssh".format(self.default_keyid)
ssh_key_path = os.path.join(self.gnupg_home, ssh_key_basename)
with open(ssh_key_path, "rb") as fp:
keydata = fp.read()
ssh_key = serialization.load_ssh_public_key(keydata,
backends.default_backend())
self.assertEqual(ssh_key.public_numbers().y,
our_exported_key.public_numbers().y)
self.assertEqual(ssh_key.public_numbers().parameter_numbers.g,
our_exported_key.public_numbers().parameter_numbers.g)
self.assertEqual(ssh_key.public_numbers().parameter_numbers.q,
our_exported_key.public_numbers().parameter_numbers.q)
self.assertEqual(ssh_key.public_numbers().parameter_numbers.p,
our_exported_key.public_numbers().parameter_numbers.p)
def test_gpg_sign_and_verify_object_with_default_key(self):
"""Create a signature using the default key on the keyring """
test_data = b'test_data'
wrong_data = b'something malicious'
signature = gpg_sign_object(test_data, homedir=self.gnupg_home)
key_data = gpg_export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(gpg_verify_signature(signature, key_data, test_data))
self.assertFalse(gpg_verify_signature(signature, key_data, wrong_data))
def test_gpg_sign_and_verify_object(self):
"""Create a signature using a specific key on the keyring """
test_data = b'test_data'
wrong_data = b'something malicious'
signature = gpg_sign_object(test_data, keyid=self.default_keyid,
homedir=self.gnupg_home)
key_data = gpg_export_pubkey(self.default_keyid, homedir=self.gnupg_home)
self.assertTrue(gpg_verify_signature(signature, key_data, test_data))
self.assertFalse(gpg_verify_signature(signature, key_data, wrong_data))
if | |
[],
'targets': target_list,
'tests': test_list,
}
return build_yaml_like
def _extract_cc_tests(bazel_rules: BuildDict) -> List[str]:
"""Gets list of cc_test tests from bazel rules"""
result = []
for bazel_rule in list(bazel_rules.values()):
if bazel_rule['class'] == 'cc_test':
test_name = bazel_rule['name']
if test_name.startswith('//'):
prefixlen = len('//')
result.append(test_name[prefixlen:])
return list(sorted(result))
def _exclude_unwanted_cc_tests(tests: List[str]) -> List[str]:
"""Filters out bazel tests that we don't want to run with other build systems or we cannot build them reasonably"""
# most qps tests are autogenerated, we are fine without them
tests = [test for test in tests if not test.startswith('test/cpp/qps:')]
# microbenchmarks aren't needed for checking correctness
tests = [
test for test in tests
if not test.startswith('test/cpp/microbenchmarks:')
]
tests = [
test for test in tests
if not test.startswith('test/core/promise/benchmark:')
]
# we have trouble with census dependency outside of bazel
tests = [
test for test in tests
if not test.startswith('test/cpp/ext/filters/census:') and
not test.startswith('test/core/xds:xds_channel_stack_modifier_test')
]
# missing opencensus/stats/stats.h
tests = [
test for test in tests if not test.startswith(
'test/cpp/end2end:server_load_reporting_end2end_test')
]
tests = [
test for test in tests if not test.startswith(
'test/cpp/server/load_reporter:lb_load_reporter_test')
]
# The test uses the --running_under_bazel cmdline argument.
# To avoid the trouble of adjusting it, we just skip the test.
tests = [
test for test in tests if not test.startswith(
'test/cpp/naming:resolver_component_tests_runner_invoker')
]
# the test requires 'client_crash_test_server' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:time_change_test')
]
# the test requires 'client_crash_test_server' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:client_crash_test')
]
# the test requires 'server_crash_test_client' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:server_crash_test')
]
# test never existed under build.yaml and it fails -> skip it
tests = [
test for test in tests
if not test.startswith('test/core/tsi:ssl_session_cache_test')
]
# the binary of this test does not get built with cmake
tests = [
test for test in tests
if not test.startswith('test/cpp/util:channelz_sampler_test')
]
# we don't need to generate fuzzers outside of bazel
tests = [test for test in tests if not test.endswith('_fuzzer')]
return tests
def _generate_build_extra_metadata_for_tests(
tests: List[str], bazel_rules: BuildDict) -> BuildDict:
"""For given tests, generate the "extra metadata" that we need for our "build.yaml"-like output. The extra metadata is generated from the bazel rule metadata by using a bunch of heuristics."""
test_metadata = {}
for test in tests:
test_dict = {'build': 'test', '_TYPE': 'target'}
bazel_rule = bazel_rules[_get_bazel_label(test)]
bazel_tags = bazel_rule['tags']
if 'manual' in bazel_tags:
# don't run the tests marked as "manual"
test_dict['run'] = False
if bazel_rule['flaky']:
# don't run tests that are marked as "flaky" under bazel
# because that would only add noise for the run_tests.py tests
# and seeing more failures for tests that we already know are flaky
# doesn't really help anything
test_dict['run'] = False
if 'no_uses_polling' in bazel_tags:
test_dict['uses_polling'] = False
if 'grpc_fuzzer' == bazel_rule['generator_function']:
# currently we hand-list fuzzers instead of generating them automatically
# because there's no way to obtain maxlen property from bazel BUILD file.
print(('skipping fuzzer ' + test))
continue
if 'bazel_only' in bazel_tags:
continue
# if any tags that restrict platform compatibility are present,
# generate the "platforms" field accordingly
# TODO(jtattermusch): there is also a "no_linux" tag, but we cannot take
# it into account as it is applied by grpc_cc_test when poller expansion
# is made (for tests where uses_polling=True). So for now, we just
# assume all tests are compatible with linux and ignore the "no_linux" tag
# completely.
known_platform_tags = set(['no_windows', 'no_mac'])
if set(bazel_tags).intersection(known_platform_tags):
platforms = []
# assume all tests are compatible with linux and posix
platforms.append('linux')
platforms.append(
'posix') # there is no posix-specific tag in bazel BUILD
if not 'no_mac' in bazel_tags:
platforms.append('mac')
if not 'no_windows' in bazel_tags:
platforms.append('windows')
test_dict['platforms'] = platforms
cmdline_args = bazel_rule['args']
if cmdline_args:
test_dict['args'] = list(cmdline_args)
if test.startswith('test/cpp'):
test_dict['language'] = 'c++'
elif test.startswith('test/core'):
test_dict['language'] = 'c'
else:
raise Exception('wrong test: ' + test)
# short test name without the path.
# There can be name collisions, but we will resolve them later
simple_test_name = os.path.basename(_extract_source_file_path(test))
test_dict['_RENAME'] = simple_test_name
test_metadata[test] = test_dict
# detect duplicate test names
tests_by_simple_name = {}
for test_name, test_dict in list(test_metadata.items()):
simple_test_name = test_dict['_RENAME']
if not simple_test_name in tests_by_simple_name:
tests_by_simple_name[simple_test_name] = []
tests_by_simple_name[simple_test_name].append(test_name)
# choose alternative names for tests with a name collision
for collision_list in list(tests_by_simple_name.values()):
if len(collision_list) > 1:
for test_name in collision_list:
long_name = test_name.replace('/', '_').replace(':', '_')
print((
'short name of "%s" collides with another test, renaming to %s'
% (test_name, long_name)))
test_metadata[test_name]['_RENAME'] = long_name
return test_metadata
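# Hedged illustration of the metadata shape assembled above (the test label and
# values are examples, not guaranteed output of the original script):
#   'test/cpp/end2end:async_end2end_test': {
#       'build': 'test', '_TYPE': 'target', 'language': 'c++',
#       '_RENAME': 'async_end2end_test'}
# Optional keys such as 'run', 'uses_polling', 'platforms' and 'args' are added
# only when the corresponding bazel tags or attributes are present.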
def _parse_http_archives(xml_tree: ET.Element) -> 'List[ExternalProtoLibrary]':
"""Parse Bazel http_archive rule into ExternalProtoLibrary objects."""
result = []
for xml_http_archive in xml_tree:
if xml_http_archive.tag != 'rule' or xml_http_archive.attrib[
'class'] != 'http_archive':
continue
# A distilled Python representation of Bazel http_archive
http_archive = dict()
for xml_node in xml_http_archive:
if xml_node.attrib['name'] == 'name':
http_archive["name"] = xml_node.attrib['value']
if xml_node.attrib['name'] == 'urls':
http_archive["urls"] = []
for url_node in xml_node:
http_archive["urls"].append(url_node.attrib['value'])
if xml_node.attrib['name'] == 'url':
http_archive["urls"] = [xml_node.attrib['value']]
if xml_node.attrib['name'] == 'sha256':
http_archive["hash"] = xml_node.attrib['value']
if xml_node.attrib['name'] == 'strip_prefix':
http_archive["strip_prefix"] = xml_node.attrib['value']
if http_archive["name"] not in EXTERNAL_PROTO_LIBRARIES:
# If this http archive is not one of the external proto libraries,
# we don't want to include it as a CMake target
continue
lib = EXTERNAL_PROTO_LIBRARIES[http_archive["name"]]
lib.urls = http_archive["urls"]
lib.hash = http_archive["hash"]
lib.strip_prefix = http_archive["strip_prefix"]
result.append(lib)
return result
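# Hedged sketch (not part of the original script) of the `bazel query --output xml`
# fragment that _parse_http_archives consumes; the archive name, URL and hash below
# are illustrative, and the entry is only returned if its name is listed in
# EXTERNAL_PROTO_LIBRARIES.
def _example_parse_http_archives():
    example_xml = ET.fromstring(
        '<query>'
        '<rule class="http_archive" name="//external:envoy_api">'
        '<string name="name" value="envoy_api"/>'
        '<list name="urls">'
        '<string value="https://example.com/envoy_api.tar.gz"/>'
        '</list>'
        '<string name="sha256" value="deadbeef"/>'
        '<string name="strip_prefix" value="envoy_api-1.0"/>'
        '</rule>'
        '</query>')
    return _parse_http_archives(example_xml)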
def _generate_external_proto_libraries() -> List[Dict[str, Any]]:
"""Generates the build metadata for external proto libraries"""
xml_tree = _bazel_query_xml_tree('kind(http_archive, //external:*)')
libraries = _parse_http_archives(xml_tree)
libraries.sort(key=lambda x: x.destination)
return list(map(lambda x: x.__dict__, libraries))
def _detect_and_print_issues(build_yaml_like: BuildYaml) -> None:
"""Try detecting some unusual situations and warn about them."""
for tgt in build_yaml_like['targets']:
if tgt['build'] == 'test':
for src in tgt['src']:
if src.startswith('src/') and not src.endswith('.proto'):
print(('source file from under "src/" tree used in test ' +
tgt['name'] + ': ' + src))
# extra metadata that will be used to construct build.yaml
# these are mostly extra properties that we weren't able to obtain from the bazel build
# _TYPE: whether this is library, target or test
# _RENAME: whether this target should be renamed to a different name (to match expectations of make and cmake builds)
_BUILD_EXTRA_METADATA = {
'third_party/address_sorting:address_sorting': {
'language': 'c',
'build': 'all',
'_RENAME': 'address_sorting'
},
'gpr': {
'language': 'c',
'build': 'all',
},
'grpc': {
'language': 'c',
'build': 'all',
'baselib': True,
'generate_plugin_registry': True
},
'grpc++': {
'language': 'c++',
'build': 'all',
'baselib': True,
},
'grpc++_alts': {
'language': 'c++',
'build': 'all',
'baselib': True
},
'grpc++_error_details': {
'language': 'c++',
'build': 'all'
},
'grpc++_reflection': {
'language': 'c++',
'build': 'all'
},
'grpc++_unsecure': {
'language': 'c++',
'build': 'all',
'baselib': True,
},
'grpc_unsecure': {
'language': 'c',
'build': 'all',
'baselib': True,
'generate_plugin_registry': True
},
'grpcpp_channelz': {
'language': 'c++',
'build': 'all'
},
'grpc++_test': {
'language': 'c++',
'build': 'private',
},
'src/compiler:grpc_plugin_support': {
'language': 'c++',
'build': 'protoc',
'_RENAME': 'grpc_plugin_support'
},
'src/compiler:grpc_cpp_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_cpp_plugin'
},
'src/compiler:grpc_csharp_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_csharp_plugin'
},
'src/compiler:grpc_node_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_node_plugin'
},
'src/compiler:grpc_objective_c_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_objective_c_plugin'
},
'src/compiler:grpc_php_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_php_plugin'
},
'src/compiler:grpc_python_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_python_plugin'
},
'src/compiler:grpc_ruby_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_ruby_plugin'
},
# TODO(jtattermusch): consider adding grpc++_core_stats
# test support libraries
'test/core/util:grpc_test_util': {
'language': 'c',
'build': 'private',
'_RENAME': 'grpc_test_util'
},
'test/core/util:grpc_test_util_unsecure': {
'language': 'c',
'build': 'private',
'_RENAME': 'grpc_test_util_unsecure'
},
# TODO(jtattermusch): consider adding grpc++_test_util_unsecure - it doesn't seem to be used by bazel build (don't forget to set secure: False)
'test/cpp/util:test_config': {
'language': 'c++',
'build': 'private',
'_RENAME': 'grpc++_test_config'
},
'test/cpp/util:test_util': {
'language': 'c++',
'build': 'private',
'_RENAME': 'grpc++_test_util'
},
# end2end test support libraries
'test/core/end2end:end2end_tests': {
'language': 'c',
'build': 'private',
'_RENAME': 'end2end_tests'
},
# benchmark support libraries
'test/cpp/microbenchmarks:helpers': {
'language': 'c++',
'build': 'test',
'defaults': 'benchmark',
'_RENAME': 'benchmark_helpers'
},
'test/cpp/interop:interop_client': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'interop_client'
},
'test/cpp/interop:interop_server': {
'language': | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#2020.0610 # Upgraded tests to v2; set up tests against AOP which seems to be discontinued and thus constant
import unittest
import requests
from unitTestConfig import base_plus_endpoint_encoded, headers, get_headers_not_logged_in
# Get session, but not logged in.
headers = get_headers_not_logged_in()
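# The tests below exercise the smarttext parameter with the different input styles
# the endpoint recognizes: document locators (e.g. aop.033.0079a), DOIs,
# author/year citations, schema fields (e.g. art_type:REV, art_id:AOP.033.*),
# volume:page references, quoted phrases, and plain word searches.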
class TestDatabaseSmartSearch(unittest.TestCase):
def test_0_smartsearch_endpoint(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/SmartSearch/?smarttext=aop.033.0079a&abstract=True')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
print (f'Smarttext: {response_info["description"]}')
#response_set = r["documentList"]["responseSet"]
assert(response_info["count"] == 1)
def test_0_name_year_smartsearch_endpoint(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/SmartSearch/?smarttext=Tuckett 1982&sort=rank&limit=15&offset=0')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True) # rank is accepted, same as score
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
assert(response_info["fullCount"] == 1)
#print (response_set)
for n in response_set:
print (n["documentRef"])
# Confirm that the request-response cycle completed successfully.
def test_0_DOI_smartsearch_endpoint(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/SmartSearch/?smarttext=10.3280/PU2019-004002')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
print (response_info)
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
def test_1_smartsearch_regular(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?sourcecode=OPUS&smarttext=physics%20science%20observations&abstract=True')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
print (f'Smarttext: {response_info["description"]}')
#response_set = r["documentList"]["responseSet"]
assert(response_info["count"] == 1)
def test__2a_smartsearch_locator1(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=aop.033.0079a&abstract=True')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
print (f'Smarttext: {response_info["description"]}')
#response_set = r["documentList"]["responseSet"]
assert(response_info["count"] == 1) # should be . I confirmed all three papers above in test_search_long_para...not sure why this fails.
def test_2b_smartsearch_locator2(self):
# Partial locator, gets all articles in this volume.
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=art_id:AOP.033.*&abstract=True')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
print (f'Smarttext: {response_info["description"]}')
response_set = r["documentList"]["responseSet"]
assert(response_info["fullCount"] == 19)
#print (response_set)
def test_003_smartsearch_name_year(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Tuckett 1982&sort=rank&limit=15&offset=0')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True) # rank is accepted, same as score
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
assert(response_info["fullCount"] == 1)
#print (response_set)
for n in response_set:
print (n["documentRef"])
# Confirm that the request-response cycle completed successfully.
def test_3b_smartsearch_two_names_and_year(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Tuckett and Fonagy (2012)')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True) # rank is accepted, same as score
r = response.json()
response_info = r["documentList"]["responseInfo"]
print (response_info)
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
assert(response_info["fullCount"] == 1)
#print (response_set)
for n in response_set:
print (n["documentRef"])
# Confirm that the request-response cycle completed successfully.
def test_4_search_schemafield(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=art_type:REV&sourcecode=AOP')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
print (f'Smarttext: {response_info["description"]}')
response_set = r["documentList"]["responseSet"]
assert(response_info["fullCount"] == 3)
print (response_set[0])
def test_5_search_author_and_journalcode(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Levin&sourcecode=AOP')
response = requests.get(full_URL, headers=headers)
r = response.json()
assert(response.ok == True)
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 12)
#print (response_set[0])
def test_6_search_author_and_journalcode_and_text(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Blum&sourcecode=AOP&fulltext1=transference')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
print (response_info)
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 2)
#print (response_set[0])
def test_10_DOI(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=10.3280/PU2019-004002')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
print (response_info)
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
#print (response_set[0])
def test_10_locator(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=aop.033.0079a')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (response_info["count"])
assert(response_info["count"] == 1)
# print (response_set[0])
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=IJP.100.0411A')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
#print (response_set[0])
def test_10_year_and_page(self):
"""
"""
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=2014 153')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 7) # verified 7 matches 2020-07-26
# print (response_set[0])
def test_10_vol_and_page(self):
"""
"""
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=100:272')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
# print (response_set[0])
def test_10_schema_fields(self):
"""
"""
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?sourcecode=AOP&smarttext=art_type:ART OR COM')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["fullCount"])
assert(response_info["fullCount"] >= 627)
# print (response_set[0])
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?sourcecode=AJP&smarttext=art_type:PRO')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["fullCount"])
assert(response_info["fullCount"] >= 37)
print (response_set[0])
def test_11A_single_name(self):
"""
"""
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Tuckett, D.')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["fullCount"])
assert(response_info["fullCount"] >= 59)
print (response_set[0])
def test_11B_multiple_name(self):
"""
"""
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=<NAME>., <NAME>., <NAME>.')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
print (response_set[0])
def test_11B2_anded_names(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=<NAME>. and <NAME>.')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
print (response_set[0])
def test_12a_names_and_dates(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Tuckett and Fonagy (2012)')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
#print (response_set[0])
def test_12b_names_and_dates(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Tuckett and Fonagy 2012')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
#print (response_set[0])
def test_12c_names_and_dates(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=<NAME>, <NAME> and <NAME> (1974)')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
#print (response_set[0])
def test_12d_names_and_dates(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=<NAME>., <NAME>. and <NAME>. (2012)')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
#print (response_set[0])
def test_12e_names_and_dates(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Tuckett and Fonagy 2012')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
#print (response_set[0])
def test_12f_names_and_dates(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=<NAME>. and <NAME>. (1959)')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
#print (response_set[0])
def test_12g_names_and_dates(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=<NAME>. and Fonagy, P. 2012')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
print (response_set[0])
def test_12b_phrase_search(self):
"""
This is a search for a phrase
"""
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext="Manualized Psychodynamic Psychotherapies"')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] >= 3)
print (response_set[0])
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Manualized Psychodynamic Psychotherapies')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] >= 3)
print (response_set[0])
def test_12c_word_search(self):
"""
"""
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Psychoanalysis Treatment of headaches.')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["fullCount"])
assert(response_info["count"] >= 1)
print (response_set[0])
def test_13_references_a(self):
"""
Full references
"""
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=<NAME>, <NAME>. (1974). Some Observations on Three Interracial Analyses. Int. J. Psycho-Anal., 55:495-500.')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
print (response_set[0])
def test_13_references_b(self):
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=<NAME> <NAME>. ( 1959). The Points of View and Assumptions of Metapsychology. Int. J. Psycho-Anal. 40:153-162')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["count"])
assert(response_info["count"] == 1)
print (response_set[0])
def test_13a_dts_example_searches(self):
"""
Words anded
"""
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=Evenly Suspended Attention')
response = requests.get(full_URL, headers=headers)
assert(response.ok == True)
r = response.json()
response_info = r["documentList"]["responseInfo"]
response_set = r["documentList"]["responseSet"]
print (f'Smarttext: {response_info["description"]}')
print (response_info["fullCount"])
assert(response_info["fullCount"] >= 695 and response_info["fullCount"] <= 810)
def test_13b_dts_example_searches(self):
"""
"""
full_URL = base_plus_endpoint_encoded('/v2/Database/Search/?smarttext=transference interpretation')
response = | |
#!/usr/bin/python3
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from .models import Museo, Usuario, Comentario, Pagina_Personal
from lxml import etree
from django.template.loader import get_template
from django.template import Context
# Create your views here.
def extraer_elemento(dic, elemento):
try:
elemento = dic[elemento]
except KeyError:
elemento = ""
return elemento
def guardar_datos(dic):
dicc_datos = {}
lista_aux = ['ID_ENTIDAD', 'NOMBRE', 'DESCRIPCION_ENTIDAD', 'HORARIO', 'TRANSPORTE', 'ACCESIBILIDAD', 'CONTENT_URL', 'NOMBRE_VIA', 'CLASE_VIAL', 'TIPO_NUM', 'NUM', 'LOCALIDAD', 'CODIGO_POSTAL','PLANTA', 'BARRIO', 'DISTRITO', 'COORDENADA_X', 'COORDENADA_Y', 'LATITUD', 'LONGITUD', 'TELEFONO', 'FAX', 'EMAIL', 'EQUIPAMIENTO']
for elemento in lista_aux:
dicc_datos[elemento] = extraer_elemento(dic, elemento)
g = Museo(ID_ENTIDAD = dicc_datos['ID_ENTIDAD'], NOMBRE = dicc_datos['NOMBRE'], DESCRIPCION_ENTIDAD = dicc_datos['DESCRIPCION_ENTIDAD'], HORARIO = dicc_datos['HORARIO'], TRANSPORTE = dicc_datos['TRANSPORTE'], ACCESIBILIDAD = dicc_datos['ACCESIBILIDAD'], CONTENT_URL = dicc_datos['CONTENT_URL'], NOMBRE_VIA = dicc_datos['NOMBRE_VIA'], CLASE_VIAL = dicc_datos['CLASE_VIAL'], TIPO_NUM = dicc_datos['TIPO_NUM'], NUM = dicc_datos['NUM'], LOCALIDAD = dicc_datos['LOCALIDAD'], CODIGO_POSTAL = dicc_datos['CODIGO_POSTAL'], PLANTA = dicc_datos['PLANTA'], BARRIO = dicc_datos['BARRIO'], DISTRITO = dicc_datos['DISTRITO'], COORDENADA_X = dicc_datos['COORDENADA_X'], COORDENADA_Y = dicc_datos['COORDENADA_Y'], LATITUD = dicc_datos['LATITUD'], LONGITUD = dicc_datos['LONGITUD'], TELEFONO = dicc_datos['TELEFONO'], FAX = dicc_datos['FAX'], EMAIL = dicc_datos['EMAIL'], EQUIPAMIENTO = dicc_datos['EQUIPAMIENTO'], NUMERO_COMENTARIOS = 0)
g.save()
return None
def parsear(doc):
contenidos = doc.getroot()
for k in range(1, len(contenidos)):
contenido = contenidos[k]
atributo = contenido[1]
dic = {}
for i in range(0, len(atributo)-1):
nombre = atributo[i].attrib.get("nombre")
if nombre == "LOCALIZACION":
for j in range(0, len(atributo[i])):
nombre = atributo[i][j].attrib.get("nombre")
val = nombre.find("-")
if val != -1:
nombre = nombre.replace("-", "_")
dic[nombre] = atributo[i][j].text
elif nombre == "DATOSCONTACTOS":
for j in range(0, len(atributo[i])):
nombre = atributo[i][j].attrib.get("nombre")
val = nombre.find("-")
if val != -1:
nombre = nombre.replace("-", "_")
dic[nombre] = atributo[i][j].text
else:
val = nombre.find("-")
if val != -1:
nombre = nombre.replace("-", "_")
dic[nombre] = atributo[i].text
guardar_datos(dic)
return None
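# Hedged sketch of the XML layout that parsear() expects, inferred from the parsing
# code above (element names and values shown are illustrative only):
#
# <contenidos>
#   <!-- the first child of <contenidos> is skipped (the loop starts at index 1) -->
#   <contenido>
#     <!-- first child element (skipped; the parser reads contenido[1]) -->
#     <atributos>
#       <atributo nombre="ID-ENTIDAD">123</atributo>
#       <atributo nombre="NOMBRE">Museo ...</atributo>
#       <atributo nombre="LOCALIZACION">
#         <atributo nombre="NOMBRE-VIA">...</atributo>
#         ...
#       </atributo>
#       <atributo nombre="DATOSCONTACTOS">
#         <atributo nombre="TELEFONO">...</atributo>
#         ...
#       </atributo>
#     </atributos>
#   </contenido>
#   ...
# </contenidos>
#
# Hyphens in the "nombre" attribute are replaced with underscores so the keys
# match the Museo model fields (e.g. ID-ENTIDAD -> ID_ENTIDAD).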
def crear_database():
doc = etree.parse('museos/201132-0-museos.xml')
parsear(doc)
return None
@csrf_exempt
def mostrar_principal(request):
if request.user.is_authenticated():
logged = 'Logged in as ' + request.user.username + '.'
name_link = 'Logout'
link = '/logout'
else:
logged = 'Not logged in.'
name_link = 'Login'
link = '/login'
list_mus = Museo.objects.all()
if len(list_mus) != 0:
mostrar_cargar = False
else:
mostrar_cargar = True
lista_museos = Museo.objects.exclude(NUMERO_COMENTARIOS__in="0").order_by('-NUMERO_COMENTARIOS')
if len(lista_museos) < 5:
lista_museos = lista_museos[0:len(lista_museos)]
else:
lista_museos = lista_museos[0:5]
cuatro = False
tres = False
dos = False
uno = False
cero = False
# A small trick to keep the interface dimensions consistent
if len(lista_museos) == 0:
cero = True
elif len(lista_museos) == 1:
uno = True
elif len(lista_museos) == 2:
dos = True
elif len(lista_museos) == 3:
tres = True
elif len(lista_museos) == 4:
cuatro = True
paginas_usuarios = Pagina_Personal.objects.all()
if request.method == "GET":
respuesta = render(request, 'museos/index.html', {'lista_museos': lista_museos, 'logged': logged, 'link': link, 'name_link': name_link, 'cero': cero, 'uno': uno, 'dos': dos, 'tres': tres, 'cuatro': cuatro, 'paginas_usuarios': paginas_usuarios, 'mostrar_cargar': mostrar_cargar})
elif request.method == "POST":
if 'Todos' in request.POST:
respuesta = HttpResponseRedirect('/museos')
elif 'About' in request.POST:
respuesta = HttpResponseRedirect('/about')
elif 'Accesibles' in request.POST:
respuesta = HttpResponseRedirect('/accesibles')
elif 'Next' in request.POST:
respuesta = HttpResponseRedirect('/1')
elif 'Cargar' in request.POST: ######
crear_database()
respuesta = HttpResponseRedirect('/')
return respuesta
@csrf_exempt
def mostrar_principal_next(request, numero):
if request.user.is_authenticated():
logged = 'Logged in as ' + request.user.username + '.'
name_link = 'Logout'
link = '/logout'
else:
logged = 'Not logged in.'
name_link = 'Login'
link = '/login'
list_mus = Museo.objects.all()
if len(list_mus) != 0:
mostrar_cargar = False
else:
mostrar_cargar = True
lista_museos = Museo.objects.exclude(NUMERO_COMENTARIOS__in="0").order_by('-NUMERO_COMENTARIOS')
volver = False
if len(lista_museos) <= 5:
lista_museos = lista_museos[0:len(lista_museos)]
elif len(lista_museos) - int(numero)*5 < 5:
lista_museos = lista_museos[int(numero)*5:len(lista_museos)]
volver = True
else:
lista_museos = lista_museos[int(numero)*5:(int(numero)*5+5)]
cuatro = False
tres = False
dos = False
uno = False
cero = False
# A small trick to keep the interface dimensions consistent
if len(lista_museos) == 0:
cero = True
elif len(lista_museos) == 1:
uno = True
elif len(lista_museos) == 2:
dos = True
elif len(lista_museos) == 3:
tres = True
elif len(lista_museos) == 4:
cuatro = True
paginas_usuarios = Pagina_Personal.objects.all()
if request.method == "GET":
respuesta = render(request, 'museos/index.html', {'lista_museos': lista_museos, 'logged': logged, 'link': link, 'name_link': name_link, 'cero': cero, 'uno': uno, 'dos': dos, 'tres': tres, 'cuatro': cuatro, 'mostrar_cargar': mostrar_cargar, 'paginas_usuarios': paginas_usuarios})
elif request.method == "POST":
if 'Todos' in request.POST:
respuesta = HttpResponseRedirect('/museos')
elif 'About' in request.POST:
respuesta = HttpResponseRedirect('/about')
elif 'Accesibles' in request.POST:
respuesta = HttpResponseRedirect('/accesibles')
elif 'Next' in request.POST:
if volver:
respuesta = HttpResponseRedirect('/')
else:
respuesta = HttpResponseRedirect('/' + str(int(numero)+1))
elif 'Cargar' in request.POST: ######
crear_database()
respuesta = HttpResponseRedirect('/')
return respuesta
@csrf_exempt
def mostrar_principal_accesibles(request):
if request.user.is_authenticated():
logged = 'Logged in as ' + request.user.username + '.'
name_link = 'Logout'
link = '/logout'
else:
logged = 'Not logged in.'
name_link = 'Login'
link = '/login'
list_mus = Museo.objects.all()
if len(list_mus) != 0:
mostrar_cargar = False
else:
mostrar_cargar = True
lista_museos = Museo.objects.exclude(NUMERO_COMENTARIOS__in="0").filter(ACCESIBILIDAD = '1').order_by('-NUMERO_COMENTARIOS')
if len(lista_museos) < 5:
lista_museos = lista_museos[0:len(lista_museos)]
else:
lista_museos = lista_museos[0:5]
cuatro = False
tres = False
dos = False
uno = False
cero = False
# A small trick to keep the interface dimensions consistent
if len(lista_museos) == 0:
cero = True
elif len(lista_museos) == 1:
uno = True
elif len(lista_museos) == 2:
dos = True
elif len(lista_museos) == 3:
tres = True
elif len(lista_museos) == 4:
cuatro = True
paginas_usuarios = Pagina_Personal.objects.all()
if request.method == "GET":
respuesta = render(request, 'museos/index.html', {'lista_museos': lista_museos, 'logged': logged, 'link': link, 'name_link': name_link, 'cero': cero, 'uno': uno, 'dos': dos, 'tres': tres, 'cuatro': cuatro, 'mostrar_cargar': mostrar_cargar, 'paginas_usuarios': paginas_usuarios})
elif request.method == "POST":
if 'Todos' in request.POST:
respuesta = HttpResponseRedirect('/museos')
elif 'About' in request.POST:
respuesta = HttpResponseRedirect('/about')
elif 'Accesibles' in request.POST:
respuesta = HttpResponseRedirect('/')
elif 'Next' in request.POST:
respuesta = HttpResponseRedirect('/accesibles/1')
elif 'Cargar' in request.POST: ######
crear_database()
respuesta = HttpResponseRedirect('/')
return respuesta
@csrf_exempt
def mostrar_principal_accesibles_next(request, numero):
if request.user.is_authenticated():
logged = 'Logged in as ' + request.user.username + '.'
name_link = 'Logout'
link = '/logout'
else:
logged = 'Not logged in.'
name_link = 'Login'
link = '/login'
list_mus = Museo.objects.all()
if len(list_mus) != 0:
mostrar_cargar = False
else:
mostrar_cargar = True
lista_museos = Museo.objects.exclude(NUMERO_COMENTARIOS__in="0").filter(ACCESIBILIDAD = '1').order_by('-NUMERO_COMENTARIOS')
volver = False
if len(lista_museos) <= 5:
lista_museos = lista_museos[0:len(lista_museos)]
elif len(lista_museos) - int(numero)*5 < 5:
lista_museos = lista_museos[int(numero)*5:len(lista_museos)]
volver = True
else:
lista_museos = lista_museos[int(numero)*5:(int(numero)*5+5)]
cuatro = False
tres = False
dos = False
uno = False
cero = False
# A little trick to make the interface dimensions fit
if len(lista_museos) == 0:
cero = True
elif len(lista_museos) == 1:
uno = True
elif len(lista_museos) == 2:
dos = True
elif len(lista_museos) == 3:
tres = True
elif len(lista_museos) == 4:
cuatro = True
paginas_usuarios = Pagina_Personal.objects.all()
if request.method == "GET":
respuesta = render(request, 'museos/index.html', {'lista_museos': lista_museos, 'logged': logged, 'link': link, 'name_link': name_link, 'cero': cero, 'uno': uno, 'dos': dos, 'tres': tres, 'cuatro': cuatro, 'mostrar_cargar': mostrar_cargar, 'paginas_usuarios': paginas_usuarios})
elif request.method == "POST":
if 'Todos' in request.POST:
respuesta = HttpResponseRedirect('/museos')
elif 'About' in request.POST:
respuesta = HttpResponseRedirect('/about')
elif 'Accesibles' in request.POST:
respuesta = HttpResponseRedirect('/')
elif 'Next' in request.POST:
if volver:
respuesta = HttpResponseRedirect('/accesibles')
else:
respuesta = HttpResponseRedirect('/accesibles/' + str(int(numero)+1))
elif 'Cargar' in request.POST: ######
crear_database()
respuesta = HttpResponseRedirect('/')
return respuesta
@csrf_exempt
def mostrar_museos(request):
if request.user.is_authenticated():
logged = 'Logged in as ' + request.user.username + '.'
name_link = 'Logout'
link = '/logout'
else:
logged = 'Not logged in.'
name_link = 'Login'
link = '/login'
mostrar = True
if request.method == "GET":
lista_museos = Museo.objects.all()
respuesta = render(request, 'museos/museos.html', {'lista_museos': lista_museos, 'logged': logged, 'link': link, 'name_link': name_link, 'mostrar': mostrar})
elif request.method == "POST":
if 'Enviar' in request.POST:
distrito = request.POST['Distrito'].upper()
lista_museos = Museo.objects.filter(DISTRITO=distrito)
mostrar = False
respuesta = render(request, 'museos/museos.html', {'lista_museos': lista_museos, 'logged': logged, 'link': link, 'name_link': name_link, 'mostrar': mostrar})
elif 'Inicio' in request.POST:
respuesta = HttpResponseRedirect('/')
elif 'About' in request.POST:
respuesta = HttpResponseRedirect('/about')
return respuesta
@csrf_exempt
def mostrar_app_museo(request, identificador):
if request.user.is_authenticated():
logged = 'Logged in as ' + request.user.username + '.'
name_link = 'Logout'
link = '/logout'
mostrar_selec = True
else:
logged = 'Not logged in.'
name_link = 'Login'
link = '/login'
mostrar_selec = False
museo = Museo.objects.get(id=int(identificador))
comentarios = Comentario.objects.filter(museo=museo)
lista_vacia = False
if len(comentarios) == 0:
lista_vacia = True
if request.method == "GET":
respuesta = render(request, 'museos/museos_app.html', {'museo': museo, 'logged': logged, 'link': link, 'name_link': name_link, 'mostrar_selec': mostrar_selec, 'comentarios': comentarios, 'lista_vacia' : lista_vacia })
elif request.method == "POST":
if 'About' in request.POST:
respuesta = HttpResponseRedirect('/about')
elif 'Añadir a lista' in request.POST:
museos_usuario = Usuario.objects.filter(nombre=request.user.username)
try:
nombre_pagina = Pagina_Personal.objects.get(nombre_usuario=request.user.username).nombre_pagina
except Pagina_Personal.DoesNotExist:
nombre_pagina = "Página de " + request.user.username
color_cuerpo = "#FFFFFF"
color_cabecera = "#9E4528"
pagina_personal = Pagina_Personal(nombre_pagina = nombre_pagina, nombre_usuario = request.user.username, color_cuerpo = color_cuerpo, color_cabecera = color_cabecera) ##
pagina_personal.save()
if len(museos_usuario.filter(museo=museo)) == 0:
g = Usuario(nombre = request.user.username, comentario = "", museo = museo)
g.save()
respuesta = render(request, 'museos/museos_app.html', {'museo': museo, 'logged': logged, 'link': link, 'name_link': name_link, 'mostrar_selec': mostrar_selec, 'comentarios': comentarios, 'lista_vacia' : lista_vacia })
elif 'Inicio' in request.POST:
respuesta = HttpResponseRedirect('/')
elif 'Todos' in request.POST:
respuesta = HttpResponseRedirect('/museos')
elif 'Enviar' in request.POST:
coment = request.POST['Comentario']
if coment != "":
g = Comentario(text = request.POST['Comentario'], museo = museo)
g.save()
comentarios = Comentario.objects.filter(museo=museo)
museo.NUMERO_COMENTARIOS = museo.NUMERO_COMENTARIOS + 1
museo.save()
lista_vacia = False
respuesta = render(request, 'museos/museos_app.html', {'museo': museo, 'logged': logged, 'link': link, 'name_link': name_link, 'mostrar_selec': mostrar_selec, 'comentarios': comentarios, 'lista_vacia' : lista_vacia })
return respuesta
@csrf_exempt
def mostrar_usuario(request, usuario):
mostrar_selec = False
if request.user.is_authenticated():
logged = 'Logged in as ' + request.user.username + '.'
name_link = 'Logout'
link = '/logout'
if request.user.username == usuario:
mostrar_selec = True
else:
logged = 'Not logged in.'
name_link = 'Login'
link = '/login'
try:
nombre_pagina = Pagina_Personal.objects.get(nombre_usuario=usuario).nombre_pagina
color_cuerpo = Pagina_Personal.objects.get(nombre_usuario=usuario).color_cuerpo ##
color_cabecera = Pagina_Personal.objects.get(nombre_usuario=usuario).color_cabecera ##
except Pagina_Personal.DoesNotExist:
return HttpResponse('Página no encontrada')
museos_usuario = Usuario.objects.filter(nombre=usuario)
if len(museos_usuario) < 5:
museos_usuario = museos_usuario[0:len(museos_usuario)]
else:
museos_usuario = museos_usuario[0:5]
if request.method == "GET":
respuesta = render(request, 'museos/usuario.html', {'lista_museos': museos_usuario, 'logged': logged, 'link': link, 'name_link': name_link, 'nombre_pagina': nombre_pagina, 'mostrar_selec': mostrar_selec, 'color_cuerpo': color_cuerpo, 'color_cabecera': color_cabecera}) ##
elif request.method == "POST":
if 'Todos' in request.POST:
respuesta = HttpResponseRedirect('/museos')
elif 'About' in request.POST:
respuesta = HttpResponseRedirect('/about')
elif 'Inicio' in request.POST:
respuesta = HttpResponseRedirect('/')
elif 'Modificar' in request.POST:
pagina_personal = Pagina_Personal.objects.get(nombre_usuario=usuario)
pagina_personal.nombre_pagina = request.POST['Pagina']
pagina_personal.save()
nombre_pagina = Pagina_Personal.objects.get(nombre_usuario=usuario).nombre_pagina
respuesta = render(request, 'museos/usuario.html', {'lista_museos': museos_usuario, 'logged': logged, 'link': link, 'name_link': name_link, 'nombre_pagina': nombre_pagina, 'mostrar_selec': mostrar_selec, 'color_cuerpo': color_cuerpo, 'color_cabecera': color_cabecera})
elif 'color_cuerpo_boton' in request.POST: ##
pagina_personal = Pagina_Personal.objects.get(nombre_usuario=usuario)
color_cuerpo = request.POST['color_cuerpo_texto']
color_cuerpo = color_cuerpo.upper()
if color_cuerpo == "MORADO":
color_cuerpo = "#BD8ACF"
elif color_cuerpo == "AZUL":
color_cuerpo = "#706DC9"
elif color_cuerpo == "VERDE":
color_cuerpo = "#4CE656"
elif color_cuerpo == "NARANJA":
color_cuerpo = "#E38914"
elif color_cuerpo == "AMARILLO":
color_cuerpo = "#DBDB3B"
elif color_cuerpo == "ROJO":
color_cuerpo = "#ED2828"
elif color_cuerpo ==
model, or idf==None (happens when the word is not in the model).
idf = self.model.idf(word)
idf = idf is None and 1 or idf
return self.tf(word) * idf
return self.tf(word)
tf_idf = tfidf = term_frequency_inverse_document_frequency
def information_gain(self, word):
""" Returns the information gain for the given word (0.0-1.0).
"""
if self.model is not None:
return self.model.ig(word)
return 0.0
ig = infogain = information_gain
def gain_ratio(self, word):
""" Returns the information gain ratio for the given word (0.0-1.0).
"""
if self.model is not None:
return self.model.gr(word)
return 0.0
gr = gainratio = gain_ratio
@property
def vector(self):
""" Yields the document vector, a dictionary of (word, relevance)-items from the document.
The relevance is tf, tf * idf, infogain or binary if the document is part of a Model,
based on the value of Model.weight (TF, TFIDF, IG, GR, BINARY, None).
The document vector is used to calculate similarity between two documents,
for example in a clustering or classification algorithm.
"""
if not self._vector:
# See the Vector class below = a dict with extra functionality (copy, norm).
# When a document is added/deleted from a model, the cached vector is deleted.
w = getattr(self.model, "weight", TF)
if w not in (TF, TFIDF, IG, INFOGAIN, GR, GAINRATIO, BINARY):
f = lambda w: float(self._terms[w]); w=None
if w == BINARY:
f = lambda w: int(self._terms[w] > 0)
if w == TF:
f = self.tf
if w == TFIDF:
f = self.tf_idf
if w in (IG, INFOGAIN):
f = self.model.ig
if w in (GR, GAINRATIO):
f = self.model.gr
self._vector = Vector(((w, f(w)) for w in self.terms), weight=w)
return self._vector
@property
def concepts(self):
""" Yields the document concept vector if the document is part of an LSA model.
"""
return self.model and self.model.lsa and self.model.lsa.concepts.get(self.id) or None
def keywords(self, top=10, normalized=True):
""" Returns a sorted list of (relevance, word)-tuples that are top keywords in the document.
With normalized=True, weights are normalized between 0.0 and 1.0 (their sum will be 1.0).
"""
n = normalized and sum(self.vector.values()) or 1.0
v = ((f/n, w) for w, f in self.vector.items())
v = heapq.nsmallest(top, v, key=lambda v: (-v[0], v[1]))
return v
def cosine_similarity(self, document):
""" Returns the similarity between the two documents as a number between 0.0-1.0.
If both documents are part of the same model the calculations are cached for reuse.
"""
if self.model is not None:
return self.model.cosine_similarity(self, document)
if document.model is not None:
return document.model.cosine_similarity(self, document)
return cosine_similarity(self.vector, document.vector)
similarity = cosine_similarity
def copy(self):
d = Document(None, name=self.name, type=self.type, description=self.description)
dict.update(d.terms, self.terms)
return d
def __eq__(self, document):
return isinstance(document, Document) and self.id == document.id
def __ne__(self, document):
return not self.__eq__(document)
def __repr__(self):
return "Document(id=%s%s%s)" % (
repr(self._id),
self.name and ", name=%s" % repr(self.name) or "",
self.type and ", type=%s" % repr(self.type) or "")
Bag = BagOfWords = BOW = Document
#--- VECTOR ----------------------------------------------------------------------------------------
# A Vector represents document terms (called features) and their tf or tf * idf relevance weight.
# A Vector is a sparse representation: i.e., a dictionary with only those features > 0.
# This is fast, usually also faster than LSA which creates a full vector space with non-zero values.
# Document vectors can be used to calculate similarity between documents,
# for example in a clustering or classification algorithm.
# To find the average feature length in a model:
# sum(len(d.vector) for d in model.documents) / float(len(model))
class Vector(readonlydict):
id = 0
def __init__(self, *args, **kwargs):
""" A dictionary of (feature, weight)-items of the features (terms, words) in a Document.
A vector can be used to compare the document to another document with a distance metric.
For example, vectors with 2 features (x, y) can be compared using 2D Euclidean distance.
Vectors that represent text documents can be compared using cosine similarity.
"""
s = kwargs.pop("sparse", True)
f = ()
w = None
if len(args) > 0:
# From a Vector (copy weighting scheme).
if isinstance(args[0], Vector):
w = args[0].weight
# From a dict.
if isinstance(args[0], dict):
f = args[0].items()
# From an iterator.
elif hasattr(args[0], "__iter__"):
f = iter(args[0])
Vector.id += 1
self.id = Vector.id # Unique ID.
self.weight = kwargs.pop("weight", w) # TF, TFIDF, IG, BINARY or None.
self._norm = None # Cached L2-norm.
# Exclude zero weights (sparse=True).
f = chain(f, kwargs.items())
f = ((k, v) for k, v in f if not s or v != 0)
readonlydict.__init__(self, f)
@classmethod
def fromkeys(cls, k, default=None, **kwargs):
return Vector(((k, default) for k in k), **kwargs)
@property
def features(self):
return self.keys()
@property
def l2_norm(self):
""" Yields the Frobenius matrix norm (cached).
n = the square root of the sum of the absolute squares of the values.
The matrix norm is used to normalize (0.0-1.0) cosine similarity between documents.
"""
if self._norm is None:
self._norm = sum(w * w for w in self.values()) ** 0.5
return self._norm
norm = l2 = L2 = L2norm = l2norm = L2_norm = l2_norm
def copy(self):
return Vector(self, weight=self.weight, sparse=False)
def __call__(self, vector={}):
""" Vector(vector) returns a new vector updated with values from the given vector.
No new features are added. For example: Vector({1:1, 2:2})({1:0, 3:3}) => {1:0, 2:2}.
"""
if isinstance(vector, (Document, Model)):
vector = vector.vector
v = self.copy()
s = dict.__setitem__
for f, w in vector.items():
if f in v:
s(v, f, w)
return v
#--- VECTOR DISTANCE -------------------------------------------------------------------------------
# The "distance" between two vectors can be calculated using different metrics.
# For vectors that represent text, cosine similarity is a good metric.
# For more information, see Domain Similarity Measures (<NAME>, 2012).
# The following functions can be used if you work with Vectors or plain dictionaries,
# instead of Documents and Models (which use caching for cosine similarity).
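# Example (illustrative weights, not taken from the library's documentation):
#   v1 = {"lion": 2, "zebra": 1}
#   v2 = {"lion": 1, "rhino": 3}
#   cosine_similarity(v1, v2)           # ~0.28
#   distance(v1, v2, method=EUCLIDEAN)  # 11 (squared Euclidean distance)
#   distance(v1, v2, method=MANHATTAN)  # 5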
def features(vectors=[]):
""" Returns the set of unique features for all given vectors.
"""
return set(chain(*vectors))
_features = features
def sparse(v):
""" Returns the vector with features that have weight 0 removed.
"""
for f, w in list(v.items()):
if w == 0:
del v[f]
return v
def relative(v):
""" Returns the vector with feature weights normalized so that their sum is 1.0 (in-place).
"""
n = float(sum(v.values())) or 1.0
s = dict.__setitem__
for f in v: # Modified in-place.
s(v, f, v[f] / n)
return v
normalize = rel = relative
def l2_norm(v):
""" Returns the L2-norm of the given vector.
"""
if isinstance(v, Vector):
return v.l2_norm
return sum(w * w for w in v.values()) ** 0.5
norm = l2 = L2 = L2norm = l2norm = L2_norm = l2_norm
def cosine_similarity(v1, v2):
""" Returns the cosine similarity of the given vectors.
"""
s = sum(v1.get(f, 0) * w for f, w in v2.items())
s = float(s) / (l2_norm(v1) * l2_norm(v2) or 1)
return s
cos = cosine_similarity
def tf_idf(vectors=[], base=2.71828): # Euler's number
""" Calculates tf * idf on the vector feature weights (in-place).
"""
df = {}
for v in vectors:
for f in v:
if v[f] != 0:
df[f] = df[f] + 1 if f in df else 1.0
n = len(vectors)
s = dict.__setitem__
for v in vectors:
for f in v: # Modified in-place.
s(v, f, v[f] * (log(n / df[f], base)))
return vectors
tfidf = tf_idf
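# Example (illustrative): tf_idf() rescales the given vectors in-place.
#   docs = [{"cat": 1.0, "dog": 1.0}, {"cat": 1.0}]
#   tf_idf(docs, base=2)
#   # "cat" occurs in both documents -> idf = log(2/2, 2) = 0, so its weight becomes 0.0;
#   # "dog" occurs in one of the two  -> idf = log(2/1, 2) = 1, so its weight stays 1.0.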
COSINE, EUCLIDEAN, MANHATTAN, CHEBYSHEV, HAMMING = \
"cosine", "euclidean", "manhattan", "chebyshev", "hamming"
def distance(v1, v2, method=COSINE):
""" Returns the distance between two vectors.
"""
if method == COSINE:
return 1 - cosine_similarity(v1, v2)
if method == EUCLIDEAN: # Squared Euclidean distance is used (1.5x faster).
return sum((v1.get(w, 0) - v2.get(w, 0)) ** 2 for w in set(chain(v1, v2)))
if method == MANHATTAN:
return sum(abs(v1.get(w, 0) - v2.get(w, 0)) for w in set(chain(v1, v2)))
if method == CHEBYSHEV:
return max(abs(v1.get(w, 0) - v2.get(w, 0)) for w in set(chain(v1, v2)))
if method == HAMMING:
d = sum(not (w in v1 and w in v2 and v1[w] ==
in comp2):
print "input1 = ",comp1
print "input2 = ",comp2
raise Exception("Problem with one input having a . & one not")
if type(comp1)==str and '.' not in comp1: #it's a dir
if not os.path.isdir(comp1) or not os.path.isdir(comp2):
print "input1 = ",comp1
print "input2 = ",comp2
raise Exception("These directories don't exist!")
files1 = glob(comp1+form+'*.fits')
files2 = glob(comp2+form+'*.fits')
if len(files1)==10 and len(files2)==10:
figs=[]
for fl1,fl2 in zip(files1,files2):
figs.append(ImageWithSpots([fl1,fl2],name1=name1,name2=name2))
return figs
else:
print "input1 = ",comp1
print "input2 = ",comp2
raise Exception("Problem with not enough files or files not matching in glob()")
elif type(comp1)==str and '.' in comp1: #it's a file
if not os.path.isfile(comp1) or not os.path.isfile(comp2):
print "input1 = ",comp1
print "input2 = ",comp2
raise Exception("These files don't exist!")
return ImageWithSpots([comp1,comp2],name1=name1,name2=name2)
elif type(comp1)==list: #it's a list of files
for fl1,fl2 in zip(comp1,comp2):
if not os.path.isfile(fl1) or not os.path.isfile(fl2):
print "either one of the files, or both files don't exist"
print "input1 problem element = ",fl1
print "input2 problem element = ",fl2
raise Exception("These files don't exist!")
figs=[]
for fl1,fl2 in zip(comp1,comp2):
figs.append(ImageWithSpots([fl1,fl2],name1=name1,name2=name2))
return figs
def ImageWithSpots(fls,Xspots=None,name1='image1',name2='image2',nameX='selected spots',mode='alpha',plotlim=None,window=None,ZeroToNan=0,ignore_scale=False,cutinfo=None):
''' this function shows an image in locked step with a collection of boolian spots to mark certain pixels that should be paid attention to
(1) if fls=image & Xspots=spots, then this function takes an image and some spots and plots them in locked mode
(2) if fls=[image1,image2] & Xspots=spots, then this function takes both images and plots them in locked mode with the spots in the middle
(3) if fls=[image1,image2] & Xspots=None, then this function takes both images and plots them in locked mode with Xspots marking the difference
NOTE: (3) is totally equivalent to the old CompareImage function!
mode is 'alpha', 'box', or 'o'
window=((X low lim,X up lim),(Y low lim, Y up lim))
ZeroToNan=(BOOL1,BOOL2) where BOOL determines if you want to plot it with 0 set to white'''
#PlotImage(fl,Xspots=None,ax=None,plotlim=None,name='Input Image',mode='alpha',window=None,ZeroToNan=True,cbar=True):
if plotlim:
CCDextrema=plotlim
elif ignore_scale:
CCDextrema=None
else:
CCDextrema=FilesLimits(fls) if type(fls)==list else FilesLimits([fls])
two_fls_bool= type(fls)==list
if two_fls_bool:
fl1=fls[0]
fl2=fls[1]
#this should give the exact same behavior as CompareImage(fls[0],fls[1])
if Xspots is None:
print "Now in compare mode"
im1=GetImage(fl1)
im2=GetImage(fl2)
Xspots=im1!=im2
if not Xspots.any():
print "Exactly the same!"
return 0
diffs=nonzero((im1!=im2))
spot=diffs[0][0],diffs[1][0]
print "im1[",spot,"]=",im1[spot]
print "im2[",spot,"]=",im2[spot]
else:
fl1=fls
NXs=sum(Xspots)
print "there are",NXs,"# of pixels in Xspots"
if NXs>50000:
if mode in ['rect','Rectangle','rectangle','box','boxes']:
print 'setting mode="o" since there are so many different points'
mode='o'
#do stuff for file#1
im1=GetImage(fl1)
if window:
xx,yy=window
im1=im1[yy[0]:yy[1],xx[0]:xx[1]]
image1=im1.copy()
if ZeroToNan==True:
try:
image1[image1==0]=nan
except ValueError:
image1=asarray(image1,dtype=float32)
image1[image1==0]=nan
#do stuff for file#2
if two_fls_bool:
im2=GetImage(fl2)
if window:
xx,yy=window
im2=im2[yy[0]:yy[1],xx[0]:xx[1]]
if (im1==im2).all():
print "Globally different, but exactly the same within this window!"
#return 0
image2=im2.copy()
if ZeroToNan==True:
try:
image2[image2==0]=nan
except ValueError:
image2=asarray(image2,dtype=float32)
image2[image2==0]=nan
if cutinfo:
name1=name1+' fraction cut='+str(round(sum(isnan(image1))/float(image1.size),7))+'\n'+str(isnan(image1).sum())+' total pixels cut'
name2=name2+' fraction cut='+str(round(sum(isnan(image2))/float(image2.size),7))+'\n'+str(isnan(image2).sum())+' total pixels cut'
imshape=im1.shape
fig=figure(figsize=(22,13.625))
if two_fls_bool:
ax1=fig.add_subplot(121)
ax2=fig.add_subplot(122,sharex=ax1,sharey=ax1)
fig=PlotImage(image2,Xspots=Xspots,ax=ax2,plotlim=CCDextrema,name=name2,ZeroToNan=0,cbar=False,mode=mode)
else:
ax1=fig.add_subplot(111)
fig=PlotImage(image1,Xspots=Xspots,ax=ax1,plotlim=CCDextrema,name=name1,ZeroToNan=0,cbar=False,mode=mode)
ax1.set_adjustable('box-forced')
ax1.set_ylim(-.5,imshape[0]+.5);ax1.set_xlim(-.5,imshape[1]+.5)
fig.tight_layout()
fig.canvas.draw()
box1=ax1.get_position()
ax1.set_position(box1.translated(-.05,0))
box1=ax1.get_position()
axnew=fig.add_axes(box1.shrunk(.53,.6).translated(.4,.2),sharex=ax1,sharey=ax1)
Xx,Xy=nonzero(Xspots)
axnew.scatter(Xy,Xx,marker='o',facecolors='none',edgecolors='k')
axnew.set_adjustable('box-forced')
axnew.set_ylim(-.5,imshape[0]+.5);axnew.set_xlim(-.5,imshape[1]+.5)
axnew.set_title(nameX)
if two_fls_bool:
ax2.set_adjustable('box-forced')
ax2.set_ylim(-.5,imshape[0]+.5);ax2.set_xlim(-.5,imshape[1]+.5)
box2=ax2.get_position()
ax2.set_position(box2.translated(.07,0))
fig.canvas.draw()
#namespace.update(locals())
return fig
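# Illustrative calls, matching the three modes listed in the docstring (file names are placeholders):
#   ImageWithSpots('image.fits', Xspots=mask)               # (1) one image + spots
#   ImageWithSpots(['old.fits','new.fits'], Xspots=mask)    # (2) two locked images + spots
#   ImageWithSpots(['old.fits','new.fits'])                 # (3) two locked images, spots = where they differ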
def CompareImage(fl1,fl2,name1='Old',name2='New',mode='alpha',plotlim=None,window=None,ZeroToNan=(1,1),cutinfo=False):
'''CompareImage isn't a function anymore! You should instead run ImageWithSpots'''
print "CompareImage isn't a function anymore! You should instead run ImageWithSpots"
fig=ImageWithSpots([fl1,fl2],name1=name1,name2=name2,mode=mode,plotlim=plotlim,window=window,ZeroToNan=ZeroToNan[0],cutinfo=cutinfo)
return fig
def PlotLockedImages(fl1,fl2,name1='Old',name2='New',colorbar_same=False):
'''plot these two files side-by-side and lock them so that when you zoom it'll zoom on both windows'''
im1=GetImage(fl1)
im2=GetImage(fl2)
imshape=im1.shape
vmin1=scipy.stats.scoreatpercentile(im1.flatten(),.1)
vmin2=scipy.stats.scoreatpercentile(im2.flatten(),.1)
vmax1=scipy.stats.scoreatpercentile(im1.flatten(),99.5)
vmax2=scipy.stats.scoreatpercentile(im2.flatten(),99.5)
if colorbar_same:
vmin=min(vmin1,vmin2)
vmax=max(vmax1,vmax2)
(vmin1,vmin2)=(vmin,vmin)
(vmax1,vmax2)=(vmax,vmax)
fig=figure(figsize=(16,12))
ax1=fig.add_subplot(1,2,1)
title(name1)
imshowed=ax1.imshow(im1,vmin=vmin1,vmax=vmax1,interpolation='nearest',origin='lower left')
colbar=colorbar(imshowed,ax=ax1)
ax1.set_adjustable('box-forced')
ax1.set_ylim(-.5,imshape[0]+.5);ax1.set_xlim(-.5,imshape[1]+.5)
fig.tight_layout()
fig.canvas.draw()
box1=ax1.get_position()
ax1.set_position(box1.translated(-.05,0))
box1=ax1.get_position()
#plot next image
ax2=fig.add_subplot(1,2,2,sharex=ax1,sharey=ax1)
title(name2)
imshowed=ax2.imshow(im2,vmin=vmin2,vmax=vmax2,interpolation='nearest',origin='lower left')
colbar=colorbar(imshowed,ax=ax2)
ax2.set_adjustable('box-forced')
ax2.set_ylim(-.5,imshape[0]+.5);ax2.set_xlim(-.5,imshape[1]+.5)
box2=ax2.get_position()
ax2.set_position(box2.translated(.07,0))
return fig,ax1,ax2
def PlotImage(fl,Xspots=None,ax=None,plotlim=None,name='Input Image',mode='alpha',window=None,ZeroToNan=True,cbar=True):
''' This function takes a file or image and plots it with either boxes or alpha differences over the points in Xspots
mode is 'alpha', 'box', or 'o'
window=((X low lim,X up lim),(Y low lim, Y up lim))
ZeroToNan=BOOL #determines if you want to plot it with 0 set to white'''
im=GetImage(fl)
if not plotlim:
plotlim=ImageLimits(fl,delta=.001)
print "Set Plot Limits to Extrema: ",plotlim
if window:
xx,yy=window
im=im[yy[0]:yy[1],xx[0]:xx[1]]
if not ax:
fig=figure(figsize=(10,15))
ax=fig.add_subplot(1,1,1)
else:
fig=ax.figure
image=im.copy()
if ZeroToNan:image[image==0]=nan
ax.set_title(name+'\nfraction cut='+str(round(sum(isnan(image))/float(image.size),7))+' of '+str(isnan(image).sum())+' total pixels cut ')
#SIMPLE_RECTS: if this isn't working well enough, then consider trying to get BETTER_RECTS to work (below)
if not (Xspots is None):
if mode in ['circles','o','circle']:
Xx,Xy=nonzero(Xspots)
ax.scatter(Xy,Xx,edgecolors='k',facecolors='None')
imshowed=ax.imshow(image,vmin=plotlim[0],vmax=plotlim[1],interpolation='nearest',origin='lower left')
if mode in ['rect','Rectangle','rectangle','box','boxes']:
Xx,Xy=nonzero(Xspots)
for x,y in zip(Xx,Xy):
#txt=ax.text(y,x,'X',color='w',alpha=.5)
rect=Rectangle((y-.5,x-.5),1,1,axes=ax,color='k',fill=False,ec='k')
ax.add_patch(rect)
imshowed=ax.imshow(image,vmin=plotlim[0],vmax=plotlim[1],interpolation='nearest',origin='lower left')
if mode in ['alpha','Alpha']:
my_norm = matplotlib.colors.Normalize(vmin=plotlim[0],vmax=plotlim[1])
my_cmap = copy.copy(cm.get_cmap('jet'))
c_data= my_cmap(my_norm(image)) #RGBA
c_data[:, :, 3] = .8 # make everything 80% opaque
c_data[Xspots, 3] = 1 # reset the marked pixels as full opacity
#plot them
extent=[0,image.shape[1],0,image.shape[0]]
imshowed=ax.imshow(c_data,extent=extent, interpolation='nearest',origin='lower left',norm=my_norm)
#imshowed=ax.imshow(image,vmin=plotlim[0],vmax=plotlim[1],interpolation='nearest',origin='lower left')
if cbar: colbar=colorbar(imshowed,ax=ax)
else:
imshowed=ax.imshow(image,vmin=plotlim[0],vmax=plotlim[1],interpolation='nearest',origin='lower left')
if cbar: colbar=colorbar(imshowed,ax=ax)
return fig
#BETTER_RECTS
#if 0<Xspots.sum()<1000:
# Xx,Xy=nonzero(Xspots)
# for x,y in zip(Xx,Xy):
# print x,y
# rect=Rectangle((y-.5,x-.5),1,1,color='w',ec='w',fill=False)
# ax1.add_patch(rect)
# ax2.add_patch(rect)
#elif 1000<Xspots.sum():
# xs=Xspots.copy()
# labels,Nlabel=scipy.ndimage.label(Xspots)
# slices = scipy.ndimage.find_objects(labels)
# for lab in range(1,Nlabel+1):
# sl=slices[lab-1]
# FracInSlice=(labels[sl]==lab).mean()
# if FracInSlice>.75:
# Center,Size=slice_center_size(sl)
# x,y=int(sl[0].start),int(sl[1].start)
# rect=Rectangle((y-.5,x-.5),Size[1],Size[0],color='w',fill=False,ec='w')
# ax1.add_patch(rect)
# ax2.add_patch(rect)
# Xspots[sl]=False
# Xx,Xy=nonzero(Xspots)
# for x,y in zip(Xx,Xy):
# rect=Rectangle((y-.5,x-.5),1,1,color='w',fill=False,ec='w')
# ax1.add_patch(rect)
# ax2.add_patch(rect)
##################post-RegionMaker era (July 2013 or later)################################
def ImageLimits(flOar,delta=0.001):
''' this takes a file (or an array), and returns the limiting values in the image'''
im=GetImage(flOar)
MASKED=(im[isfinite(im)]>=0.0).all()
if MASKED:
#if lower bound of the image is 0, then assume 0 indicates a WEIGHT=0 (masked pixel)
CCDextrema=[im[im>0.0].min()*(1-delta),im[isfinite(im)].max()*(1+delta)]
else:
#if there are negative values, we assume there are no weights
CCDextrema=[im[isfinite(im)].min()*(1-delta),im[isfinite(im)].max()*(1+delta)]
if isnan(im).any():
print "there are "+str(sum(isnan(im)))+" nans in image which are ignored"
if CCDextrema[0]<0:
CCDextrema[0]*=(1+delta)/(1-delta)
return CCDextrema
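# Illustrative use (array input; GetImage also accepts a fits file name):
#   ImageLimits(array([[0., 1.], [2., 3.]]))  # -> [~0.999, ~3.003]; zeros are treated as masked pixels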
def OrderFiles(files): #re-write #useless
'''orders files in increasing order of CCD number'''
sorted_files=range(10)
for fl in files:
if (not '.fits' in fl) and (not '.reg' in fl):
raise Exception("Input "+fl+" isn't a fits file or a region file!")
elif fl=="-i":continue
else:
CCDnum=GetCCD(fl)
sorted_files[CCDnum-1]=fl
return sorted_files
def MaskMiddles(image,corners=1):
self_ar=array([[0,0,0],[0,1,0],[0,0,0]],dtype=bool)
image_nans=isnan(image)
hom=zeros(image_nans.shape,dtype=bool)
if corners:
for corner in [(0,0),(0,-1),(-1,0),(-1,-1)]:
hit_corner=zeros((3,3),dtype=bool)
hit_corner[corner]=1
hit=scipy.ndimage.morphology.binary_dilation(hit_corner,conn4)
hom+=scipy.ndimage.morphology.binary_hit_or_miss(image_nans, structure1=hit, structure2=self_ar)
LandR=array([[0,0,0],[1,0,1],[0,0,0]],dtype=bool)
UandD=LandR.T
hom+=scipy.ndimage.morphology.binary_hit_or_miss(image_nans, structure1=LandR, structure2=logical_not(LandR))
hom+=scipy.ndimage.morphology.binary_hit_or_miss(image_nans, structure1=UandD, structure2=logical_not(UandD))
image[hom]=nan
return image
def FilesLimits(files,delta=.001):
'''This takes a list of files, and gives you the limits to use for vmin and vmax in plotting those files together'''
mins,maxs=[],[]
for fl in files:
minmin,maxmax=ImageLimits(fl,delta=delta)
mins.append(minmin)
maxs.append(maxmax)
#try:
# for fl in files:
# minmin,maxmax=ImageLimits(fl,delta=delta)
# mins.append(minmin)
# maxs.append(maxmax)
#except Exception:
# #it was given a file, not a list of files!
# print "WARNING: Don't Use imagetools.FilesLimits for this! Use imagetools.ImageLimits"
# fl=files
# minmin,maxmax=ImageLimits(fl,delta=delta)
# mins.append(minmin)
# maxs.append(maxmax)
return (min(mins),max(maxs))
def FilesHists(files,shape=(4,4),title=None,lims=None,Nbins=201): #use #re-write (ImageHist, generalize) #potential
'''This takes a list of files, and plots histograms of those files together'''
if not lims:
lims=FilesLimits(files)
bins=linspace(lims[0],lims[1],Nbins)
fig=figure(figsize=(20,15))
fig.subplots_adjust(hspace=.2,wspace=.2,right=.97,top=.97, bottom=.03, left=.03)
if title: fig.suptitle(title)
size=shape[0]*shape[1]
if size>len(files):
print "more files than plotting size alotted, so I'm taking only ",size," of possible ",len(files)
files=files[:size]
for num,fl in enumerate(files):
fit=pyfits.open(fl)
data=fit[0].data
#CCDnum=GetCCD(fl)
light=data.reshape((-1,))
ax=fig.add_subplot(shape[0],shape[1],num+1)
x,bins,patches=hist(light,bins=bins,log=True)
ax.set_xlim(lims[0],lims[1])
return fig
def GetImage(flOar):
'''This takes a file, and returns the image from the fits file. If you give it an array, it assumes that is the image and just returns the argument'''
#if type(flOar)==str and os.path.isfile(flOar):
try:
fitfl=pyfits.open(flOar)
im=fitfl[0].data
fitfl.close()
except:
if type(flOar)==ndarray:
im=flOar
elif type(flOar)==ma.core.MaskedArray:
im=flOar
elif type(flOar)==list:
im=array(flOar)
else:
print "input = ",flOar
raise Exception("Cannot get an image from this input!")
return im
def variance_estimate(data,err,mean=None,nfitpars=0): #simple #step3_run_fit
if mean==None:
d = 0
w = 0
for i in xrange(len(data)):
w += 1/err[i]**2.
d += data[i]/err[i]**2.
mean = d/w
w = 0
d = 0
for i in xrange(len(data)):
w += 1/err[i]**2.
d += 1/err[i]**2.*(data[i] - mean)**2.
weight_variance = d/w
variance = scipy.var(data)
n = 0
d = 0
for i in xrange(len(data)):
n += 1.
d += ((data[i] - mean)/err[i])**2.
''' this is not quite right '''
redchi = d/(n-nfitpars)
print 'variance_estimate| variance=',variance , ' weight_variance=',weight_variance , ' redchi=',redchi
return variance, weight_variance, redchi
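# Illustrative call (made-up numbers): with equal errors the weighted mean equals the plain mean.
#   variance_estimate([1.,2.,3.],[1.,1.,1.])  # -> (0.666..., 0.666..., 0.666...) i.e. variance, weight_variance, redchi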
def Hist_Overflow(data,bins,ax=None,**kwargs):
'''this will plot a histogram with a hatched overflow bin'''
if not bins[-1]==inf:
bins=append(bins,[inf])
if not ax:
f=figure(**kwargs)
ax=f.add_subplot(111)
x,bins,patches=ax.hist(data,bins=bins,**kwargs)
ax.bar(left=bins[-2],height=x[-1],width=bins[2]-bins[1],fc='b',ec='w',hatch='\\\\\\\\\\',label='overflow')
ax.set_xlim(bins[0],bins[-2]+(bins[2]-bins[1]))
yd,yu=ax.get_ylim()
xd,xu=ax.get_xlim()
if kwargs.has_key('log'):
ytxt=.7*(log10(yd)+log10(yu))
else:
ytxt=.7*(yd+yu)
width=(bins[2]-bins[1])
over=x[-1]
meanmean=data.mean()
minmin=data.min()
maxmax=data.max()
if type(minmin)==float: printit='min/max = '+str(round(minmin,2))+'/'+str(round(maxmax,2))+'\nmean = '+str(round(meanmean,2))
else: printit='min/max = '+str(minmin)+'/'+str(maxmax)+'\nmean = '+str(meanmean)
#if over>0 and over<5:
# overpts=data[data>bins[-2]]
# printit+='\n'+str(over)+' overflow points:'
# for overpt in overpts: printit+='\n'+str(overpt)
# ax.text(bins[0]+width,ytxt,printit)
#else:
# ax.text(bins[0]+width,ytxt,printit)
ax.text(xd+(xu-xd)*.2,ytxt,printit)
return ax
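# Illustrative use (made-up data): values beyond the last bin edge land in the hatched overflow bar.
#   Hist_Overflow(array([0.1, 0.5, 1.5, 7.0, 42.0]), bins=linspace(0, 2, 5))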
def AxesCompact(fig,compact=.1): #plotting
''' tighten the margins of fig by `compact` on each side, then return the figure.
Ex: fig = imagetools.AxesCompact(fig, compact=.1)'''
small=compact
big=1-compact
fig.subplots_adjust(left=small,bottom=small,top=big,right=big)
fig.canvas.draw()
return fig
def AxesList(fig,shape,compact=False,**kwargs): #plotting
''' take fig and divide it up into a bunch of axes, then return the figure and the list of axes.
Ex: fig,axes = imagetools.AxesList(fig=fig,shape=(2,5))'''
if compact != False:
small=compact
big=1-compact
fig.subplots_adjust(left=small,bottom=small,top=big,right=big)
for key in kwargs.keys():
if key in 'left bottom right top wspace hspace':
exec "fig.subplots_adjust("+key+"=kwargs[key])"
size=shape[0]*shape[1]
axes=[]
for i in range(size):
axes.append(fig.add_subplot(shape[0],shape[1],i+1))
return fig,axes
def AxesSameLims(fig,axes=False,compact=False,**kwargs): #plotting
'''take fig and make axes limits match'''
fig.canvas.draw()
if not axes:
axes=fig.get_axes()
for key in kwargs.keys():
if key in 'left bottom right top wspace hspace':
fig.subplots_adjust(**{key: kwargs[key]})
yUPlims=[]
yDNlims=[]
xUPlims=[]
xDNlims=[]
for ax in axes:
xd,xu=ax.get_xlim()
yd,yu=ax.get_ylim()
xUPlims.append(xu)
yUPlims.append(yu)
xDNlims.append(xd)
yDNlims.append(yd)
xDN=min(xDNlims)
yDN=min(yDNlims)
xUP=max(xUPlims)
yUP=max(yUPlims)
for ax in axes:
ax.set_xlim(xDN,xUP)
ax.set_ylim(yDN,yUP)
if compact != False:
fig.canvas.draw()
small=compact
big=1-compact
fig.subplots_adjust(left=small,bottom=small,top=big,right=big)
fig.subplots_adjust(hspace=small,wspace=small)
(xN,yN,N) = axes[0].get_geometry()
N=xN*yN
xx=zeros((xN,yN),dtype=bool)
yy=zeros((xN,yN),dtype=bool)
yy[:,0]=1
xx[-1,:]=1
axesN = arange(N).reshape(xN,yN)
noYlabel=axesN[logical_not(yy)]
noXlabel=axesN[logical_not(xx)]
for yn in noYlabel:
axes[yn].set_yticklabels([])
for xn in noXlabel:
axes[xn].set_xticklabels([])
Ylabel=axesN[yy]
Xlabel=axesN[xx]
for yn in Ylabel:
#axes[yn].set_yticklabels(axes[yn].get_yticklabels()[:-1])
ax=axes[yn]
ytl = ax.get_yticklabels()
labels=[yt.get_text() for yt in ytl]
labels[-1]=''
ax.set_yticklabels(labels)
for xn in Xlabel:
#axes[xn].set_xticklabels(axes[xn].get_yticklabels()[:-1])
ax=axes[xn]
xtl = ax.get_xticklabels()
labels=[xt.get_text() for xt in xtl]
labels[-1]=''
ax.set_xticklabels(labels)
fig.canvas.draw()
return fig
def AxesSameColors(fig,axes=None,lims=None,cbar_ax_pos=[0.96, 0.05, 0.01, 0.9],**kwargs): #plotting
'''take fig and make color axes limits match. This will change the **colors** and the **colorbars**, so that the colors and colorbars will both be correct!
if set `lims=(down,up)` in the inputs, then you specify limits, otherwise the limits are chosen from the extrema of the existing colorbars
cbar_ax_pos=[*left*, *bottom*, *width*,*height*]
if you don't want a colorbar, then cbar_ax_pos=False'''
fig.canvas.draw()
try:
if not axes:
axes=fig.get_axes()
if not lims:
UPlims=[]
DNlims=[]
for ax in axes:
for im in ax.get_images():
d,u=im.get_clim()
UPlims.append(u)
DNlims.append(d)
DN=min(DNlims)
UP=max(UPlims)
else:
DN,UP=lims
for ax in axes:
for im in ax.get_images():
im.set_clim(DN,UP)
if cbar_ax_pos:
make_room=cbar_ax_pos[0]
fig.subplots_adjust(right=make_room+.005)
cbar_ax=fig.add_axes(cbar_ax_pos)
fig.colorbar(im,cax=cbar_ax)
fig.canvas.draw()
return fig
except:
namespace.update(locals())
raise
def AxesStripText(fig,axes=False,labels=True,titles=True,allticks=True,alllabels=False,alltitles=False,**kwargs): #plotting
fig.canvas.draw()
if not axes:
axes=fig.get_axes()
for ax in axes:
#ax.set_title('')
if allticks:
numXL=len(ax.get_xticklabels())
ax.set_xticklabels(['']*numXL)
numyL=len(ax.get_yticklabels())
ax.set_yticklabels(['']*numyL)
if alllabels:
try:
ax.set_xlabel('')
except AttributeError:pass
try:
ax.set_ylabel('')
except AttributeError:pass
elif labels:
try:
if ax.is_last_row(): pass
else:ax.set_xlabel('')
except AttributeError:pass
try:
if ax.is_first_col(): pass
else:ax.set_ylabel('')
except AttributeError:pass
if alltitles:
ax.set_title('')
elif titles:
try:
if ax.is_first_row(): pass
else:ax.set_title('')
except AttributeError:pass
fig.canvas.draw()
return fig
def AxesRowColumn(fig,rows,columns,axes=False,**kwargs): #plotting
'''take fig and label the subplot grid: prepend each entry of `columns` to the titles of the first-row axes and each entry of `rows` to the ylabels of the first-column axes.'''
fig.canvas.draw()
if not axes:
axes=fig.get_axes()
try:
rcount=0
ccount=0
for ax in axes:
if ax.is_first_row(): #col
title=ax.get_title()
if title: ax.set_title(columns[ccount]+'\n'+title)
else: ax.set_title(columns[ccount])
ccount+=1
if ax.is_first_col(): #row
yl=ax.get_ylabel()
ax.set_ylabel(rows[rcount]+'\n'+yl)
if yl: ax.set_ylabel(rows[rcount]+'\n'+yl)
else: ax.set_ylabel(rows[rcount])
rcount+=1
##fig.text(x,y,s)
fig.canvas.draw()
return fig
except:
namespace.update(locals())
raise
def imstats_params(fl,dictout=False): #command
'''this function returns the statistics given by the `imstats` command
i.e. the mode, lquartile, median, uquartile, mean, sigma
it is totally equivalent to the following BASH command:
imstats ${fl} | tail -1 | awk {print $2, $3, $4, $5, $6, $7}'''
p1=Popen(["imstats",fl],stdout=PIPE)
p2=Popen(["tail","-1"],stdin=p1.stdout,stdout=PIPE)
p1.stdout.close()
p3=Popen(["awk", "{print $2, $3, $4, $5, $6, $7}"],stdin=p2.stdout,stdout=PIPE)
p2.stdout.close()
output=p3.communicate()[0]
outputs=output.split(' ')
mode, lquartile, median, uquartile, mean, sigma = (float(param) for param in outputs)
if dictout:
imstats={}
imstats['mode']=mode
imstats['lquartile']=lquartile
imstats['median']=median
imstats['uquartile']=uquartile
imstats['mean']=mean
imstats['sigma']=sigma
return imstats
return mode, lquartile, median, uquartile, mean, sigma
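# Illustrative use (requires the external `imstats` command; the file name is a placeholder):
#   stats = imstats_params('ccd1.fits', dictout=True)
#   print stats['median'], stats['sigma']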
def sextractor_RMS(fl,getback=False):#command
'''this function returns the statistics given by the `sextractor` command
i.e. the background and the RMS.'''
tmpfl="/u/ki/awright/InstallingSoftware/pythons/sextractimtools/"+os.path.basename(fl)[:-5]+".log"
cosmic_fl="/u/ki/awright/InstallingSoftware/pythons/sextractimtools/cosmic%s.cat" % (id_generator(10),)
print "**REMINDER** for now I'm using the theli version of sextractor (2.2.2) rather than the default (2.8.6) | |
to string representation.
Returns
-------
np.ma
Masked array of LULC strings
"""
return base.convert_lulc_id_to_class(
self.lulc_matrix_original, mapping=self.original_lulc_mapping
)
def _assign_property_rights(self):
arr = self.block_definition_matrix_pixel_lvl.copy()
for k, v in dicts.stakeholder_property_dict.items():
for j in v:
arr[arr == j] = k
self.property_rights_matrix = arr
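    # Illustrative remapping (hypothetical ids): with dicts.stakeholder_property_dict = {1: [10, 11], 2: [12]},
    # a pixel-level block matrix [[10, 12], [11, 10]] becomes the property-rights matrix [[1, 2], [1, 1]].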
def update_lulc_matrix_based_on_mgmt_decisions(
self, current_round=None, seed=42
):
# TODO [some time in the new year]:
# clean up and simplify, the code is really messy as of now
        # A fixed seed makes map updating follow the same random process on every call;
        # if seed=None, the random draws change for each function call.
if seed is not None:
np.random.seed(seed)
# leaves us the freedom to explicitly choose a round,
# but defaults to using the class parameter value
if current_round is None:
current_round = self.current_round
# max pixels
n_pixels_per_block_max = self.n_pixels_per_block ** 2
n_pixels_max = self.n_pixels ** 2
# query relevant mgmt decisions of current round
mgmt_decisions_of_round = self.df_mgmt_decisions_long.query(
"Round == {current_round}".format(current_round=current_round)
)
# all blocks
blocks = self.block_definition_matrix_block_lvl.flatten()
# copy lulc_matrix data from the matrix stack
mar_field_2d = self.lulc_matrix_stack[
:, :, self.current_round - 1
].copy()
mar_field_1d = mar_field_2d.flatten()
# iterate over blocks where lulc should be
# updated based on mgmt decision
# ---------------------------------------------------
for block in blocks:
# copy mar_field in each block iteration
mar = mar_field_2d.copy()
# query relevant mgmt decisions of current round
# TODO: find tailored solution for SSDA
mgmt_decisions_of_round_and_block = mgmt_decisions_of_round.query(
"Plot == {block} & Player != 'SSDA'".format(block=block)
)
# continue to next block in case no mgmt decision has been made
if mgmt_decisions_of_round_and_block.empty:
continue
else:
# create a boolean mask which is True over the
# pixels of the particular block
mgmt_mask = self.block_definition_matrix_pixel_lvl == block
# update the mask of the lulc_matrix based on the block mask
mar[~mgmt_mask] = np.ma.masked
# calculate number of counts per lulc category
unique, counts = np.unique(mar, return_counts=True)
# infer number of unmasked px of block (this is where the
# we can actually update the lulc as I don't let them
# expand any areas outside of the biosphere)
n_pixels_unmasked, n_pixels_masked_total = (
counts[:-1],
counts[-1],
)
n_pixels_unmasked_in_block = (
n_pixels_max - n_pixels_masked_total
)
lulc_types_unmasked = unique[unique.mask == False].data
# store info as dict
block_lulc_distribution = dict(
zip(lulc_types_unmasked, n_pixels_unmasked)
)
# create index matrix
mar_ix_1d = np.arange(mar.ravel().size)
mar_ix_1d_masked = np.ma.masked_array(mar_ix_1d, mask=mar.mask)
mar_ix_2d_masked = mar_ix_1d_masked.reshape(
self.n_pixels, self.n_pixels
)
# iterate through the lulc types
for (
group_id,
group_data,
) in mgmt_decisions_of_round_and_block.groupby("Player"):
# ------------------------------------------------
# 1) Farmers Decisions
# ------------------------------------------------
# decision on cattle
cattle_farming_conversion_decision = (
group_data[group_data["lulc_category_id"] == 4]
.loc[:, "mgmt_decision"]
.values[0]
)
# decision on sheep: magnitude with opposite sign of cattle
sheep_farming_conversion_decision = (
cattle_farming_conversion_decision * -1
)
                    # farmer's decisions on native forest are evaluated only
# after the farming type conversions (if any)
native_forest_farmer_decision = (
group_data[group_data["lulc_category_id"] == 2]
.loc[:, "mgmt_decision"]
.values[0]
)
# A) check if action for farming type conversion is needed
# ------------------------------------------------
if np.isclose(
cattle_farming_conversion_decision, 0
) or np.isnan(cattle_farming_conversion_decision):
# no action
pass
else:
# action
# update lulc matrix in block via random sampling
# of array elements without replacement
# case 1: convert from sheep to cattle
if cattle_farming_conversion_decision > 0:
convert_to = 4
# case 2: convert from cattle back to sheep
elif cattle_farming_conversion_decision < 0:
convert_to = 1
else:
# TODO: check
continue
# case 3: plant native forest, via random assignment of
# possibly all of sheep, cattle and native forest
# TODO
# random draw among those indices that are within
# the block and playing field
sample_size = int(
np.abs(cattle_farming_conversion_decision)
)
# respect an upper limit on the maximum sample size
if sample_size > n_pixels_unmasked_in_block:
sample_size = n_pixels_unmasked_in_block
ix_sel_for_draw = mar_ix_1d_masked[
mar_ix_1d_masked.mask == False
]
random_ix_1d = np.random.choice(
a=ix_sel_for_draw, size=sample_size, replace=False
)
# update 1d copy of field lulc matrix within
# block at drawn indices
mar_field_1d[random_ix_1d] = convert_to
# B) check if action for conversion to more native
# forest is needed
# ------------------------------------------------
# TODO: comment out if crashing
if (
np.isclose(native_forest_farmer_decision, 0)
or np.isnan(native_forest_farmer_decision)
or native_forest_farmer_decision < 0
):
# no action
pass
else:
# action
# update lulc matrix in block via random sampling
# of array elements without replacement
# ------------------------------------------------
# case 3: plant native forest, via random assignment of
# possibly all of sheep, cattle and native forest
convert_to = 2
# random draw among those indices that are within
# the block and playing field
sample_size = int(np.abs(native_forest_farmer_decision))
# respect an upper limit on the maximum sample size
if sample_size > n_pixels_unmasked_in_block:
sample_size = n_pixels_unmasked_in_block
ix_sel_for_draw = mar_ix_1d_masked[
mar_ix_1d_masked.mask == False
]
random_ix_1d = np.random.choice(
a=ix_sel_for_draw, size=sample_size, replace=False
)
# update 1d copy of field lulc matrix within
# block at drawn indices
mar_field_1d[random_ix_1d] = convert_to
# ------------------------------------------------
# 2) Foresters Decisions
# ------------------------------------------------
# A) conversion from native forest to commercial forest
# ------------------------------------------------
native_forest_conversion_decision = (
group_data[group_data["lulc_category_id"] == 2]
.loc[:, "mgmt_decision"]
.values[0]
)
if np.isclose(
native_forest_conversion_decision, 0
) or np.isnan(native_forest_conversion_decision):
pass
else:
# update lulc matrix in block via random sampling
# of array elements without replacement
# ------------------------------------------------
# determine which forest category to convert to
if native_forest_conversion_decision > 0:
convert_forest_to = 2
elif native_forest_conversion_decision < 0:
convert_forest_to = 3
else:
# TODO: check
continue
# random draw among those indices that are within
# the block and playing field
sample_size = int(
np.abs(native_forest_conversion_decision)
)
# adjust sample size based on boundary conditions
if sample_size > n_pixels_unmasked_in_block:
sample_size = n_pixels_unmasked_in_block
ix_sel_for_draw = mar_ix_1d_masked[
mar_ix_1d_masked.mask == False
]
random_ix_1d = np.random.choice(
a=ix_sel_for_draw, size=sample_size, replace=False
)
# update 1d copy of field lulc matrix within
# block at drawn indices
mar_field_1d[random_ix_1d] = convert_forest_to
# reshape 1d mar back to 2d
mar_field_2d_updated = mar_field_1d.reshape(
self.n_pixels, self.n_pixels
)
# update 3d
mar_field_3d_updated = mar_field_2d_updated.reshape(
mar_field_2d_updated.shape[0], mar_field_2d_updated.shape[1], 1
)
# obsolete:
# plot the updated block after the block iteration is done
# plt.imshow(mar, cmap=self.cmap_lulc)
# plt.imshow(mar_field_2d_updated, cmap=self.cmap_lulc, alpha=1)
# plt.imshow(mar_ix_2d_masked, alpha=0.5)
# finally, add new lulc map to lulc_matrix_stack
# --------------------------------------------------------
try:
self.lulc_matrix_stack = np.ma.append(
a=self.lulc_matrix_stack, b=mar_field_3d_updated, axis=2
)
except UnboundLocalError as msg:
print(
"Error: {} \n"
"-> Very likely, either the Plot Number or "
"the Mgmt Decision is missing. Check all sheets!".format(msg)
)
def update_data_store(
self, current_round=None, teamwork=None, ssda_choice=None
):
# update income, unemployment, prices etc
# leaves us the freedom to explicitly choose a round if needed
if current_round is None:
current_round = self.current_round
# leaves us the freedom to explicitly choose if needed
if teamwork is None:
teamwork = model.teamwork(self.cooperation_matrix_block_lvl)
# ---------------------------------------------------------------------
# update stakeholder-unspecific area totals
# ---------------------------------------------------------------------
unique_global, counts_global = np.unique(
self.lulc_matrix_stack[:, :, self.current_round], return_counts=True
)
d_unique_counts_global = dict(
zip(unique_global[:-1], counts_global[:-1])
)
self.data_store["variable_model_params"]["area_sheep_total"].append(
d_unique_counts_global[1]
)
self.data_store["variable_model_params"]["area_n_forest_total"].append(
d_unique_counts_global[2]
)
self.data_store["variable_model_params"]["area_c_forest_total"].append(
d_unique_counts_global[3]
)
self.data_store["variable_model_params"]["area_cattle_total"].append(
d_unique_counts_global[4]
)
# ---------------------------------------------------------------------
# update stakeholder-specific area totals
# ---------------------------------------------------------------------
# iteratively populate the stakeholder-specific data stores
# TODO [low]: ultimatively remove hard-coding in the following sequence
for (
stakeholder_name,
stakeholder_id,
) in dicts.stakeholder_id_dict.items():
if stakeholder_name == "SSDA":
continue
# select data of property that stakeholder owns
lulc_data_sel = self.lulc_matrix_stack[:, :, self.current_round][
self.property_rights_matrix == stakeholder_id
]
# get unique value counts
unique, counts = np.unique(lulc_data_sel, return_counts=True)
d_unique_counts = dict(zip(unique[:-1], counts[:-1]))
# init relevant data store parts
try:
self.data_store[stakeholder_name]["area_sheep"].append(
d_unique_counts[1]
)
except KeyError as msg:
print(msg)
continue
try:
self.data_store[stakeholder_name]["area_n_forest"].append(
d_unique_counts[2]
)
except KeyError as msg:
print(msg)
continue
try:
self.data_store[stakeholder_name]["area_c_forest"].append(
d_unique_counts[3]
)
except KeyError as msg:
print(msg)
continue
try:
self.data_store[stakeholder_name]["area_cattle"].append(
d_unique_counts[4]
)
except KeyError as msg:
print(msg)
continue
# ---------------------------------------------------------------------
# teamwork update
# ---------------------------------------------------------------------
self.data_store["variable_model_params"]["teamwork"].append(teamwork)
# ---------------------------------------------------------------------
# Tourism update
# ---------------------------------------------------------------------
if ssda_choice is not None:
tourism_mask = self.block_definition_matrix_pixel_lvl == ssda_choice
# copy lulc_matrix data from stack
mar = self.lulc_matrix_stack[:, :, self.current_round].copy()
# update the mask of the lulc_matrix based on the block mask
mar[~tourism_mask] = np.ma.masked
# calc
            brexit_boolean = self.current_round == self.brexit_round
number_tourists, tourism_factor = model.tourism_factor(
tourism_factor_matrix=mar,
gdp_tourism_factor=40, # TODO: removing hard-coding needed?
brexit=brexit_boolean
)
# update lists
*, code: Optional[ErrorCode] = None) -> None:
self.note_func(msg, ctx, code=code)
@contextmanager
def tvar_scope_frame(self) -> Iterator[None]:
old_scope = self.tvar_scope
self.tvar_scope = self.tvar_scope.method_frame()
yield
self.tvar_scope = old_scope
def infer_type_variables(self,
type: CallableType) -> List[Tuple[str, TypeVarLikeExpr]]:
"""Return list of unique type variables referred to in a callable."""
names = [] # type: List[str]
tvars = [] # type: List[TypeVarLikeExpr]
for arg in type.arg_types:
for name, tvar_expr in arg.accept(
TypeVarLikeQuery(self.lookup_qualified, self.tvar_scope)
):
if name not in names:
names.append(name)
tvars.append(tvar_expr)
# When finding type variables in the return type of a function, don't
# look inside Callable types. Type variables only appearing in
# functions in the return type belong to those functions, not the
# function we're currently analyzing.
for name, tvar_expr in type.ret_type.accept(
TypeVarLikeQuery(self.lookup_qualified, self.tvar_scope, include_callables=False)
):
if name not in names:
names.append(name)
tvars.append(tvar_expr)
return list(zip(names, tvars))
def bind_function_type_variables(
self, fun_type: CallableType, defn: Context
) -> Sequence[TypeVarLikeDef]:
"""Find the type variables of the function type and bind them in our tvar_scope"""
if fun_type.variables:
for var in fun_type.variables:
var_node = self.lookup_qualified(var.name, defn)
assert var_node, "Binding for function type variable not found within function"
var_expr = var_node.node
assert isinstance(var_expr, TypeVarLikeExpr)
self.tvar_scope.bind_new(var.name, var_expr)
return fun_type.variables
typevars = self.infer_type_variables(fun_type)
# Do not define a new type variable if already defined in scope.
typevars = [(name, tvar) for name, tvar in typevars
if not self.is_defined_type_var(name, defn)]
defs = [] # type: List[TypeVarLikeDef]
for name, tvar in typevars:
if not self.tvar_scope.allow_binding(tvar.fullname):
self.fail('Type variable "{}" is bound by an outer class'.format(name), defn)
self.tvar_scope.bind_new(name, tvar)
binding = self.tvar_scope.get_binding(tvar.fullname)
assert binding is not None
defs.append(binding)
return defs
def is_defined_type_var(self, tvar: str, context: Context) -> bool:
tvar_node = self.lookup_qualified(tvar, context)
if not tvar_node:
return False
return self.tvar_scope.get_binding(tvar_node) is not None
def anal_array(self, a: Iterable[Type], nested: bool = True) -> List[Type]:
res = [] # type: List[Type]
for t in a:
res.append(self.anal_type(t, nested))
return res
def anal_type(self, t: Type, nested: bool = True) -> Type:
if nested:
self.nesting_level += 1
try:
return t.accept(self)
finally:
if nested:
self.nesting_level -= 1
def anal_var_def(self, var_def: TypeVarLikeDef) -> TypeVarLikeDef:
if isinstance(var_def, TypeVarDef):
return TypeVarDef(
var_def.name,
var_def.fullname,
var_def.id.raw_id,
self.anal_array(var_def.values),
var_def.upper_bound.accept(self),
var_def.variance,
var_def.line
)
else:
return var_def
def anal_var_defs(self, var_defs: Sequence[TypeVarLikeDef]) -> List[TypeVarLikeDef]:
return [self.anal_var_def(vd) for vd in var_defs]
def named_type_with_normalized_str(self, fully_qualified_name: str) -> Instance:
"""Does almost the same thing as `named_type`, except that we immediately
unalias `builtins.bytes` and `builtins.unicode` to `builtins.str` as appropriate.
"""
python_version = self.options.python_version
if python_version[0] == 2 and fully_qualified_name == 'builtins.bytes':
fully_qualified_name = 'builtins.str'
if python_version[0] >= 3 and fully_qualified_name == 'builtins.unicode':
fully_qualified_name = 'builtins.str'
return self.named_type(fully_qualified_name)
def named_type(self, fully_qualified_name: str,
args: Optional[List[Type]] = None,
line: int = -1,
column: int = -1) -> Instance:
node = self.lookup_fqn_func(fully_qualified_name)
assert isinstance(node.node, TypeInfo)
any_type = AnyType(TypeOfAny.special_form)
return Instance(node.node, args or [any_type] * len(node.node.defn.type_vars),
line=line, column=column)
def tuple_type(self, items: List[Type]) -> TupleType:
any_type = AnyType(TypeOfAny.special_form)
return TupleType(items, fallback=self.named_type('builtins.tuple', [any_type]))
TypeVarLikeList = List[Tuple[str, TypeVarLikeExpr]]
# Mypyc doesn't support callback protocols yet.
MsgCallback = Callable[[str, Context, DefaultNamedArg(Optional[ErrorCode], 'code')], None]
def get_omitted_any(disallow_any: bool, fail: MsgCallback, note: MsgCallback,
orig_type: Type, python_version: Tuple[int, int],
fullname: Optional[str] = None,
unexpanded_type: Optional[Type] = None) -> AnyType:
if disallow_any:
nongen_builtins = get_nongen_builtins(python_version)
if fullname in nongen_builtins:
typ = orig_type
# We use a dedicated error message for builtin generics (as the most common case).
alternative = nongen_builtins[fullname]
fail(message_registry.IMPLICIT_GENERIC_ANY_BUILTIN.format(alternative), typ,
code=codes.TYPE_ARG)
else:
typ = unexpanded_type or orig_type
type_str = typ.name if isinstance(typ, UnboundType) else format_type_bare(typ)
fail(
message_registry.BARE_GENERIC.format(quote_type_string(type_str)),
typ,
code=codes.TYPE_ARG)
base_type = get_proper_type(orig_type)
base_fullname = (
base_type.type.fullname if isinstance(base_type, Instance) else fullname
)
# Ideally, we'd check whether the type is quoted or `from __future__ annotations`
# is set before issuing this note
if python_version < (3, 9) and base_fullname in GENERIC_STUB_NOT_AT_RUNTIME_TYPES:
# Recommend `from __future__ import annotations` or to put type in quotes
# (string literal escaping) for classes not generic at runtime
note(
"Subscripting classes that are not generic at runtime may require "
"escaping, see https://mypy.readthedocs.io/en/stable/runtime_troubles.html"
"#not-generic-runtime",
typ,
code=codes.TYPE_ARG)
any_type = AnyType(TypeOfAny.from_error, line=typ.line, column=typ.column)
else:
any_type = AnyType(
TypeOfAny.from_omitted_generics, line=orig_type.line, column=orig_type.column
)
return any_type
def fix_instance(t: Instance, fail: MsgCallback, note: MsgCallback,
disallow_any: bool, python_version: Tuple[int, int],
use_generic_error: bool = False,
unexpanded_type: Optional[Type] = None,) -> None:
"""Fix a malformed instance by replacing all type arguments with Any.
Also emit a suitable error if this is not due to implicit Any's.
"""
if len(t.args) == 0:
if use_generic_error:
fullname = None # type: Optional[str]
else:
fullname = t.type.fullname
any_type = get_omitted_any(disallow_any, fail, note, t, python_version, fullname,
unexpanded_type)
t.args = (any_type,) * len(t.type.type_vars)
return
# Invalid number of type parameters.
n = len(t.type.type_vars)
s = '{} type arguments'.format(n)
if n == 0:
s = 'no type arguments'
elif n == 1:
s = '1 type argument'
act = str(len(t.args))
if act == '0':
act = 'none'
fail('"{}" expects {}, but {} given'.format(
t.type.name, s, act), t, code=codes.TYPE_ARG)
# Construct the correct number of type arguments, as
# otherwise the type checker may crash as it expects
# things to be right.
t.args = tuple(AnyType(TypeOfAny.from_error) for _ in t.type.type_vars)
t.invalid = True
def expand_type_alias(node: TypeAlias, args: List[Type],
fail: MsgCallback, no_args: bool, ctx: Context, *,
unexpanded_type: Optional[Type] = None,
disallow_any: bool = False) -> Type:
"""Expand a (generic) type alias target following the rules outlined in TypeAlias docstring.
Here:
target: original target type (contains unbound type variables)
alias_tvars: type variable names
args: types to be substituted in place of type variables
fail: error reporter callback
no_args: whether original definition used a bare generic `A = List`
ctx: context where expansion happens
"""
exp_len = len(node.alias_tvars)
act_len = len(args)
if exp_len > 0 and act_len == 0:
# Interpret bare Alias same as normal generic, i.e., Alias[Any, Any, ...]
return set_any_tvars(node, ctx.line, ctx.column,
disallow_any=disallow_any, fail=fail,
unexpanded_type=unexpanded_type)
if exp_len == 0 and act_len == 0:
if no_args:
assert isinstance(node.target, Instance) # type: ignore[misc]
# Note: this is the only case where we use an eager expansion. See more info about
# no_args aliases like L = List in the docstring for TypeAlias class.
return Instance(node.target.type, [], line=ctx.line, column=ctx.column)
return TypeAliasType(node, [], line=ctx.line, column=ctx.column)
if (exp_len == 0 and act_len > 0
and isinstance(node.target, Instance) # type: ignore[misc]
and no_args):
tp = Instance(node.target.type, args)
tp.line = ctx.line
tp.column = ctx.column
return tp
if act_len != exp_len:
fail('Bad number of arguments for type alias, expected: %s, given: %s'
% (exp_len, act_len), ctx)
return set_any_tvars(node, ctx.line, ctx.column, from_error=True)
typ = TypeAliasType(node, args, ctx.line, ctx.column)
assert typ.alias is not None
# HACK: Implement FlexibleAlias[T, typ] by expanding it to typ here.
if (isinstance(typ.alias.target, Instance) # type: ignore
and typ.alias.target.type.fullname == 'mypy_extensions.FlexibleAlias'):
exp = get_proper_type(typ)
assert isinstance(exp, Instance)
return exp.args[-1]
return typ
def set_any_tvars(node: TypeAlias,
newline: int, newcolumn: int, *,
from_error: bool = False,
disallow_any: bool = False,
fail: Optional[MsgCallback] = None,
unexpanded_type: Optional[Type] = None) -> Type:
if from_error or disallow_any:
type_of_any = TypeOfAny.from_error
else:
type_of_any = TypeOfAny.from_omitted_generics
if disallow_any:
assert fail is not None
otype = unexpanded_type or node.target
type_str = otype.name if isinstance(otype, UnboundType) else format_type_bare(otype)
fail(message_registry.BARE_GENERIC.format(quote_type_string(type_str)),
Context(newline, newcolumn), code=codes.TYPE_ARG)
any_type = AnyType(type_of_any, line=newline, column=newcolumn)
return TypeAliasType(node, [any_type] * len(node.alias_tvars), newline, newcolumn)
def remove_dups(tvars: Iterable[T]) -> List[T]:
# Get unique elements in order of appearance
all_tvars = set() # type: Set[T]
new_tvars = [] # type: List[T]
for t in tvars:
if t not in all_tvars:
new_tvars.append(t)
all_tvars.add(t)
return new_tvars
def flatten_tvars(ll: Iterable[List[T]]) -> List[T]:
return remove_dups(chain.from_iterable(ll))
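# Illustrative sketch, not part of mypy: both helpers above preserve first-seen
# order while deduplicating, assuming the elements are hashable (membership is
# tracked with a set). For example:
#
#     remove_dups([3, 1, 3, 2, 1])          -> [3, 1, 2]
#     flatten_tvars([[3, 1], [3, 2], [1]])  -> [3, 1, 2]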
class TypeVarLikeQuery(TypeQuery[TypeVarLikeList]):
def __init__(self,
lookup: Callable[[str, Context], Optional[SymbolTableNode]],
scope: 'TypeVarLikeScope',
*,
include_callables: bool = True,
include_bound_tvars: bool = False) -> None:
self.include_callables = include_callables
self.lookup = lookup
self.scope = scope
self.include_bound_tvars = include_bound_tvars
super().__init__(flatten_tvars)
def _seems_like_callable(self, type: UnboundType) -> bool:
if not type.args:
return False
if isinstance(type.args[0], (EllipsisType, TypeList)):
return True
return False
def visit_unbound_type(self, t: UnboundType) -> TypeVarLikeList:
name = t.name
node = self.lookup(name, t)
if node and isinstance(node.node, TypeVarLikeExpr) and (
self.include_bound_tvars or self.scope.get_binding(node) is None):
assert isinstance(node.node, TypeVarLikeExpr)
return [(name, node.node)]
elif not self.include_callables and self._seems_like_callable(t):
return []
elif node and node.fullname in ('typing_extensions.Literal', | |
3.88')
mel.eval('setAttr "lShldr.rotateZ" 7.05')
mel.eval('setAttr "rShldr.rotateX" 0.54')
mel.eval('setAttr "rShldr.rotateY" -3.88')
mel.eval('setAttr "rShldr.rotateZ" -7.05')
mel.eval('setAttr "lForeArm.rotateX" -0.26')
mel.eval('setAttr "lForeArm.rotateY" 9.49')
mel.eval('setAttr "lForeArm.rotateZ" -0.13')
mel.eval('setAttr "rForeArm.rotateX" -0.26')
mel.eval('setAttr "rForeArm.rotateY" -9.49')
mel.eval('setAttr "rForeArm.rotateZ" 0.13')
mel.eval('setAttr "lHand.rotateX" 16.88')
mel.eval('setAttr "lHand.rotateY" -1.91')
mel.eval('setAttr "lHand.rotateZ" -0.71')
mel.eval('setAttr "rHand.rotateX" 16.88')
mel.eval('setAttr "rHand.rotateY" 1.91')
mel.eval('setAttr "rHand.rotateZ" 0.71')
mel.eval('setAttr "lThumb1.rotateX" -9.98')
mel.eval('setAttr "lThumb1.rotateY" -9.51')
mel.eval('setAttr "lThumb1.rotateZ" 5.4')
mel.eval('setAttr "rThumb1.rotateX" -9.98')
mel.eval('setAttr "rThumb1.rotateY" 9.51')
mel.eval('setAttr "rThumb1.rotateZ" -5.4')
mel.eval('setAttr "lThumb2.rotateX" -0.6')
mel.eval('setAttr "lThumb2.rotateY" -12.73')
mel.eval('setAttr "lThumb2.rotateZ" 0.27')
mel.eval('setAttr "rThumb2.rotateX" -0.6')
mel.eval('setAttr "rThumb2.rotateY" 12.73')
mel.eval('setAttr "rThumb2.rotateZ" -0.27')
mel.eval('setAttr "lThumb3.rotateX" -0.6')
mel.eval('setAttr "lThumb3.rotateY" -12.73')
mel.eval('setAttr "lThumb3.rotateZ" 0.27')
mel.eval('setAttr "rThumb3.rotateX" -0.6')
mel.eval('setAttr "rThumb3.rotateY" 12.73')
mel.eval('setAttr "rThumb3.rotateZ" -0.27')
mel.eval('setAttr "lIndex1.rotateX" -6.65')
mel.eval('setAttr "lIndex1.rotateY" -7.61')
mel.eval('setAttr "lIndex1.rotateZ" 5.69')
mel.eval('setAttr "rIndex1.rotateX" -6.65')
mel.eval('setAttr "rIndex1.rotateY" 7.61')
mel.eval('setAttr "rIndex1.rotateZ" -5.69')
mel.eval('setAttr "lIndex2.rotateX" 3.39')
mel.eval('setAttr "lIndex2.rotateY" -1.2')
mel.eval('setAttr "lIndex2.rotateZ" 21.0')
mel.eval('setAttr "rIndex2.rotateX" 3.39')
mel.eval('setAttr "rIndex2.rotateY" 1.2')
mel.eval('setAttr "rIndex2.rotateZ" -21.0')
mel.eval('setAttr "lIndex3.rotateX" -0.0')
mel.eval('setAttr "lIndex3.rotateY" -0.0')
mel.eval('setAttr "lIndex3.rotateZ" 0.0')
mel.eval('setAttr "rIndex3.rotateX" -0.0')
mel.eval('setAttr "rIndex3.rotateY" 0.0')
mel.eval('setAttr "rIndex3.rotateZ" -0.0')
mel.eval('setAttr "lMid1.rotateX" 1.02')
mel.eval('setAttr "lMid1.rotateY" -13.59')
mel.eval('setAttr "lMid1.rotateZ" 14.46')
mel.eval('setAttr "rMid1.rotateX" 1.02')
mel.eval('setAttr "rMid1.rotateY" 13.59')
mel.eval('setAttr "rMid1.rotateZ" -14.46')
mel.eval('setAttr "lMid2.rotateX" 0.12')
mel.eval('setAttr "lMid2.rotateY" -4.91')
mel.eval('setAttr "lMid2.rotateZ" 20.86')
mel.eval('setAttr "rMid2.rotateX" 0.12')
mel.eval('setAttr "rMid2.rotateY" 4.91')
mel.eval('setAttr "rMid2.rotateZ" -20.86')
mel.eval('setAttr "lMid3.rotateX" 0.31')
mel.eval('setAttr "lMid3.rotateY" -1.98')
mel.eval('setAttr "lMid3.rotateZ" -2.4')
mel.eval('setAttr "rMid3.rotateX" 0.31')
mel.eval('setAttr "rMid3.rotateY" 1.98')
mel.eval('setAttr "rMid3.rotateZ" 2.4')
mel.eval('setAttr "lRing1.rotateX" -2.7')
mel.eval('setAttr "lRing1.rotateY" -16.45')
mel.eval('setAttr "lRing1.rotateZ" 26.89')
mel.eval('setAttr "rRing1.rotateX" -2.7')
mel.eval('setAttr "rRing1.rotateY" 16.45')
mel.eval('setAttr "rRing1.rotateZ" -26.89')
mel.eval('setAttr "lRing2.rotateX" -1.93')
mel.eval('setAttr "lRing2.rotateY" -5.11')
mel.eval('setAttr "lRing2.rotateZ" 12.0')
mel.eval('setAttr "rRing2.rotateX" -1.93')
mel.eval('setAttr "rRing2.rotateY" 5.11')
mel.eval('setAttr "rRing2.rotateZ" -12.0')
mel.eval('setAttr "lRing3.rotateX" 0.0')
mel.eval('setAttr "lRing3.rotateY" -0.0')
mel.eval('setAttr "lRing3.rotateZ" 0.0')
mel.eval('setAttr "rRing3.rotateX" 0.0')
mel.eval('setAttr "rRing3.rotateY" 0.0')
mel.eval('setAttr "rRing3.rotateZ" -0.0')
mel.eval('setAttr "lPinky1.rotateX" -2.24')
mel.eval('setAttr "lPinky1.rotateY" -18.15')
mel.eval('setAttr "lPinky1.rotateZ" 18.9')
mel.eval('setAttr "rPinky1.rotateX" -2.24')
mel.eval('setAttr "rPinky1.rotateY" 18.15')
mel.eval('setAttr "rPinky1.rotateZ" -18.9')
mel.eval('setAttr "lPinky2.rotateX" 3.15')
mel.eval('setAttr "lPinky2.rotateY" -6.07')
mel.eval('setAttr "lPinky2.rotateZ" 23.72')
mel.eval('setAttr "rPinky2.rotateX" 3.15')
mel.eval('setAttr "rPinky2.rotateY" 6.07')
mel.eval('setAttr "rPinky2.rotateZ" -23.72')
mel.eval('setAttr "lPinky3.rotateX" 5.42')
mel.eval('setAttr "lPinky3.rotateY" 7.35')
mel.eval('setAttr "lPinky3.rotateZ" 2.93')
mel.eval('setAttr "rPinky3.rotateX" 5.42')
mel.eval('setAttr "rPinky3.rotateY" -7.35')
mel.eval('setAttr "rPinky3.rotateZ" -2.93')
mel.eval('setAttr "lThigh.rotateX" -0.37')
mel.eval('setAttr "lThigh.rotateY" -0.45')
mel.eval('setAttr "lThigh.rotateZ" -4.37')
mel.eval('setAttr "rThigh.rotateX" -0.37')
mel.eval('setAttr "rThigh.rotateY" 0.45')
mel.eval('setAttr "rThigh.rotateZ" 4.37')
mel.eval('setAttr "lFoot.rotateX" -0.57')
mel.eval('setAttr "lFoot.rotateY" -6.61')
mel.eval('setAttr "lFoot.rotateZ" -3.66')
mel.eval('setAttr "rFoot.rotateX" -0.57')
mel.eval('setAttr "rFoot.rotateY" 6.61')
mel.eval('setAttr "rFoot.rotateZ" 3.66')
except:
print("Sentinel fix")
# TODO: Remove hardcoding
def gen1_rotations_fix():
try:
mel.eval('setAttr "lShldr.rotateX" -0.0')
mel.eval('setAttr "lShldr.rotateY" 5.7')
mel.eval('setAttr "lShldr.rotateZ" 2.73')
mel.eval('setAttr "rShldr.rotateX" -0.0')
mel.eval('setAttr "rShldr.rotateY" -5.7')
mel.eval('setAttr "rShldr.rotateZ" -2.73')
mel.eval('setAttr "lForeArm.rotateX" 0.0')
mel.eval('setAttr "lForeArm.rotateY" 20.0')
mel.eval('setAttr "lForeArm.rotateZ" -1.78')
mel.eval('setAttr "rForeArm.rotateX" 0.0')
mel.eval('setAttr "rForeArm.rotateY" -20.0')
mel.eval('setAttr "rForeArm.rotateZ" 1.78')
mel.eval('setAttr "lHand.rotateX" -0.0')
mel.eval('setAttr "lHand.rotateY" -0.0')
mel.eval('setAttr "lHand.rotateZ" 0.0')
mel.eval('setAttr "rHand.rotateX" -0.0')
mel.eval('setAttr "rHand.rotateY" 0.0')
mel.eval('setAttr "rHand.rotateZ" -0.0')
mel.eval('setAttr "lThumb1.rotateX" 0.0')
mel.eval('setAttr "lThumb1.rotateY" 0.0')
mel.eval('setAttr "lThumb1.rotateZ" 2.94')
mel.eval('setAttr "rThumb1.rotateX" 0.0')
mel.eval('setAttr "rThumb1.rotateY" -0.0')
mel.eval('setAttr "rThumb1.rotateZ" -2.94')
mel.eval('setAttr "lThumb2.rotateX" 0.0')
mel.eval('setAttr "lThumb2.rotateY" -0.0')
mel.eval('setAttr "lThumb2.rotateZ" 0.0')
mel.eval('setAttr "rThumb2.rotateX" 0.0')
mel.eval('setAttr "rThumb2.rotateY" 0.0')
mel.eval('setAttr "rThumb2.rotateZ" -0.0')
mel.eval('setAttr "lThumb3.rotateX" 0.0')
mel.eval('setAttr "lThumb3.rotateY" -0.0')
mel.eval('setAttr "lThumb3.rotateZ" 0.0')
mel.eval('setAttr "rThumb3.rotateX" 0.0')
mel.eval('setAttr "rThumb3.rotateY" 0.0')
mel.eval('setAttr "rThumb3.rotateZ" -0.0')
mel.eval('setAttr "lCarpal1.rotateX" -0.0')
mel.eval('setAttr "lCarpal1.rotateY" -0.0')
mel.eval('setAttr "lCarpal1.rotateZ" 0.0')
mel.eval('setAttr "rCarpal1.rotateX" -0.0')
mel.eval('setAttr "rCarpal1.rotateY" 0.0')
mel.eval('setAttr "rCarpal1.rotateZ" -0.0')
mel.eval('setAttr "lIndex1.rotateX" -5.14')
mel.eval('setAttr "lIndex1.rotateY" 12.62')
mel.eval('setAttr "lIndex1.rotateZ" 16.12')
mel.eval('setAttr "rIndex1.rotateX" -5.14')
mel.eval('setAttr "rIndex1.rotateY" -12.62')
mel.eval('setAttr "rIndex1.rotateZ" -16.12')
mel.eval('setAttr "lIndex2.rotateX" 7.45')
mel.eval('setAttr "lIndex2.rotateY" -3.01')
mel.eval('setAttr "lIndex2.rotateZ" 27.34')
mel.eval('setAttr "rIndex2.rotateX" 7.45')
mel.eval('setAttr "rIndex2.rotateY" 3.01')
mel.eval('setAttr "rIndex2.rotateZ" -27.34')
mel.eval('setAttr "lIndex3.rotateX" 4.44')
mel.eval('setAttr "lIndex3.rotateY" -0.0')
mel.eval('setAttr "lIndex3.rotateZ" 12.1')
mel.eval('setAttr "rIndex3.rotateX" 4.44')
mel.eval('setAttr "rIndex3.rotateY" 0.0')
mel.eval('setAttr "rIndex3.rotateZ" -12.1')
mel.eval('setAttr "lMid1.rotateX" 5.49')
mel.eval('setAttr "lMid1.rotateY" 3.03')
mel.eval('setAttr "lMid1.rotateZ" 17.56')
mel.eval('setAttr "rMid1.rotateX" 5.49')
mel.eval('setAttr "rMid1.rotateY" -3.03')
mel.eval('setAttr "rMid1.rotateZ" -17.56')
mel.eval('setAttr "lMid2.rotateX" -0.62')
mel.eval('setAttr "lMid2.rotateY" 0.01')
mel.eval('setAttr "lMid2.rotateZ" 32.07')
mel.eval('setAttr "rMid2.rotateX" -0.62')
mel.eval('setAttr "rMid2.rotateY" -0.01')
mel.eval('setAttr "rMid2.rotateZ" -32.07')
mel.eval('setAttr "lMid3.rotateX" 5.7')
mel.eval('setAttr "lMid3.rotateY" 0.01')
mel.eval('setAttr "lMid3.rotateZ" 16.17')
mel.eval('setAttr "rMid3.rotateX" 5.7')
mel.eval('setAttr "rMid3.rotateY" -0.01')
mel.eval('setAttr "rMid3.rotateZ" -16.17')
mel.eval('setAttr "lCarpal2.rotateX" -0.0')
mel.eval('setAttr "lCarpal2.rotateY" -0.0')
mel.eval('setAttr "lCarpal2.rotateZ" 0.0')
mel.eval('setAttr "rCarpal2.rotateX" -0.0')
mel.eval('setAttr "rCarpal2.rotateY" 0.0')
mel.eval('setAttr "rCarpal2.rotateZ" -0.0')
mel.eval('setAttr "lRing1.rotateX" 4.68')
mel.eval('setAttr "lRing1.rotateY" -3.36')
mel.eval('setAttr "lRing1.rotateZ" 19.99')
mel.eval('setAttr "rRing1.rotateX" 4.68')
mel.eval('setAttr "rRing1.rotateY" 3.36')
mel.eval('setAttr "rRing1.rotateZ" -19.99')
mel.eval('setAttr "lRing2.rotateX" 3.19')
mel.eval('setAttr "lRing2.rotateY" 1.4')
mel.eval('setAttr "lRing2.rotateZ" 32.05')
mel.eval('setAttr "rRing2.rotateX" 3.19')
mel.eval('setAttr "rRing2.rotateY" -1.4')
mel.eval('setAttr "rRing2.rotateZ" -32.05')
mel.eval('setAttr "lRing3.rotateX" 4.9')
mel.eval('setAttr "lRing3.rotateY" 0.17')
mel.eval('setAttr "lRing3.rotateZ" 5.06')
mel.eval('setAttr "rRing3.rotateX" 4.9')
mel.eval('setAttr "rRing3.rotateY" -0.17')
mel.eval('setAttr "rRing3.rotateZ" -5.06')
mel.eval('setAttr "lPinky1.rotateX" 6.72')
mel.eval('setAttr "lPinky1.rotateY" -12.44')
mel.eval('setAttr "lPinky1.rotateZ" 23.36')
mel.eval('setAttr "rPinky1.rotateX" 6.72')
mel.eval('setAttr "rPinky1.rotateY" 12.44')
mel.eval('setAttr "rPinky1.rotateZ" -23.36')
mel.eval('setAttr "lPinky2.rotateX" 7.11')
mel.eval('setAttr "lPinky2.rotateY" 6.24')
mel.eval('setAttr "lPinky2.rotateZ" 38.7')
mel.eval('setAttr "rPinky2.rotateX" 7.11')
mel.eval('setAttr "rPinky2.rotateY" -6.24')
mel.eval('setAttr "rPinky2.rotateZ" -38.7')
mel.eval('setAttr "lPinky3.rotateX" 0.0')
mel.eval('setAttr "lPinky3.rotateY" 0.0')
mel.eval('setAttr "lPinky3.rotateZ" -0.0')
mel.eval('setAttr "rPinky3.rotateX" 0.0')
mel.eval('setAttr "rPinky3.rotateY" -0.0')
mel.eval('setAttr "rPinky3.rotateZ" 0.0')
mel.eval('setAttr "lFoot.rotateX" -0.1')
mel.eval('setAttr "lFoot.rotateY" -11.32')
mel.eval('setAttr "lFoot.rotateZ" 3.78')
mel.eval('setAttr "rFoot.rotateX" -0.1')
mel.eval('setAttr "rFoot.rotateY" 11.32')
mel.eval('setAttr "rFoot.rotateZ" -3.78')
except:
print("Gen1RotsFix...")
# TODO: Remove hardcoding
def gen2_rotations_fix():
try:
mel.eval('setAttr "lShldr.rotateX" -0.0')
mel.eval('setAttr "lShldr.rotateY" 5.44')
mel.eval('setAttr "lShldr.rotateZ" 1.87')
mel.eval('setAttr "rShldr.rotateX" -0.0')
mel.eval('setAttr "rShldr.rotateY" -5.44')
mel.eval('setAttr "rShldr.rotateZ" -1.87')
mel.eval('setAttr "lForeArm.rotateX" 0.62')
mel.eval('setAttr "lForeArm.rotateY" 20.19')
mel.eval('setAttr "lForeArm.rotateZ" -1.69')
mel.eval('setAttr "rForeArm.rotateX" 0.62')
mel.eval('setAttr "rForeArm.rotateY" -20.19')
mel.eval('setAttr "rForeArm.rotateZ" 1.69')
mel.eval('setAttr "lHand.rotateX" 15.11')
mel.eval('setAttr "lHand.rotateY" -0.28')
mel.eval('setAttr "lHand.rotateZ" -0.59')
mel.eval('setAttr "rHand.rotateX" 15.11')
mel.eval('setAttr "rHand.rotateY" 0.28')
mel.eval('setAttr "rHand.rotateZ" 0.59')
mel.eval('setAttr "lThumb1.rotateX" -0.0')
mel.eval('setAttr "lThumb1.rotateY" -0.0')
mel.eval('setAttr "lThumb1.rotateZ" 0.0')
mel.eval('setAttr "rThumb1.rotateX" -0.0')
mel.eval('setAttr "rThumb1.rotateY" 0.0')
mel.eval('setAttr "rThumb1.rotateZ" -0.0')
mel.eval('setAttr "lThumb2.rotateX" -0.0')
mel.eval('setAttr "lThumb2.rotateY" -0.0')
mel.eval('setAttr "lThumb2.rotateZ" -0.0')
mel.eval('setAttr "rThumb2.rotateX" -0.0')
mel.eval('setAttr "rThumb2.rotateY" 0.0')
mel.eval('setAttr "rThumb2.rotateZ" 0.0')
mel.eval('setAttr "lThumb3.rotateX" 0.0')
mel.eval('setAttr "lThumb3.rotateY" -0.0')
mel.eval('setAttr "lThumb3.rotateZ" -0.0')
mel.eval('setAttr "rThumb3.rotateX" 0.0')
mel.eval('setAttr "rThumb3.rotateY" 0.0')
mel.eval('setAttr "rThumb3.rotateZ" 0.0')
mel.eval('setAttr "lCarpal1.rotateX" -0.29')
mel.eval('setAttr "lCarpal1.rotateY" -0.4')
mel.eval('setAttr "lCarpal1.rotateZ" -2.3')
mel.eval('setAttr "rCarpal1.rotateX" -0.29')
mel.eval('setAttr "rCarpal1.rotateY" 0.4')
mel.eval('setAttr "rCarpal1.rotateZ" 2.3')
mel.eval('setAttr "lIndex1.rotateX" 0.15')
mel.eval('setAttr "lIndex1.rotateY" 10.31')
mel.eval('setAttr "lIndex1.rotateZ" 0.16')
mel.eval('setAttr "rIndex1.rotateX" 0.15')
mel.eval('setAttr "rIndex1.rotateY" -10.31')
mel.eval('setAttr "rIndex1.rotateZ" -0.16')
mel.eval('setAttr "lIndex2.rotateX" -1.01')
mel.eval('setAttr "lIndex2.rotateY" -0.08')
mel.eval('setAttr "lIndex2.rotateZ" 13.96')
mel.eval('setAttr "rIndex2.rotateX" -1.01')
mel.eval('setAttr "rIndex2.rotateY" 0.08')
mel.eval('setAttr "rIndex2.rotateZ" -13.96')
mel.eval('setAttr "lIndex3.rotateX" -0.0')
mel.eval('setAttr "lIndex3.rotateY" 0.0')
mel.eval('setAttr "lIndex3.rotateZ" 0.0')
mel.eval('setAttr "rIndex3.rotateX" -0.0')
mel.eval('setAttr "rIndex3.rotateY" -0.0')
mel.eval('setAttr "rIndex3.rotateZ" -0.0')
mel.eval('setAttr "lMid1.rotateX" 5.85')
mel.eval('setAttr "lMid1.rotateY" 2.88')
mel.eval('setAttr "lMid1.rotateZ" -0.0')
mel.eval('setAttr "rMid1.rotateX" 5.85')
mel.eval('setAttr "rMid1.rotateY" -2.88')
mel.eval('setAttr "rMid1.rotateZ" 0.0')
mel.eval('setAttr "lMid2.rotateX" 0.0')
mel.eval('setAttr "lMid2.rotateY" 0.0')
mel.eval('setAttr "lMid2.rotateZ" 9.13')
mel.eval('setAttr "rMid2.rotateX" 0.0')
mel.eval('setAttr "rMid2.rotateY" -0.0')
mel.eval('setAttr "rMid2.rotateZ" -9.13')
mel.eval('setAttr "lMid3.rotateX" -0.0')
mel.eval('setAttr "lMid3.rotateY" 0.0')
mel.eval('setAttr "lMid3.rotateZ" 0.0')
mel.eval('setAttr "rMid3.rotateX" -0.0')
mel.eval('setAttr "rMid3.rotateY" -0.0')
mel.eval('setAttr "rMid3.rotateZ" -0.0')
mel.eval('setAttr "lCarpal2.rotateX" 0.11')
mel.eval('setAttr "lCarpal2.rotateY" 0.02')
mel.eval('setAttr "lCarpal2.rotateZ" -0.72')
mel.eval('setAttr "rCarpal2.rotateX" 0.11')
mel.eval('setAttr "rCarpal2.rotateY" -0.02')
mel.eval('setAttr "rCarpal2.rotateZ" 0.72')
mel.eval('setAttr "lRing1.rotateX" 2.91')
mel.eval('setAttr "lRing1.rotateY" -4.8')
mel.eval('setAttr "lRing1.rotateZ" -0.0')
mel.eval('setAttr "rRing1.rotateX" 2.91')
mel.eval('setAttr "rRing1.rotateY" 4.8')
mel.eval('setAttr "rRing1.rotateZ" 0.0')
mel.eval('setAttr "lRing2.rotateX" 12.88')
mel.eval('setAttr "lRing2.rotateY" 1.94')
mel.eval('setAttr "lRing2.rotateZ" 7.8')
mel.eval('setAttr "rRing2.rotateX" 12.88')
mel.eval('setAttr "rRing2.rotateY" -1.94')
mel.eval('setAttr "rRing2.rotateZ" -7.8')
mel.eval('setAttr "lRing3.rotateX" 0.0')
mel.eval('setAttr "lRing3.rotateY" -0.0')
mel.eval('setAttr "lRing3.rotateZ" 0.0')
mel.eval('setAttr "rRing3.rotateX" 0.0')
mel.eval('setAttr "rRing3.rotateY" 0.0')
mel.eval('setAttr "rRing3.rotateZ" -0.0')
mel.eval('setAttr "lPinky1.rotateX" 0.0')
mel.eval('setAttr "lPinky1.rotateY" -6.84')
mel.eval('setAttr "lPinky1.rotateZ" 0.0')
mel.eval('setAttr "rPinky1.rotateX" 0.0')
mel.eval('setAttr "rPinky1.rotateY" 6.84')
mel.eval('setAttr "rPinky1.rotateZ" -0.0')
mel.eval('setAttr "lPinky2.rotateX" 0.0')
mel.eval('setAttr "lPinky2.rotateY" 0.0')
mel.eval('setAttr "lPinky2.rotateZ" 10.86')
mel.eval('setAttr "rPinky2.rotateX" 0.0')
mel.eval('setAttr "rPinky2.rotateY" -0.0')
mel.eval('setAttr "rPinky2.rotateZ" -10.86')
mel.eval('setAttr "lPinky3.rotateX" -0.0')
mel.eval('setAttr "lPinky3.rotateY" -0.0')
mel.eval('setAttr "lPinky3.rotateZ" 0.0')
mel.eval('setAttr "rPinky3.rotateX" -0.0')
mel.eval('setAttr "rPinky3.rotateY" 0.0')
mel.eval('setAttr "rPinky3.rotateZ" -0.0')
except:
print("Gen2RotsFix...")
# TODO: Remove hardcoding
def gen3_rotations_fix():
# ---------------------------------------------------------
try:
mel.eval('setAttr "lShldrBend.rotateX" 0.11')
mel.eval('setAttr "lShldrBend.rotateY" 2.52')
mel.eval('setAttr "lShldrBend.rotateZ" 3.86')
mel.eval('setAttr "rShldrBend.rotateX" 0.11')
mel.eval('setAttr "rShldrBend.rotateY" -2.52')
mel.eval('setAttr "rShldrBend.rotateZ" -3.86')
mel.eval('setAttr "lForearmBend.rotateX" -0.29')
mel.eval('setAttr "lForearmBend.rotateY" 12.71')
mel.eval('setAttr "lForearmBend.rotateZ" -3.95')
mel.eval('setAttr "rForearmBend.rotateX" -0.29')
mel.eval('setAttr "rForearmBend.rotateY" -12.71')
mel.eval('setAttr "rForearmBend.rotateZ" 3.95')
mel.eval('setAttr "lHand.rotateX" 0.03')
mel.eval('setAttr "lHand.rotateY" -14.23')
mel.eval('setAttr "lHand.rotateZ" 1.77')
mel.eval('setAttr "rHand.rotateX" 0.03')
mel.eval('setAttr "rHand.rotateY" 14.23')
mel.eval('setAttr "rHand.rotateZ" -1.77')
mel.eval('setAttr "lThumb1.rotateX" 0.0')
mel.eval('setAttr "lThumb1.rotateY" 0.0')
mel.eval('setAttr "lThumb1.rotateZ" -0.0')
mel.eval('setAttr "rThumb1.rotateX" 0.0')
mel.eval('setAttr "rThumb1.rotateY" -0.0')
mel.eval('setAttr "rThumb1.rotateZ" 0.0')
mel.eval('setAttr "lThumb2.rotateX" 0.0')
mel.eval('setAttr "lThumb2.rotateY" -0.0')
mel.eval('setAttr "lThumb2.rotateZ" 0.0')
mel.eval('setAttr "rThumb2.rotateX" 0.0')
mel.eval('setAttr "rThumb2.rotateY" 0.0')
mel.eval('setAttr "rThumb2.rotateZ" -0.0')
mel.eval('setAttr "lThumb3.rotateX" -0.0')
mel.eval('setAttr "lThumb3.rotateY" -0.0')
mel.eval('setAttr "lThumb3.rotateZ" -0.0')
mel.eval('setAttr "rThumb3.rotateX" -0.0')
mel.eval('setAttr "rThumb3.rotateY" 0.0')
mel.eval('setAttr "rThumb3.rotateZ" 0.0')
mel.eval('setAttr "lCarpal1.rotateX" 0.0')
mel.eval('setAttr "lCarpal1.rotateY" -0.0')
mel.eval('setAttr "lCarpal1.rotateZ" -0.0')
mel.eval('setAttr "rCarpal1.rotateX" 0.0')
mel.eval('setAttr "rCarpal1.rotateY" 0.0')
mel.eval('setAttr "rCarpal1.rotateZ" 0.0')
mel.eval('setAttr "lIndex1.rotateX" -2.07')
mel.eval('setAttr "lIndex1.rotateY" 12.03')
mel.eval('setAttr "lIndex1.rotateZ" 2.8')
mel.eval('setAttr "rIndex1.rotateX" -2.07')
mel.eval('setAttr "rIndex1.rotateY" -12.03')
mel.eval('setAttr "rIndex1.rotateZ" -2.8')
mel.eval('setAttr "lIndex2.rotateX" 0.31')
mel.eval('setAttr "lIndex2.rotateY" -4.02')
mel.eval('setAttr "lIndex2.rotateZ" 1.4')
mel.eval('setAttr "rIndex2.rotateX" 0.31')
mel.eval('setAttr "rIndex2.rotateY" 4.02')
mel.eval('setAttr "rIndex2.rotateZ" -1.4')
mel.eval('setAttr | |
<reponame>louisleroy5/archetypal<filename>archetypal/template/opaque_material.py
################################################################################
# Module: archetypal.template
# Description:
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import collections
import numpy as np
from sigfig import round
from archetypal import log
from archetypal.template import UmiBase, UniqueName
class OpaqueMaterial(UmiBase):
"""Use this component to create a custom opaque material.
.. image:: ../images/template/materials-opaque.png
"""
def __init__(
self,
Name,
Conductivity,
SpecificHeat,
SolarAbsorptance=0.7,
ThermalEmittance=0.9,
VisibleAbsorptance=0.7,
Roughness="Rough",
Cost=0,
Density=1,
MoistureDiffusionResistance=50,
EmbodiedCarbon=0.45,
EmbodiedEnergy=0,
TransportCarbon=0,
TransportDistance=0,
TransportEnergy=0,
SubstitutionRatePattern=None,
SubstitutionTimestep=20,
**kwargs,
):
"""A custom opaque material.
Args:
Name (str): The name of the material.
Conductivity (float): A number representing the conductivity of the
material in W/m-K. This is essentially the heat flow in Watts
across one meter thick of the material when the temperature
difference on either side is 1 Kelvin. Modeling layers with
conductivity higher than 5.0 W/(m-K) is not recommended.
SpecificHeat (float): A number representing the specific heat
capacity of the material in J/kg-K. This is essentially the
number of joules needed to raise one kg of the material by 1
degree Kelvin. Only values of specific heat of 100 or larger are
allowed. Typical ranges are from 800 to 2000 J/(kg-K).
SolarAbsorptance (float): A number between 0 and 1 that represents
the absorptance of solar radiation by the material. The default
is set to 0.7, which is common for most non-metallic materials.
ThermalEmittance (float): A number between 0 and 1 that represents
the thermal absorptance of the material. The default is set to
0.9, which is common for most non-metallic materials. For long
wavelength radiant exchange, thermal emissivity and thermal
emittance are equal to thermal absorptance.
VisibleAbsorptance (float): A number between 0 and 1 that
represents the absorptance of visible light by the material.
The default is set to 0.7, which is common for most non-metallic
materials.
Roughness (str): A text value that indicates the roughness of your
material. This can be one of "VeryRough", "Rough",
"MediumRough", "MediumSmooth", "Smooth", or "VerySmooth". The
default is set to "Rough".
Cost: # todo: define parameter
Density (float): A number representing the density of the material
in kg/m3. This is essentially the mass of one cubic meter of the
material.
MoistureDiffusionResistance: # todo: define parameter
EmbodiedCarbon: # todo: define parameter
EmbodiedEnergy: # todo: define parameter
TransportCarbon: # todo: define parameter
TransportDistance: # todo: define parameter
TransportEnergy: # todo: define parameter
SubstitutionRatePattern: # todo: define parameter
SubstitutionTimestep: # todo: define parameter
**kwargs:
"""
super(OpaqueMaterial, self).__init__(Name, **kwargs)
if SubstitutionRatePattern is None:
SubstitutionRatePattern = [0.5, 1]
self.Conductivity = Conductivity
self.Roughness = Roughness
self.SolarAbsorptance = SolarAbsorptance
self.SpecificHeat = SpecificHeat
self.ThermalEmittance = ThermalEmittance
self.VisibleAbsorptance = VisibleAbsorptance
self.TransportCarbon = TransportCarbon
self.TransportDistance = TransportDistance
self.TransportEnergy = TransportEnergy
self.SubstitutionRatePattern = SubstitutionRatePattern
self.SubstitutionTimestep = SubstitutionTimestep
self.Cost = Cost
self.Density = Density
self.EmbodiedCarbon = EmbodiedCarbon
self.EmbodiedEnergy = EmbodiedEnergy
self.MoistureDiffusionResistance = MoistureDiffusionResistance
@property
def ThermalEmittance(self):
return self._thermal_emittance
@ThermalEmittance.setter
def ThermalEmittance(self, value):
try:
value = float(value)
except ValueError:
value = 0.9 # Use default
finally:
if 9.9999e-6 < value <= 1:
self._thermal_emittance = value
else:
raise ValueError(
f"Out of range value Numeric Field (ThermalEmittance), "
f"value={value}, "
"range={>9.9999E-6 and <=1}, "
f"in MATERIAL={self.Name}"
)
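# Minimal usage sketch (illustrative values, not from any bundled template;
# assumes the UmiBase defaults accept a None idf as in `generic()` below): a
# non-numeric ThermalEmittance silently falls back to the 0.9 default, while a
# numeric value outside (9.9999e-6, 1] raises ValueError.
#
#     mat = OpaqueMaterial(Name="Concrete", Conductivity=1.7, SpecificHeat=880,
#                          Density=2300, ThermalEmittance="n/a")
#     assert mat.ThermalEmittance == 0.9
#
#     OpaqueMaterial(Name="Bad", Conductivity=1.7, SpecificHeat=880,
#                    ThermalEmittance=1.5)  # raises ValueError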
def __add__(self, other):
"""Overload + to implement self.combine.
Args:
other (OpaqueMaterial):
"""
return self.combine(other)
def __hash__(self):
return hash((self.__class__.__name__, getattr(self, "Name", None)))
def __eq__(self, other):
if not isinstance(other, OpaqueMaterial):
return False
else:
return all(
[
self.Conductivity == other.Conductivity,
self.SpecificHeat == other.SpecificHeat,
self.SolarAbsorptance == other.SolarAbsorptance,
self.ThermalEmittance == other.ThermalEmittance,
self.VisibleAbsorptance == other.VisibleAbsorptance,
self.Roughness == other.Roughness,
self.Cost == other.Cost,
self.Density == other.Density,
self.MoistureDiffusionResistance
== other.MoistureDiffusionResistance,
self.EmbodiedCarbon == other.EmbodiedCarbon,
self.EmbodiedEnergy == other.EmbodiedEnergy,
self.TransportCarbon == other.TransportCarbon,
self.TransportDistance == other.TransportDistance,
self.TransportEnergy == other.TransportEnergy,
np.array_equal(
self.SubstitutionRatePattern, other.SubstitutionRatePattern
),
self.SubstitutionTimestep == other.SubstitutionTimestep,
]
)
@classmethod
def generic(cls, idf=None):
"""generic plaster board"""
return cls(
Conductivity=0.16,
SpecificHeat=1090,
Density=800,
Name="GP01 GYPSUM",
Roughness="Smooth",
SolarAbsorptance=0.7,
ThermalEmittance=0.9,
VisibleAbsorptance=0.5,
DataSource="ASHRAE 90.1-2007",
idf=idf,
)
def combine(self, other, weights=None, allow_duplicates=False):
"""Combine two OpaqueMaterial objects.
Args:
weights (list-like, optional): A list-like object of len 2. If None,
the density of the OpaqueMaterial of each object is used as
a weighting factor.
other (OpaqueMaterial): The other OpaqueMaterial object to
combine with.
Returns:
OpaqueMaterial: A new combined object made of self + other.
"""
# Check if other is the same type as self
if not isinstance(other, self.__class__):
msg = "Cannot combine %s with %s" % (
self.__class__.__name__,
other.__class__.__name__,
)
raise NotImplementedError(msg)
# Check if other is not the same as self
if self == other:
return self
if not weights:
log(
'using OpaqueMaterial density as weighting factor in "{}" '
"combine.".format(self.__class__.__name__)
)
weights = [self.Density, other.Density]
meta = self._get_predecessors_meta(other)
new_obj = OpaqueMaterial(
**meta,
Conductivity=self._float_mean(other, "Conductivity", weights),
Roughness=self._str_mean(other, attr="Roughness", append=False),
SolarAbsorptance=self._float_mean(other, "SolarAbsorptance", weights),
SpecificHeat=self._float_mean(other, "SpecificHeat"),
ThermalEmittance=self._float_mean(other, "ThermalEmittance", weights),
VisibleAbsorptance=self._float_mean(other, "VisibleAbsorptance", weights),
TransportCarbon=self._float_mean(other, "TransportCarbon", weights),
TransportDistance=self._float_mean(other, "TransportDistance", weights),
TransportEnergy=self._float_mean(other, "TransportEnergy", weights),
SubstitutionRatePattern=self._float_mean(
other, "SubstitutionRatePattern", weights=None
),
SubstitutionTimestep=self._float_mean(
other, "SubstitutionTimestep", weights
),
Cost=self._float_mean(other, "Cost", weights),
Density=self._float_mean(other, "Density", weights),
EmbodiedCarbon=self._float_mean(other, "EmbodiedCarbon", weights),
EmbodiedEnergy=self._float_mean(other, "EmbodiedEnergy", weights),
MoistureDiffusionResistance=self._float_mean(
other, "MoistureDiffusionResistance", weights
),
idf=self.idf,
)
new_obj.predecessors.update(self.predecessors + other.predecessors)
return new_obj
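# Usage sketch (the brick material is hypothetical): `combine` averages the
# numeric properties of two materials, weighted by Density unless explicit
# weights are passed, and `__add__` is just an alias for it.
#
#     brick = OpaqueMaterial(Name="Brick", Conductivity=0.9, SpecificHeat=840,
#                            Density=1900)
#     plaster = OpaqueMaterial.generic()
#     mixed = brick.combine(plaster)   # density-weighted means
#     mixed = brick + plaster          # equivalent, via __add__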
def to_json(self):
"""Convert class properties to dict"""
self.validate() # Validate object before trying to get json format
data_dict = collections.OrderedDict()
data_dict["$id"] = str(self.id)
data_dict["MoistureDiffusionResistance"] = self.MoistureDiffusionResistance
data_dict["Roughness"] = self.Roughness
data_dict["SolarAbsorptance"] = round(self.SolarAbsorptance, 2)
data_dict["SpecificHeat"] = round(self.SpecificHeat, 4)
data_dict["ThermalEmittance"] = round(self.ThermalEmittance, 2)
data_dict["VisibleAbsorptance"] = round(self.VisibleAbsorptance, 2)
data_dict["Conductivity"] = round(self.Conductivity, 3)
data_dict["Cost"] = self.Cost
data_dict["Density"] = round(self.Density, 4)
data_dict["EmbodiedCarbon"] = self.EmbodiedCarbon
data_dict["EmbodiedEnergy"] = self.EmbodiedEnergy
data_dict["SubstitutionRatePattern"] = self.SubstitutionRatePattern
data_dict["SubstitutionTimestep"] = self.SubstitutionTimestep
data_dict["TransportCarbon"] = self.TransportCarbon
data_dict["TransportDistance"] = self.TransportDistance
data_dict["TransportEnergy"] = self.TransportEnergy
data_dict["Category"] = self.Category
data_dict["Comments"] = self.Comments
data_dict["DataSource"] = self.DataSource
data_dict["Name"] = UniqueName(self.Name)
return data_dict
@classmethod
def from_epbunch(cls, epbunch, **kwargs):
"""Create an OpaqueMaterial from an IDF "Material", "Material:NoMAss",
or "Material:AirGap" element.
Hint:
(From EnergyPlus Manual): When a user enters such a “no mass”
material into EnergyPlus, internally the properties of this layer
are converted to approximate the properties of air (density,
specific heat, and conductivity) with the thickness adjusted to
maintain the user’s desired R-Value. This allowed such layers to be
handled internally in the same way as other layers without any
additional changes to the code. This solution was deemed accurate
enough as air has very little thermal mass and it made the coding of
the state space method simpler.
For Material:AirGap, a similar strategy is used, with the
exception that solar properties (solar and visible absorptance and
emittance) are assumed null.
Args:
epbunch (EpBunch): EP material object ("Material", "Material:NoMass", or "Material:AirGap")
**kwargs:
"""
if epbunch.key.upper() == "MATERIAL":
# do MATERIAL
Name = epbunch.Name
Conductivity = epbunch.Conductivity
Density = epbunch.Density
Roughness = epbunch.Roughness
SolarAbsorptance = epbunch.Solar_Absorptance
SpecificHeat = epbunch.Specific_Heat
ThermalEmittance = epbunch.Thermal_Absorptance
VisibleAbsorptance = epbunch.Visible_Absorptance
Thickness = epbunch.Thickness
return cls(
Conductivity=Conductivity,
Density=Density,
Roughness=Roughness,
SolarAbsorptance=SolarAbsorptance,
SpecificHeat=SpecificHeat,
ThermalEmittance=ThermalEmittance,
VisibleAbsorptance=VisibleAbsorptance,
Thickness=Thickness,
Name=Name,
idf=epbunch.theidf,
**kwargs,
)
elif epbunch.key.upper() == "MATERIAL:NOMASS":
# do MATERIAL:NOMASS. Assume properties of air.
Name = epbunch.Name
Conductivity = 0.02436 # W/mK, dry air at 0 °C and 100 kPa.
Density = 1.2754 # dry air at 0 °C and 100 kPa.
SpecificHeat = 100.5 # J/kg-K, dry air at 0 °C and 100 kPa.
Thickness = Conductivity * epbunch.Thermal_Resistance
Roughness = epbunch.Roughness
SolarAbsorptance = epbunch.Solar_Absorptance
ThermalEmittance = epbunch.Thermal_Absorptance
VisibleAbsorptance = epbunch.Visible_Absorptance
return cls(
Conductivity=Conductivity,
Density=Density,
Roughness=Roughness,
SolarAbsorptance=SolarAbsorptance,
SpecificHeat=SpecificHeat,
ThermalEmittance=ThermalEmittance,
VisibleAbsorptance=VisibleAbsorptance,
Thickness=Thickness,
Name=Name,
idf=epbunch.theidf,
**kwargs,
)
elif epbunch.key.upper() == "MATERIAL:AIRGAP":
gas_prop = {
"AIR": dict(
Conductivity=0.02436,
Density=1.754,
SpecificHeat=1000,
ThermalEmittance=0.001,
),
"ARGON": dict(
Conductivity=0.016,
Density=1.784,
SpecificHeat=1000,
ThermalEmittance=0.001,
),
"KRYPTON": dict(
Conductivity=0.0088,
Density=3.749,
SpecificHeat=1000,
ThermalEmittance=0.001,
),
"XENON": dict(
Conductivity=0.0051,
Density=5.761,
SpecificHeat=1000,
ThermalEmittance=0.001,
),
"SF6": dict(
Conductivity=0.001345,
Density=6.17,
SpecificHeat=1000,
ThermalEmittance=0.001,
),
}
for gasname, properties in gas_prop.items():
if gasname.lower() in epbunch.Name.lower():
thickness = properties["Conductivity"] * epbunch.Thermal_Resistance
return cls(
Name=epbunch.Name,
Thickness=thickness,
**properties,
idf=epbunch.theidf,
)
else:
thickness = (
gas_prop["AIR"]["Conductivity"] * epbunch.Thermal_Resistance
)
return cls(
Name=epbunch.Name,
Thickness=thickness,
**gas_prop["AIR"],
idf=epbunch.theidf,
)
else:
raise NotImplementedError(
"Material '{}' of type '{}' is not yet "
"supported. Please contact package "
"authors".format(epbunch.Name, epbunch.key)
)
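# Worked example of the "no mass" conversion above (numbers illustrative): a
# Material:NoMass layer with Thermal_Resistance = 0.5 m2-K/W becomes an
# air-like layer whose thickness preserves the R-value:
#
#     Thickness = Conductivity * Thermal_Resistance
#               = 0.02436 W/m-K * 0.5 m2-K/W
#               ~= 0.0122 m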
def validate(self):
"""Validates UmiObjects and fills in missing values"""
# Some OpaqueMaterial attributes don't have a default value, therefore an empty
# string is parsed. This breaks the UmiTemplate Editor, therefore we set a value
# on these attributes (if necessary) | |
<gh_stars>0
# Copyright 2019 <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions """
import pathlib
import re
import subprocess
import tempfile
import typing
from compilers.llvm import llvm
from compilers.llvm import llvm_as
from compilers.llvm import opt
from labm8.py import app
from labm8.py import fs
FLAGS = app.FLAGS
def DotCallGraphFromBytecode(bytecode: str) -> str:
"""Create a call graph from an LLVM bytecode file.
Args:
bytecode: The LLVM bytecode to create the call graph from.
Returns:
A dotfile string.
Raises:
OptException: In case the opt pass fails.
UnicodeDecodeError: If generated dotfile can't be read.
"""
with tempfile.TemporaryDirectory(prefix="phd_") as d:
output_dir = pathlib.Path(d)
# Change into the output directory, because the -dot-callgraph pass writes
# to the current working directory.
with fs.chdir(output_dir):
# We run with universal_newlines=False because the stdout of opt is the
# binary bitcode, which we completely ignore (we're only interested in
# stderr). This means we must encode stdin and decode stderr ourselves.
process = opt.Exec(
["-dot-callgraph"],
stdin=bytecode.encode("utf-8"),
universal_newlines=False,
log=False,
)
stderr = process.stderr.decode("utf-8")
# Propagate failures from opt as OptExceptions.
if process.returncode:
raise opt.OptException(returncode=process.returncode, stderr=stderr)
callgraph = output_dir / "callgraph.dot"
if not callgraph.is_file():
raise OSError(f"Callgraph dotfile not produced")
return fs.Read(callgraph)
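# Usage sketch (the IR text is illustrative and assumes a working LLVM `opt`
# binary behind the `opt` wrapper module):
#
#     bytecode = """
#     define i32 @main() {
#       ret i32 0
#     }
#     """
#     dot = DotCallGraphFromBytecode(bytecode)
#     print(dot)  # Graphviz source for the module's call graph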
def DotControlFlowGraphsFromBytecode(bytecode: str) -> typing.Iterator[str]:
"""Create a control flow graph from an LLVM bytecode file.
Args:
bytecode: The LLVM bytecode to create CFG dots from.
Returns:
An iterator of dotfile strings.
Raises:
OptException: In case the opt pass fails.
UnicodeDecodeError: If generated dotfile can't be read.
"""
with tempfile.TemporaryDirectory(prefix="phd_") as d:
output_dir = pathlib.Path(d)
# Change into the output directory, because the -dot-cfg pass writes files
# to the current working directory.
with fs.chdir(output_dir):
# We run with universal_newlines=False because the stdout of opt is the
# binary bitcode, which we completely ignore (we're only interested in
# stderr). This means we must encode stdin and decode stderr ourselves.
process = opt.Exec(
["-dot-cfg"],
stdin=bytecode.encode("utf-8"),
universal_newlines=False,
log=False,
)
stderr = process.stderr.decode("utf-8")
# Propagate failures from opt as OptExceptions.
if process.returncode:
raise opt.OptException(returncode=process.returncode, stderr=stderr)
for file in output_dir.iterdir():
# Opt pass prints the name of the dot files it generates, e.g.:
#
# $ opt -dot-cfg < foo.ll
# WARNING: You're attempting to print out a bitcode file.
# This is inadvisable as it may cause display problems. If
# you REALLY want to taste LLVM bitcode first-hand, you
# can force output with the `-f' option.
#
# Writing 'cfg.DoSomething.dot'...
# Writing 'cfg.main.dot'...
if f"Writing '{file.name}'..." not in stderr:
raise OSError(
f"Could not find reference to file '{file.name}' in "
f"opt stderr:\n{process.stderr}"
)
with open(file) as f:
yield f.read()
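# Usage sketch (same illustrative IR as in the call-graph example above): one
# dot string is yielded per function defined in the module.
#
#     for dot in DotControlFlowGraphsFromBytecode(bytecode):
#         render(dot)  # `render` is a placeholder for whatever consumes the dot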
def DotGraphsFromBytecode(
bytecode: str,
opt_args: typing.List[str],
opt_path: str = None,
output_pred: typing.Callable[[str], bool] = None,
) -> typing.Tuple[typing.List[str], typing.List[str]]:
"""Obtain dot graphs from an LLVM bytecode file using an opt pass.
Args:
bytecode: The LLVM bytecode to create the graphs from.
opt_args: A list of arguments to the opt tool that generate the graphs.
opt_path: The path to a custom opt binary. Overrides the default version.
output_pred: A predicate that receives an output file name, and returns
True if it should be collected in the first part of the result tuple,
or False if it should be collected in the second tuple element.
If None, all outputs are collected to the first element.
Returns:
A 2-tuple of lists of graphs as dot strings.
Raises:
OptException: In case the opt pass fails.
UnicodeDecodeError: If generated dotfile can't be read.
"""
graph_dots_true = []
graph_dots_false = []
with tempfile.TemporaryDirectory(prefix="phd_") as d:
output_dir = pathlib.Path(d)
# Change into the output directory, because the -dot-callgraph pass writes
# to the current working directory.
with fs.chdir(output_dir):
# We run with universal_newlines=False because the stdout of opt is the
# binary bitcode, which we completely ignore (we're only interested in
# stderr). This means we must encode stdin and decode stderr ourselves.
process = opt.Exec(
opt_args,
stdin=bytecode.encode("utf-8"),
universal_newlines=False,
log=False,
opt=opt_path,
)
stderr = process.stderr.decode("utf-8")
# Propagate failures from opt as OptExceptions.
if process.returncode:
raise opt.OptException(returncode=process.returncode, stderr=stderr)
for file in output_dir.iterdir():
# Opt pass prints the name of the dot files it generates, e.g.:
#
# $ opt -dot-cfg < foo.ll
# WARNING: You're attempting to print out a bitcode file.
# This is inadvisable as it may cause display problems. If
# you REALLY want to taste LLVM bitcode first-hand, you
# can force output with the `-f' option.
#
# Writing 'cfg.DoSomething.dot'...
# Writing 'cfg.main.dot'...
if f"Writing '{file.name}'..." not in stderr:
raise OSError(
f"Could not find reference to file '{file.name}' in "
f"opt stderr:\n{process.stderr}"
)
if not output_pred or output_pred(file.name):
graph_dots_true.append(fs.Read(file))
else:
graph_dots_false.append(fs.Read(file))
return graph_dots_true, graph_dots_false
def DotCallGraphAndControlFlowGraphsFromBytecode(
bytecode: str, opt_path: str = None
) -> typing.Tuple[str, typing.List[str]]:
"""Create call graph and control flow graphs from an LLVM bytecode file.
When both a call graph and CFGs are required, calling this function is
marginally faster than calling DotControlFlowGraphsFromBytecode() and
DotCallGraphFromBytecode() separately.
Args:
bytecode: The LLVM bytecode to create call graph and CFGs from.
opt_path: The path to a custom opt binary. Overrides the default version.
Returns:
A tuple, where the first element is the call graph dot string, and the
second element is a list of control flow graph dot strings.
Raises:
OptException: In case the opt pass fails.
UnicodeDecodeError: If generated dotfile can't be read.
"""
control_flow_graph_dots, callgraph_dots = DotGraphsFromBytecode(
bytecode,
["-metarenamer", "-dot-cfg", "-dot-callgraph"],
opt_path,
lambda name: name != "callgraph.dot",
)
if len(callgraph_dots) != 1:
raise OSError(f"Callgraph dotfile not produced")
callgraph = callgraph_dots[0]
return callgraph, control_flow_graph_dots
def GetOptArgs(
cflags: typing.Optional[typing.List[str]] = None,
) -> typing.List[typing.List[str]]:
"""Get the arguments passed to opt.
Args:
cflags: The cflags passed to clang. Defaults to -O0.
Returns:
A list of invocation arguments.
"""
cflags = cflags or ["-O0"]
p1 = subprocess.Popen(
[llvm_as.LLVM_AS], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE
)
p2 = subprocess.Popen(
[opt.OPT, "-disable-output", "-debug-pass=Arguments"] + cflags,
stdin=p1.stdout,
stderr=subprocess.PIPE,
universal_newlines=True,
)
_, stderr = p2.communicate()
if p2.returncode:
raise llvm.LlvmError(stderr)
args = []
for line in stderr.rstrip().split("\n"):
if not line.startswith("Pass Arguments:"):
raise llvm.LlvmError(f"Cannot interpret line: {line}")
line = line[len("Pass Arguments:") :]
args.append(line.split())
for arg in args[-1]:
if not arg[0] == "-":
raise llvm.LlvmError(f"Cannot interpret clang argument: {arg}")
return args
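# Usage sketch (requires the llvm_as/opt binaries wrapped above): list the opt
# passes that would run for a given set of cflags.
#
#     for invocation in GetOptArgs(["-O2"]):
#         print(" ".join(invocation))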
class Pointer(typing.NamedTuple):
"""A pointer in an alias set."""
type: str
identifier: str
size: int
class AliasSet(typing.NamedTuple):
# From https://llvm.org/doxygen/AliasSetTracker_8h_source.html
#
# /// The kind of alias relationship between pointers of the set.
# ///
# /// These represent conservatively correct alias results between any members
# /// of the set. We represent these independently of the values of alias
# /// results in order to pack it into a single bit. Lattice goes from
# /// MustAlias to MayAlias.
# enum AliasLattice {
# SetMustAlias = 0, SetMayAlias = 1
# };
type: str # str, one of {must alias, may alias}
# From https://llvm.org/doxygen/AliasSetTracker_8h_source.html
#
# /// The kinds of access this alias set models.
# ///
# /// We keep track of whether this alias set merely refers to the locations of
# /// memory (and not any particular access), whether it modifies or references
# /// the memory, or whether it does both. The lattice goes from "NoAccess" to
# /// either RefAccess or ModAccess, then to ModRefAccess as necessary.
# enum AccessLattice {
# NoAccess = 0,
# RefAccess = 1,
# ModAccess = 2,
# ModRefAccess = RefAccess | ModAccess
# };
mod_ref: str # str, one of {Mod,Ref,Mod/Ref}
pointers: typing.List[Pointer]
def GetAliasSetsByFunction(
bytecode: str,
) -> typing.Dict[str, typing.List[AliasSet]]:
"""Get the alias sets of a bytecode.
Args:
bytecode: An LLVM bytecode.
Returns:
A dictionary of alias sets, keyed by the function name.
Raises:
OptException: In | |
#!/usr/bin/python3
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keyboard Status Monitor.
Monitors one or more keyboards and mouses.
Shows their status graphically.
"""
__author__ = '<NAME> (<EMAIL>)'
__version__ = '1.50'
import logging
import os
import sys
import time
import gettext
import cairo
import gi
gi.require_version("Gdk", "3.0")
gi.require_version("Gtk", "3.0")
from gi.repository import \
GLib, \
Gdk, \
GdkPixbuf, \
Gtk
from keymon import xlib
from keymon import options
from keymon import lazy_pixbuf_creator
from keymon import mod_mapper
from keymon import settings
from keymon import shaped_window
from keymon import two_state_image
gettext.install('key-mon', 'locale')
def fix_svg_key_closure(fname, from_tos):
"""Create a closure to modify the key.
Args:
fname: path of the SVG template file to read.
from_tos: list of from, to pairs for search replace.
Returns:
A bound function which returns the contents of file fname with modifications.
"""
from_tos = tuple((a.encode(), b.encode()) for a, b in from_tos)
def fix_svg_key():
"""Given an SVG file return the SVG text fixed."""
logging.debug('Read file %r', fname)
fbytes = open(fname, "rb").read()
for fin, t in from_tos:
# Quick XML escape fix
t = t.replace(b'<', b'&lt;')
fbytes = fbytes.replace(fin, t)
#end for
return fbytes
#end fix_svg_key
#begin fix_svg_key_closure
return fix_svg_key
#end fix_svg_key_closure
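# Usage sketch (file name and replacement pair are illustrative): the closure
# defers reading the SVG template until the pixbuf is actually built.
#
#     load_key = fix_svg_key_closure('themes/classic/one-char-template.svg',
#                                    [('&', 'A')])
#     svg_bytes = load_key()  # template contents with '&' replaced by 'A'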
class KeyMon:
"""main KeyMon window class."""
# Fixme: all connects to instance methods as callbacks should
# use a weak ref to self to avoid reference circularity.
def __init__(self, options):
"""Create the Key Mon window.
Options dict:
scale: float 1.0 is default which means normal size.
meta: boolean show the meta (windows key)
kbd_file: string Use the kbd file given.
emulate_middle: Emulate the middle mouse button.
theme: Name of the theme to use to draw keys
"""
def create_window():
# creates the main window.
def create_images():
self.images['MOUSE'] = two_state_image.TwoStateImage(self.pixbufs, 'MOUSE', False)
for img in self.MODS:
self.images[img] = two_state_image.TwoStateImage \
(
pixbufs = self.pixbufs,
normal = img + '_EMPTY',
is_modifier = True,
show = self.enabled[img]
)
#end for
self.create_buttons()
#end create_images
def add_events():
# add events for the window to listen to.
self.window.connect('destroy', self.destroy)
self.window.connect('button-press-event', self.button_pressed)
self.window.connect('button-release-event', self.button_released)
self.window.connect('leave-notify-event', self.pointer_leave)
self.event_box.connect('button_release_event', self.right_click_handler)
accelgroup = Gtk.AccelGroup()
key, modifier = Gtk.accelerator_parse('<Control>q')
accelgroup.connect(key, modifier, Gtk.AccelFlags.VISIBLE, self.quit_program)
key, modifier = Gtk.accelerator_parse('<Control>s')
accelgroup.connect(key, modifier, Gtk.AccelFlags.VISIBLE, self.show_settings_dlg)
self.window.add_accel_group(accelgroup)
if self.options.screenshot:
GLib.timeout_add(700, self.do_screenshot)
return
#end if
GLib.idle_add(self.on_idle)
#end add_events
#begin create_window
self.window = Gtk.Window()
self.window.set_resizable(False)
self.window.set_title('Keyboard Status Monitor')
width, height = 30 * self.options.scale, 48 * self.options.scale
self.window.set_default_size(round(width), round(height))
self.window.set_decorated(self.options.decorated)
self.mouse_indicator_win = shaped_window.ShapedWindow \
(
self.svg_name('mouse-indicator'),
timeout=self.options.visible_click_timeout
)
self.mouse_follower_win = shaped_window.ShapedWindow \
(
self.svg_name('mouse-follower')
)
if self.options.follow_mouse:
self.mouse_follower_win.show()
#end if
self.window_style_provider = Gtk.CssProvider()
self.window.get_style_context() \
.add_provider(self.window_style_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER)
self.set_window_opacity(self.options.opacity)
self.window.set_keep_above(True)
self.event_box = Gtk.EventBox()
self.window.add(self.event_box)
self.event_box.show()
create_images()
self.hbox = Gtk.HBox(homogeneous = False, spacing = 0)
self.event_box.add(self.hbox)
self.layout_boxes()
self.hbox.show()
add_events()
self.set_accept_focus(False)
self.window.set_skip_taskbar_hint(True)
old_x = self.options.x_pos
old_y = self.options.y_pos
if old_x != -1 and old_y != -1 and old_x and old_y:
self.window.move(old_x, old_y)
#end if
self.window.show()
self.update_shape_mask(force=True)
#end create_window
#begin __init__
settings.SettingsDialog.register()
self.btns = \
[
'MOUSE',
'BTN_RIGHT',
'BTN_MIDDLE',
'BTN_MIDDLERIGHT',
'BTN_LEFT',
'BTN_LEFTRIGHT',
'BTN_LEFTMIDDLE',
'BTN_LEFTMIDDLERIGHT',
]
self.options = options
self.pathname = os.path.dirname(os.path.abspath(__file__))
if self.options.scale < 1.0:
self.svg_size = '-small'
else:
self.svg_size = ''
#end if
# Make lint happy by defining these.
self.hbox = None
self.window = None
self.event_box = None
self.mouse_indicator_win = None
self.key_image = None
self.buttons = None
self.no_press_timer = None
self.move_dragged = False
self.shape_mask_current = None
self.shape_mask_cache = {}
self.MODS = ['SHIFT', 'CTRL', 'META', 'ALT']
self.IMAGES = ['MOUSE'] + self.MODS
self.images = dict([(img, None) for img in self.IMAGES])
self.enabled = dict([(img, self.get_option(img.lower())) for img in self.IMAGES])
self.options.kbd_files = settings.get_kbd_files()
self.modmap = mod_mapper.safely_read_mod_map(self.options.kbd_file, self.options.kbd_files)
self.name_fnames = self.create_names_to_fnames()
self.devices = xlib.XEvents()
self.devices.start()
self.pixbufs = lazy_pixbuf_creator.LazyPixbufCreator \
(
name_fnames = self.name_fnames,
resize = self.options.scale
)
create_window()
self.fade_lock = 0
self.reset_no_press_timer()
#end __init__
def get_option(self, attr):
"""Shorthand for getattr(self.options, attr)"""
return getattr(self.options, attr)
#end get_option
def do_screenshot(self):
"""Create a screenshot showing some keys."""
for key in self.options.screenshot.split(','):
try:
if key == 'KEY_EMPTY':
continue
if key.startswith('KEY_'):
key_info = self.modmap.get_from_name(key)
if not key_info:
print('Key %s not found' % key)
self.destroy(None)
return
#end if
scancode = key_info[0]
event = xlib.XEvent('EV_KEY', scancode=scancode, code=key, value=1)
elif key.startswith('BTN_'):
event = xlib.XEvent('EV_KEY', scancode=0, code=key, value=1)
else :
event = None
#end if
if event != None :
self.handle_event(event)
#end if
while GLib.main_context_default().pending():
GLib.main_context_default().iteration(False)
#end while
time.sleep(0.1)
except Exception as exp:
print(exp)
#end try
#end for
while GLib.main_context_default().pending():
GLib.main_context_default().iteration(False)
#end while
time.sleep(0.1)
win = self.window
x, y = win.get_position()
w, h = win.get_size()
screenshot = Gdk.pixbuf_get_from_window(win.get_property("window"), 0, 0, w, h)
fname = 'screenshot.png'
screenshot.savev(fname, 'png', '', '')
print('Saved screenshot %r' % fname)
self.destroy(None)
#end do_screenshot
def create_names_to_fnames(self):
"""Give a name to images."""
if self.options.scale < 1.0:
self.svg_size = '-small'
else:
self.svg_size = ''
#end if
ftn = \
{
'MOUSE': [self.svg_name('mouse'),],
'BTN_MIDDLE': [self.svg_name('mouse'), self.svg_name('middle-mouse')],
'SCROLL_UP': [self.svg_name('mouse'), self.svg_name('scroll-up-mouse')],
'SCROLL_DOWN': [self.svg_name('mouse'), self.svg_name('scroll-dn-mouse')],
'REL_LEFT': [self.svg_name('mouse'), self.svg_name('scroll-lft-mouse')],
'REL_RIGHT': [self.svg_name('mouse'), self.svg_name('scroll-rgt-mouse')],
'SHIFT': [self.svg_name('shift')],
'SHIFT_EMPTY': [self.svg_name('shift'), self.svg_name('whiteout-72')],
'CTRL': [self.svg_name('ctrl')],
'CTRL_EMPTY': [self.svg_name('ctrl'), self.svg_name('whiteout-58')],
'META': [self.svg_name('meta'), self.svg_name('meta')],
'META_EMPTY': [self.svg_name('meta'), self.svg_name('whiteout-58')],
'ALT': [self.svg_name('alt')],
'ALT_EMPTY': [self.svg_name('alt'), self.svg_name('whiteout-58')],
'ALTGR': [self.svg_name('altgr')],
'ALTGR_EMPTY': [self.svg_name('altgr'), self.svg_name('whiteout-58')],
'KEY_EMPTY':
[
fix_svg_key_closure(self.svg_name('one-char-template'), [('&', '')]),
self.svg_name('whiteout-48'),
],
'BTN_LEFTRIGHT':
[
self.svg_name('mouse'), self.svg_name('left-mouse'),
self.svg_name('right-mouse'),
],
'BTN_LEFTMIDDLERIGHT':
[
self.svg_name('mouse'), self.svg_name('left-mouse'),
self.svg_name('middle-mouse'), self.svg_name('right-mouse'),
],
}
if self.options.swap_buttons:
# swap the meaning of left and right
left_str = 'right'
right_str = 'left'
else:
left_str = 'left'
right_str = 'right'
#end if
ftn.update \
(
{
'BTN_RIGHT':
[
self.svg_name('mouse'),
self.svg_name('%s-mouse' % right_str),
],
'BTN_LEFT':
[
self.svg_name('mouse'),
self.svg_name('%s-mouse' % left_str)
],
'BTN_LEFTMIDDLE':
[
self.svg_name('mouse'),
self.svg_name('%s-mouse' % left_str),
self.svg_name('middle-mouse'),
],
'BTN_MIDDLERIGHT':
[
self.svg_name('mouse'),
self.svg_name('middle-mouse'),
self.svg_name('%s-mouse' % right_str),
],
}
)
if self.options.scale >= 1.0:
ftn.update \
(
{
'KEY_SPACE':
[
fix_svg_key_closure
(
self.svg_name('two-line-wide'),
[('TOP', 'Space'), ('BOTTOM', '')]
),
],
'KEY_TAB':
[
fix_svg_key_closure
(
self.svg_name('two-line-wide'),
[('TOP', 'Tab'), ('BOTTOM', u'\u21B9')]
)
],
'KEY_BACKSPACE':
[
fix_svg_key_closure
(
self.svg_name('two-line-wide'),
[('TOP', 'Back'), ('BOTTOM', u'\u21fd')]
)
],
'KEY_RETURN':
[
fix_svg_key_closure
(
self.svg_name('two-line-wide'),
[('TOP', 'Enter'), ('BOTTOM', u'\u23CE')]
)
],
'KEY_CAPS_LOCK':
[
fix_svg_key_closure
(
self.svg_name('two-line-wide'),
[('TOP', 'Capslock'), ('BOTTOM', '')]
)
],
'KEY_MULTI_KEY':
[
fix_svg_key_closure
(
self.svg_name('two-line-wide'),
[('TOP', 'Compose'), ('BOTTOM', '')]
)
],
}
)
else:
ftn.update \
(
{
'KEY_SPACE':
[
fix_svg_key_closure(self.svg_name('one-line-wide'), [('&', 'Space')]),
],
'KEY_TAB':
[
fix_svg_key_closure(self.svg_name('one-line-wide'), [('&', 'Tab')]),
],
'KEY_BACKSPACE':
[
fix_svg_key_closure(self.svg_name('one-line-wide'), [('&', 'Back')]),
],
'KEY_RETURN':
[
fix_svg_key_closure(self.svg_name('one-line-wide'), [('&', 'Enter')]),
],
'KEY_CAPS_LOCK':
[
fix_svg_key_closure(self.svg_name('one-line-wide'), [('&', 'Capslck')]),
],
'KEY_MULTI_KEY':
[
fix_svg_key_closure(self.svg_name('one-line-wide'), [('&', 'Compose')]),
],
}
)
#end if
return ftn
#end create_names_to_fnames
def set_window_opacity(self, opacity) :
self.last_window_opacity = opacity
style = \
(
"window\n"
" {\n"
" opacity : %(opacity).3f;\n"
" }\n"
%
{
"opacity" : opacity,
}
)
self.window_style_provider.load_from_data(style.encode())
#end set_window_opacity
def update_shape_mask(self, *unused_args, **kwargs):
"recalculates backgroundless/backgrounded window region."
gdk_window = self.window.get_property("window")
if gdk_window == None :
return
if self.options.backgroundless > self.options.decorated :
# cannot be backgroundless if window decoration is visible
force = kwargs.get('force', False)
btns = [btn for btn in self.buttons if btn.get_visible()]
# Generate id to see if current mask needs to be updated, which is a tuple
# of allocation of buttons.
cache_id = tuple \
(
(a.x, a.y, a.width, a.height)
for btn in btns
for a in (btn.get_allocation(),)
)
if cache_id == self.shape_mask_current and not force:
return
# Try to find existing mask in cache
# TODO limit number of cached masks
shape_mask = self.shape_mask_cache.get(cache_id, None)
if shape_mask and not force:
self.window.get_property("window").shape_combine_region(shape_mask, 0, 0)
self.shape_mask_current = cache_id
return
#end if
alloc = self.window.get_allocation()
width = alloc.width
height = alloc.height
masks = \
[
Gdk.cairo_surface_create_from_pixbuf(self.pixbufs.get(btn.current), 1, None)
for btn in btns
]
shape_mask = cairo.ImageSurface(cairo.Format.ARGB32, width, height)
# not bothering to do equivalent of masks[0].get_depth()
gc = cairo.Context(shape_mask)
# Initialize the mask just in case masks of buttons can't fill the window,
# if that happens, some artifacts will be seen usually at right edge.
gc.set_source_rgba \
(
*(4 * ((1, 0)[self.options.backgroundless],))
)
gc.new_path()
gc.rectangle(0, 0, width, height)
gc.fill()
for btn_allocation, mask in zip(cache_id, masks):
# Don't create mask until every image
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: steammessages_clientserver.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import List
import betterproto
from .steammessages_base import CMsgAuthTicket, CMsgIpAddress
class EmmsLobbyStatus(betterproto.Enum):
Invalid = 0
Exists = 1
DoesNotExist = 2
NotAMember = 3
@dataclass(eq=False, repr=False)
class CMsgClientUdsp2PSessionStarted(betterproto.Message):
steamid_remote: int = betterproto.fixed64_field(1)
appid: int = betterproto.int32_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientUdsp2PSessionEnded(betterproto.Message):
steamid_remote: int = betterproto.fixed64_field(1)
appid: int = betterproto.int32_field(2)
session_length_sec: int = betterproto.int32_field(3)
session_error: int = betterproto.int32_field(4)
nattype: int = betterproto.int32_field(5)
bytes_recv: int = betterproto.int32_field(6)
bytes_sent: int = betterproto.int32_field(7)
bytes_sent_relay: int = betterproto.int32_field(8)
bytes_recv_relay: int = betterproto.int32_field(9)
time_to_connect_ms: int = betterproto.int32_field(10)
@dataclass(eq=False, repr=False)
class CMsgClientRegisterAuthTicketWithCm(betterproto.Message):
protocol_version: int = betterproto.uint32_field(1)
ticket: bytes = betterproto.bytes_field(3)
client_instance_id: int = betterproto.uint64_field(4)
@dataclass(eq=False, repr=False)
class CMsgClientTicketAuthComplete(betterproto.Message):
steam_id: int = betterproto.fixed64_field(1)
game_id: int = betterproto.fixed64_field(2)
estate: int = betterproto.uint32_field(3)
eauth_session_response: int = betterproto.uint32_field(4)
deprecated_ticket: bytes = betterproto.bytes_field(5)
ticket_crc: int = betterproto.uint32_field(6)
ticket_sequence: int = betterproto.uint32_field(7)
owner_steam_id: int = betterproto.fixed64_field(8)
@dataclass(eq=False, repr=False)
class CMsgClientCmList(betterproto.Message):
cm_addresses: List[int] = betterproto.uint32_field(1)
cm_ports: List[int] = betterproto.uint32_field(2)
cm_websocket_addresses: List[str] = betterproto.string_field(3)
percent_default_to_websocket: int = betterproto.uint32_field(4)
@dataclass(eq=False, repr=False)
class CMsgClientP2PConnectionInfo(betterproto.Message):
steam_id_dest: int = betterproto.fixed64_field(1)
steam_id_src: int = betterproto.fixed64_field(2)
app_id: int = betterproto.uint32_field(3)
candidate: bytes = betterproto.bytes_field(4)
legacy_connection_id_src: int = betterproto.fixed64_field(5)
rendezvous: bytes = betterproto.bytes_field(6)
@dataclass(eq=False, repr=False)
class CMsgClientP2PConnectionFailInfo(betterproto.Message):
steam_id_dest: int = betterproto.fixed64_field(1)
steam_id_src: int = betterproto.fixed64_field(2)
app_id: int = betterproto.uint32_field(3)
ep2_p_session_error: int = betterproto.uint32_field(4)
connection_id_dest: int = betterproto.fixed64_field(5)
close_reason: int = betterproto.uint32_field(7)
close_message: str = betterproto.string_field(8)
@dataclass(eq=False, repr=False)
class CMsgClientNetworkingCertRequest(betterproto.Message):
key_data: bytes = betterproto.bytes_field(2)
app_id: int = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientNetworkingCertReply(betterproto.Message):
cert: bytes = betterproto.bytes_field(4)
ca_key_id: int = betterproto.fixed64_field(5)
ca_signature: bytes = betterproto.bytes_field(6)
@dataclass(eq=False, repr=False)
class CMsgClientNetworkingMobileCertRequest(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientNetworkingMobileCertReply(betterproto.Message):
encoded_cert: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientGetAppOwnershipTicket(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientGetAppOwnershipTicketResponse(betterproto.Message):
eresult: int = betterproto.uint32_field(1)
app_id: int = betterproto.uint32_field(2)
ticket: bytes = betterproto.bytes_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientSessionToken(betterproto.Message):
token: int = betterproto.uint64_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientGameConnectTokens(betterproto.Message):
max_tokens_to_keep: int = betterproto.uint32_field(1)
tokens: List[bytes] = betterproto.bytes_field(2)
@dataclass(eq=False, repr=False)
class CMsgGsServerType(betterproto.Message):
app_id_served: int = betterproto.uint32_field(1)
flags: int = betterproto.uint32_field(2)
deprecated_game_ip_address: int = betterproto.uint32_field(3)
game_port: int = betterproto.uint32_field(4)
game_dir: str = betterproto.string_field(5)
game_version: str = betterproto.string_field(6)
game_query_port: int = betterproto.uint32_field(7)
@dataclass(eq=False, repr=False)
class CMsgGsStatusReply(betterproto.Message):
is_secure: bool = betterproto.bool_field(1)
@dataclass(eq=False, repr=False)
class CMsgGsPlayerList(betterproto.Message):
players: List["CMsgGsPlayerListPlayer"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class CMsgGsPlayerListPlayer(betterproto.Message):
steam_id: int = betterproto.uint64_field(1)
deprecated_public_ip: int = betterproto.uint32_field(2)
token: bytes = betterproto.bytes_field(3)
public_ip: "CMsgIpAddress" = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class CMsgGsUserPlaying(betterproto.Message):
steam_id: int = betterproto.fixed64_field(1)
deprecated_public_ip: int = betterproto.uint32_field(2)
token: bytes = betterproto.bytes_field(3)
public_ip: "CMsgIpAddress" = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class CMsgGsDisconnectNotice(betterproto.Message):
steam_id: int = betterproto.fixed64_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientGamesPlayed(betterproto.Message):
games_played: List["CMsgClientGamesPlayedGamePlayed"] = betterproto.message_field(1)
client_os_type: int = betterproto.uint32_field(2)
cloud_gaming_platform: int = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientGamesPlayedGamePlayed(betterproto.Message):
steam_id_gs: int = betterproto.uint64_field(1)
game_id: int = betterproto.fixed64_field(2)
deprecated_game_ip_address: int = betterproto.uint32_field(3)
game_port: int = betterproto.uint32_field(4)
is_secure: bool = betterproto.bool_field(5)
token: bytes = betterproto.bytes_field(6)
game_extra_info: str = betterproto.string_field(7)
game_data_blob: bytes = betterproto.bytes_field(8)
process_id: int = betterproto.uint32_field(9)
streaming_provider_id: int = betterproto.uint32_field(10)
game_flags: int = betterproto.uint32_field(11)
owner_id: int = betterproto.uint32_field(12)
vr_hmd_vendor: str = betterproto.string_field(13)
vr_hmd_model: str = betterproto.string_field(14)
launch_option_type: int = betterproto.uint32_field(15)
primary_controller_type: int = betterproto.int32_field(16)
primary_steam_controller_serial: str = betterproto.string_field(17)
total_steam_controller_count: int = betterproto.uint32_field(18)
total_non_steam_controller_count: int = betterproto.uint32_field(19)
controller_workshop_file_id: int = betterproto.uint64_field(20)
launch_source: int = betterproto.uint32_field(21)
vr_hmd_runtime: int = betterproto.uint32_field(22)
game_ip_address: "CMsgIpAddress" = betterproto.message_field(23)
controller_connection_type: int = betterproto.uint32_field(24)
@dataclass(eq=False, repr=False)
class CMsgGsApprove(betterproto.Message):
steam_id: int = betterproto.fixed64_field(1)
owner_steam_id: int = betterproto.fixed64_field(2)
@dataclass(eq=False, repr=False)
class CMsgGsDeny(betterproto.Message):
steam_id: int = betterproto.fixed64_field(1)
edeny_reason: int = betterproto.int32_field(2)
deny_string: str = betterproto.string_field(3)
@dataclass(eq=False, repr=False)
class CMsgGsKick(betterproto.Message):
steam_id: int = betterproto.fixed64_field(1)
edeny_reason: int = betterproto.int32_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientAuthList(betterproto.Message):
tokens_left: int = betterproto.uint32_field(1)
last_request_seq: int = betterproto.uint32_field(2)
last_request_seq_from_server: int = betterproto.uint32_field(3)
tickets: List["CMsgAuthTicket"] = betterproto.message_field(4)
app_ids: List[int] = betterproto.uint32_field(5)
message_sequence: int = betterproto.uint32_field(6)
@dataclass(eq=False, repr=False)
class CMsgClientAuthListAck(betterproto.Message):
ticket_crc: List[int] = betterproto.uint32_field(1)
app_ids: List[int] = betterproto.uint32_field(2)
message_sequence: int = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientLicenseList(betterproto.Message):
eresult: int = betterproto.int32_field(1)
licenses: List["CMsgClientLicenseListLicense"] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientLicenseListLicense(betterproto.Message):
package_id: int = betterproto.uint32_field(1)
time_created: int = betterproto.fixed32_field(2)
time_next_process: int = betterproto.fixed32_field(3)
minute_limit: int = betterproto.int32_field(4)
minutes_used: int = betterproto.int32_field(5)
payment_method: int = betterproto.uint32_field(6)
flags: int = betterproto.uint32_field(7)
purchase_country_code: str = betterproto.string_field(8)
license_type: int = betterproto.uint32_field(9)
territory_code: int = betterproto.int32_field(10)
change_number: int = betterproto.int32_field(11)
owner_id: int = betterproto.uint32_field(12)
initial_period: int = betterproto.uint32_field(13)
initial_time_unit: int = betterproto.uint32_field(14)
renewal_period: int = betterproto.uint32_field(15)
renewal_time_unit: int = betterproto.uint32_field(16)
access_token: int = betterproto.uint64_field(17)
master_package_id: int = betterproto.uint32_field(18)
@dataclass(eq=False, repr=False)
class CMsgClientLbsSetScore(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
leaderboard_id: int = betterproto.int32_field(2)
score: int = betterproto.int32_field(3)
details: bytes = betterproto.bytes_field(4)
upload_score_method: int = betterproto.int32_field(5)
@dataclass(eq=False, repr=False)
class CMsgClientLbsSetScoreResponse(betterproto.Message):
eresult: int = betterproto.int32_field(1)
leaderboard_entry_count: int = betterproto.int32_field(2)
score_changed: bool = betterproto.bool_field(3)
global_rank_previous: int = betterproto.int32_field(4)
global_rank_new: int = betterproto.int32_field(5)
@dataclass(eq=False, repr=False)
class CMsgClientLbsSetUgc(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
leaderboard_id: int = betterproto.int32_field(2)
ugc_id: int = betterproto.fixed64_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientLbsSetUgcResponse(betterproto.Message):
eresult: int = betterproto.int32_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientLbsFindOrCreateLb(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
leaderboard_sort_method: int = betterproto.int32_field(2)
leaderboard_display_type: int = betterproto.int32_field(3)
create_if_not_found: bool = betterproto.bool_field(4)
leaderboard_name: str = betterproto.string_field(5)
@dataclass(eq=False, repr=False)
class CMsgClientLbsFindOrCreateLbResponse(betterproto.Message):
eresult: int = betterproto.int32_field(1)
leaderboard_id: int = betterproto.int32_field(2)
leaderboard_entry_count: int = betterproto.int32_field(3)
leaderboard_sort_method: int = betterproto.int32_field(4)
leaderboard_display_type: int = betterproto.int32_field(5)
leaderboard_name: str = betterproto.string_field(6)
@dataclass(eq=False, repr=False)
class CMsgClientLbsGetLbEntries(betterproto.Message):
app_id: int = betterproto.int32_field(1)
leaderboard_id: int = betterproto.int32_field(2)
range_start: int = betterproto.int32_field(3)
range_end: int = betterproto.int32_field(4)
leaderboard_data_request: int = betterproto.int32_field(5)
steamids: List[int] = betterproto.fixed64_field(6)
@dataclass(eq=False, repr=False)
class CMsgClientLbsGetLbEntriesResponse(betterproto.Message):
eresult: int = betterproto.int32_field(1)
leaderboard_entry_count: int = betterproto.int32_field(2)
entries: List["CMsgClientLbsGetLbEntriesResponseEntry"] = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientLbsGetLbEntriesResponseEntry(betterproto.Message):
steam_id_user: int = betterproto.fixed64_field(1)
global_rank: int = betterproto.int32_field(2)
score: int = betterproto.int32_field(3)
details: bytes = betterproto.bytes_field(4)
ugc_id: int = betterproto.fixed64_field(5)
@dataclass(eq=False, repr=False)
class CMsgClientIsLimitedAccount(betterproto.Message):
bis_limited_account: bool = betterproto.bool_field(1)
bis_community_banned: bool = betterproto.bool_field(2)
bis_locked_account: bool = betterproto.bool_field(3)
bis_limited_account_allowed_to_invite_friends: bool = betterproto.bool_field(4)
@dataclass(eq=False, repr=False)
class CMsgClientRequestedClientStats(betterproto.Message):
stats_to_send: List["CMsgClientRequestedClientStatsStatsToSend"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientRequestedClientStatsStatsToSend(betterproto.Message):
client_stat: int = betterproto.uint32_field(1)
stat_aggregate_method: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientStat2(betterproto.Message):
stat_detail: List["CMsgClientStat2StatDetail"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class CMsgClientStat2StatDetail(betterproto.Message):
client_stat: int = betterproto.uint32_field(1)
ll_value: int = betterproto.int64_field(2)
time_of_day: int = betterproto.uint32_field(3)
cell_id: int = betterproto.uint32_field(4)
depot_id: int = betterproto.uint32_field(5)
app_id: int = betterproto.uint32_field(6)
@dataclass(eq=False, repr=False)
class CMsgClientMmsSetRatelimitPolicyOnClient(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
enable_rate_limits: bool = betterproto.bool_field(2)
seconds_per_message: int = betterproto.int32_field(3)
milliseconds_per_data_update: int = betterproto.int32_field(4)
@dataclass(eq=False, repr=False)
class CMsgClientMmsCreateLobby(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
max_members: int = betterproto.int32_field(2)
lobby_type: int = betterproto.int32_field(3)
lobby_flags: int = betterproto.int32_field(4)
cell_id: int = betterproto.uint32_field(5)
deprecated_public_ip: int = betterproto.uint32_field(6)
metadata: bytes = betterproto.bytes_field(7)
persona_name_owner: str = betterproto.string_field(8)
public_ip: "CMsgIpAddress" = betterproto.message_field(9)
@dataclass(eq=False, repr=False)
class CMsgClientMmsCreateLobbyResponse(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
eresult: int = betterproto.int32_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientMmsJoinLobby(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
persona_name: str = betterproto.string_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientMmsJoinLobbyResponse(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
chat_room_enter_response: int = betterproto.int32_field(3)
max_members: int = betterproto.int32_field(4)
lobby_type: int = betterproto.int32_field(5)
lobby_flags: int = betterproto.int32_field(6)
steam_id_owner: int = betterproto.fixed64_field(7)
metadata: bytes = betterproto.bytes_field(8)
members: List["CMsgClientMmsJoinLobbyResponseMember"] = betterproto.message_field(9)
@dataclass(eq=False, repr=False)
class CMsgClientMmsJoinLobbyResponseMember(betterproto.Message):
steam_id: int = betterproto.fixed64_field(1)
persona_name: str = betterproto.string_field(2)
metadata: bytes = betterproto.bytes_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientMmsLeaveLobby(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientMmsLeaveLobbyResponse(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
eresult: int = betterproto.int32_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientMmsGetLobbyList(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
num_lobbies_requested: int = betterproto.int32_field(3)
cell_id: int = betterproto.uint32_field(4)
deprecated_public_ip: int = betterproto.uint32_field(5)
filters: List["CMsgClientMmsGetLobbyListFilter"] = betterproto.message_field(6)
public_ip: "CMsgIpAddress" = betterproto.message_field(7)
@dataclass(eq=False, repr=False)
class CMsgClientMmsGetLobbyListFilter(betterproto.Message):
key: str = betterproto.string_field(1)
value: str = betterproto.string_field(2)
comparision: int = betterproto.int32_field(3)
filter_type: int = betterproto.int32_field(4)
@dataclass(eq=False, repr=False)
class CMsgClientMmsGetLobbyListResponse(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
eresult: int = betterproto.int32_field(3)
lobbies: List["CMsgClientMmsGetLobbyListResponseLobby"] = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class CMsgClientMmsGetLobbyListResponseLobby(betterproto.Message):
steam_id: int = betterproto.fixed64_field(1)
max_members: int = betterproto.int32_field(2)
lobby_type: int = betterproto.int32_field(3)
lobby_flags: int = betterproto.int32_field(4)
metadata: bytes = betterproto.bytes_field(5)
num_members: int = betterproto.int32_field(6)
distance: float = betterproto.float_field(7)
weight: int = betterproto.int64_field(8)
@dataclass(eq=False, repr=False)
class CMsgClientMmsSetLobbyData(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
steam_id_member: int = betterproto.fixed64_field(3)
max_members: int = betterproto.int32_field(4)
lobby_type: int = betterproto.int32_field(5)
lobby_flags: int = betterproto.int32_field(6)
metadata: bytes = betterproto.bytes_field(7)
@dataclass(eq=False, repr=False)
class CMsgClientMmsSetLobbyDataResponse(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
eresult: int = betterproto.int32_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientMmsGetLobbyData(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
@dataclass(eq=False, repr=False)
class CMsgClientMmsLobbyData(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
num_members: int = betterproto.int32_field(3)
max_members: int = betterproto.int32_field(4)
lobby_type: int = betterproto.int32_field(5)
lobby_flags: int = betterproto.int32_field(6)
steam_id_owner: int = betterproto.fixed64_field(7)
metadata: bytes = betterproto.bytes_field(8)
members: List["CMsgClientMmsLobbyDataMember"] = betterproto.message_field(9)
lobby_cellid: int = betterproto.uint32_field(10)
owner_should_accept_changes: bool = betterproto.bool_field(11)
@dataclass(eq=False, repr=False)
class CMsgClientMmsLobbyDataMember(betterproto.Message):
steam_id: int = betterproto.fixed64_field(1)
persona_name: str = betterproto.string_field(2)
metadata: bytes = betterproto.bytes_field(3)
@dataclass(eq=False, repr=False)
class CMsgClientMmsSendLobbyChatMsg(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
steam_id_target: int = betterproto.fixed64_field(3)
lobby_message: bytes = betterproto.bytes_field(4)
@dataclass(eq=False, repr=False)
class CMsgClientMmsLobbyChatMsg(betterproto.Message):
app_id: int = betterproto.uint32_field(1)
steam_id_lobby: int = betterproto.fixed64_field(2)
steam_id_sender: int = betterproto.fixed64_field(3)
lobby_message: bytes = betterproto.bytes_field(4)
@dataclass(eq=False, repr=False)
255),
'gray38': (97, 97, 97, 255),
'darkorange4': (139, 69, 0, 255),
'mintcream': (245, 255, 250, 255),
'darkorange1': (255, 127, 0, 255),
'antiquewhite': (250, 235, 215, 255),
'darkorange2': (238, 118, 0, 255),
'grey18': (46, 46, 46, 255),
'grey19': (48, 48, 48, 255),
'grey38': (97, 97, 97, 255),
'moccasin': (255, 228, 181, 255),
'grey10': (26, 26, 26, 255),
'chocolate1': (255, 127, 36, 255),
'chocolate2': (238, 118, 33, 255),
'chocolate3': (205, 102, 29, 255),
'saddlebrown': (139, 69, 19, 255),
'grey15': (38, 38, 38, 255),
'darkslateblue': (72, 61, 139, 255),
'lightskyblue': (135, 206, 250, 255),
'gray69': (176, 176, 176, 255),
'gray68': (173, 173, 173, 255),
'deeppink': (255, 20, 147, 255),
'gray65': (166, 166, 166, 255),
'gray64': (163, 163, 163, 255),
'gray67': (171, 171, 171, 255),
'gray66': (168, 168, 168, 255),
'gray25': (64, 64, 64, 255),
'coral': (255, 127, 80, 255),
'gray63': (161, 161, 161, 255),
'gray62': (158, 158, 158, 255),
'goldenrod4': (139, 105, 20, 255),
'grey35': (89, 89, 89, 255),
'gray89': (227, 227, 227, 255),
'goldenrod1': (255, 193, 37, 255),
'goldenrod2': (238, 180, 34, 255),
'goldenrod3': (205, 155, 29, 255),
'springgreen1': (0, 255, 127, 255),
'springgreen2': (0, 238, 118, 255),
'springgreen3': (0, 205, 102, 255),
'springgreen4': (0, 139, 69, 255),
'mistyrose1': (255, 228, 225, 255),
'sandybrown': (244, 164, 96, 255),
'grey30': (77, 77, 77, 255),
'seashell2': (238, 229, 222, 255),
'seashell3': (205, 197, 191, 255),
'tan': (210, 180, 140, 255),
'seashell1': (255, 245, 238, 255),
'mistyrose3': (205, 183, 181, 255),
'magenta': (255, 0, 255, 255),
'pink': (255, 192, 203, 255),
'ivory2': (238, 238, 224, 255),
'ivory1': (255, 255, 240, 255),
'lightcyan2': (209, 238, 238, 255),
'mediumseagreen': (60, 179, 113, 255),
'ivory4': (139, 139, 131, 255),
'darkorange': (255, 140, 0, 255),
'powderblue': (176, 224, 230, 255),
'dodgerblue1': (30, 144, 255, 255),
'gray95': (242, 242, 242, 255),
'firebrick1': (255, 48, 48, 255),
'gray7': (18, 18, 18, 255),
'mistyrose4': (139, 125, 123, 255),
'tomato': (255, 99, 71, 255),
'indianred2': (238, 99, 99, 255),
'steelblue2': (92, 172, 238, 255),
'gray100': (255, 255, 255, 255),
'seashell4': (139, 134, 130, 255),
'grey89': (227, 227, 227, 255),
'grey88': (224, 224, 224, 255),
'grey87': (222, 222, 222, 255),
'grey86': (219, 219, 219, 255),
'grey85': (217, 217, 217, 255),
'grey84': (214, 214, 214, 255),
'midnightblue': (25, 25, 112, 255),
'grey82': (209, 209, 209, 255),
'grey81': (207, 207, 207, 255),
'yellow3': (205, 205, 0, 255),
'ivory3': (205, 205, 193, 255),
'grey22': (56, 56, 56, 255),
'gray85': (217, 217, 217, 255),
'violetred3': (205, 50, 120, 255),
'dodgerblue2': (28, 134, 238, 255),
'gray42': (107, 107, 107, 255),
'sienna2': (238, 121, 66, 255),
'grey72': (184, 184, 184, 255),
'grey73': (186, 186, 186, 255),
'grey70': (179, 179, 179, 255),
'palevioletred': (219, 112, 147, 255),
'lightslategray': (119, 136, 153, 255),
'grey77': (196, 196, 196, 255),
'grey74': (189, 189, 189, 255),
'slategray1': (198, 226, 255, 255),
'pink1': (255, 181, 197, 255),
'mediumpurple1': (171, 130, 255, 255),
'pink3': (205, 145, 158, 255),
'antiquewhite4': (139, 131, 120, 255),
'lightpink1': (255, 174, 185, 255),
'honeydew2': (224, 238, 224, 255),
'khaki4': (139, 134, 78, 255),
'darkolivegreen4': (110, 139, 61, 255),
'gray45': (115, 115, 115, 255),
'slategray3': (159, 182, 205, 255),
'darkolivegreen1': (202, 255, 112, 255),
'khaki1': (255, 246, 143, 255),
'khaki2': (238, 230, 133, 255),
'khaki3': (205, 198, 115, 255),
'lavenderblush': (255, 240, 245, 255),
'honeydew4': (131, 139, 131, 255),
'salmon3': (205, 112, 84, 255),
'salmon2': (238, 130, 98, 255),
'gray92': (235, 235, 235, 255),
'salmon4': (139, 76, 57, 255),
'gray49': (125, 125, 125, 255),
'gray48': (122, 122, 122, 255),
'linen': (250, 240, 230, 255),
'burlywood1': (255, 211, 155, 255),
'green': (0, 255, 0, 255),
'gray47': (120, 120, 120, 255),
'blueviolet': (138, 43, 226, 255),
'brown2': (238, 59, 59, 255),
'brown3': (205, 51, 51, 255),
'peachpuff': (255, 218, 185, 255),
'brown4': (139, 35, 35, 255),
'firebrick4': (139, 26, 26, 255),
'azure1': (240, 255, 255, 255),
'azure3': (193, 205, 205, 255),
'azure2': (224, 238, 238, 255),
'azure4': (131, 139, 139, 255),
'tomato4': (139, 54, 38, 255),
'orange4': (139, 90, 0, 255),
'firebrick': (178, 34, 34, 255),
'indianred': (205, 92, 92, 255),
'orange1': (255, 165, 0, 255),
'orange3': (205, 133, 0, 255),
'orange2': (238, 154, 0, 255),
'darkolivegreen': (85, 107, 47, 255),
'gray2': (5, 5, 5, 255),
'slategrey': (112, 128, 144, 255),
'gray81': (207, 207, 207, 255),
'darkred': (139, 0, 0, 255),
'gray3': (8, 8, 8, 255),
'lightsteelblue1': (202, 225, 255, 255),
'lightsteelblue2': (188, 210, 238, 255),
'lightsteelblue3': (162, 181, 205, 255),
'lightsteelblue4': (110, 123, 139, 255),
'tomato3': (205, 79, 57, 255),
'gray43': (110, 110, 110, 255),
'darkgoldenrod4': (139, 101, 8, 255),
'grey50': (127, 127, 127, 255),
'yellow4': (139, 139, 0, 255),
'mediumorchid': (186, 85, 211, 255),
'yellow2': (238, 238, 0, 255),
'darkgoldenrod2': (238, 173, 14, 255),
'darkgoldenrod3': (205, 149, 12, 255),
'chartreuse': (127, 255, 0, 255),
'mediumblue': (0, 0, 205, 255),
'gray4': (10, 10, 10, 255),
'springgreen': (0, 255, 127, 255),
'orange': (255, 165, 0, 255),
'gray5': (13, 13, 13, 255),
'lightsalmon': (255, 160, 122, 255),
'gray19': (48, 48, 48, 255),
'turquoise': (64, 224, 208, 255),
'lightseagreen': (32, 178, 170, 255),
'grey8': (20, 20, 20, 255),
'grey9': (23, 23, 23, 255),
'grey6': (15, 15, 15, 255),
'grey7': (18, 18, 18, 255),
'grey4': (10, 10, 10, 255),
'grey5': (13, 13, 13, 255),
'grey2': (5, 5, 5, 255),
'grey3': (8, 8, 8, 255),
'grey0': (0, 0, 0, 255),
'grey1': (3, 3, 3, 255),
'gray50': (127, 127, 127, 255),
'goldenrod': (218, 165, 32, 255),
'grey58': (148, 148, 148, 255),
'grey59': (150, 150, 150, 255),
'gray51': (130, 130, 130, 255),
'grey54': (138, 138, 138, 255),
'mediumorchid4': (122, 55, 139, 255),
'grey56': (143, 143, 143, 255),
'navajowhite3': (205, 179, 139, 255),
'mediumorchid1': (224, 102, 255, 255),
'grey51': (130, 130, 130, 255),
'mediumorchid3': (180, 82, 205, 255),
'mediumorchid2': (209, 95, 238, 255),
'cyan2': (0, 238, 238, 255),
'cyan3': (0, 205, 205, 255),
'gray23': (59, 59, 59, 255),
'cyan1': (0, 255, 255, 255),
'darkgreen': (0, 100, 0, 255),
'gray24': (61, 61, 61, 255),
'cyan4': (0, 139, 139, 255),
'darkviolet': (148, 0, 211, 255),
'peachpuff4': (139, 119, 101, 255),
'gray28': (71, 71, 71, 255),
'slateblue4': (71, 60, 139, 255),
'slateblue3': (105, 89, 205, 255),
'peachpuff1': (255, 218, 185, 255),
'peachpuff2': (238, 203, 173, 255),
'peachpuff3': (205, 175, 149, 255),
'gray29': (74, 74, 74, 255),
'paleturquoise': (175, 238, 238, 255),
'darkgray': (169, 169, 169, 255),
'grey25': (64, 64, 64, 255),
'darkmagenta': (139, 0, 139, 255),
'palegoldenrod': (238, 232, 170, 255),
'grey64': (163, 163, 163, 255),
'grey12': (31, 31, 31, 255),
'deeppink3': (205, 16, 118, 255),
'gray79': (201, 201, 201, 255),
'gray83': (212, 212, 212, 255),
'deeppink2': (238, 18, 137, 255),
'burlywood4': (139, 115, 85, 255),
'palevioletred4': (139, 71, 93, 255),
'deeppink1': (255, 20, 147, 255),
'slateblue2': (122, 103, 238, 255),
'grey46': (117, 117, 117, 255),
'royalblue4': (39, 64, 139, 255),
'yellowgreen': (154, 205, 50, 255),
'royalblue1': (72, 118, 255, 255),
'slateblue1': (131, 111, 255, 255),
'lightgoldenrod3': (205, 190, 112, 255),
'lightgoldenrod2': (238, 220, 130, 255),
'navy': (0, 0, 128, 255),
'orchid': (218, 112, 214, 255),
'ghostwhite': (248, 248, 255, 255),
'purple': (160, 32, 240, 255),
'darkkhaki': (189, 183, 107, 255),
'grey45': (115, 115, 115, 255),
'gray94': (240, 240, 240, 255),
'wheat4': (139, 126, 102, 255),
'gray96': (245, 245, 245, 255),
'gray97': (247, 247, 247, 255),
'wheat1': (255, 231, 186, 255),
'gray91': (232, 232, 232, 255),
'wheat3': (205, 186, 150, 255),
'wheat2': (238, 216, 174, 255),
'indianred4': (139, 58, 58, 255),
'coral2': (238, 106, 80, 255),
'coral1': (255, 114, 86, 255),
'violetred': (208, 32, 144, 255),
'rosybrown3': (205, 155, 155, 255),
'deepskyblue2': (0, 178, 238, 255),
'deepskyblue1': (0, 191, 255, 255),
'bisque': (255, 228, 196, 255),
'grey49': (125, 125, 125, 255),
'khaki': (240, 230, 140, 255),
'wheat': (245, 222, 179, 255),
'lightslateblue': (132, 112, 255, 255),
'mediumpurple3': (137, 104, 205, 255),
'gray55': (140, 140, 140, 255),
'deepskyblue': (0, 191, 255, 255),
'gray98': (250, 250, 250, 255),
'steelblue': (70, 130, 180, 255),
'aliceblue': (240, 248, 255, | |
"""
This module pre-processes the CE-CT acquisitions and associated segmentations and generates
a DataFrame tracking the file paths of the pre-processed items, stored as NumPy ndarrays.
This module is to be run from the top-level data-processing directory using the -m flag as follows:
Usage:
$ python3 -m luna.preprocess_feature --spark_master_uri {spark_master_uri} --base_directory {directory/to/tables} --destination_directory {directory/to/where/feature/table/to/be/outputted} --target_spacing {x_spacing} {y_spacing} {z_spacing} --query "{sql where clause}" --feature_table_output_name {name-of-table-to-be-created} --custom_preprocessing_script {path/to/preprocessing/script}
Parameters:
REQUIRED PARAMETERS:
--target_spacing: target spacing for the x,y,and z dimensions
--spark_master_uri: spark master uri e.g. spark://master-ip:7077 or local[*]
OPTIONAL PARAMETERS:
--base_directory: path to write feature table and files. We assume scan/annotation refined tables are at a specific path on gpfs.
--destination_directory: specify where feature table should be written, if not specified, will default to writing feature table to base_directory
--query: where clause of SQL query to filter the feature table. WHERE does not need to be included; make sure to wrap the clause in quotes so it is interpreted correctly
- Queriable Columns to Filter By:
SeriesInstanceUID
annotation_record_uuid
annotation_absolute_hdfs_path
annotation_filename
annotation_type
annotation_payload_number
annotation_absolute_hdfs_host
scan_record_uuid
scan_absolute_hdfs_path
scan_filename
scan_type
scan_absolute_hdfs_host
scan_payload_number
preprocessed_annotation_path
preprocessed_scan_path
preprocessed_target_spacing_x
preprocessed_target_spacing_y
preprocessed_target_spacing_z
feature_record_uuid
- examples:
- filtering by feature_record_uuid: --query "feature_record_uuid='123' or feature_record_uuid='456'"
- filtering by SeriesInstanceUID: --query "SeriesInstanceUID = '123456abc'"
--feature_table_output_name: name of feature table that is created, default is feature-table,
feature table will be created at {base_directory}/tables/features/{feature_table_output_name}
--custom_preprocessing_script: path to preprocessing script containing "process_patient" function. By default, uses process_patient_default() function for preprocessing
Example:
$ python -m luna.preprocess_feature --spark_master_uri local[*] --base_directory /gpfs/mskmindhdp_emc/user/pateld6/data-processing/test-tables/ --destination_directory /gpfs/mskmindhdp_emc/user/pateld6/data-processing/feature-tables/ --target_spacing 1.0 1.0 3.0 --query "SeriesInstanceUID = '123456abc'" --feature_table_output_name brca-feature-table --custom_preprocessing_script /gpfs/mskmindhdp_emc/user/pateld6/data-processing/tests/test_external_process_patient_script.py
"""
import os, sys, subprocess, time,importlib
import click
from luna.common.CodeTimer import CodeTimer
from luna.common.Neo4jConnection import Neo4jConnection
from luna.common.config import ConfigSet
from luna.common.sparksession import SparkConfig
from luna.common.custom_logger import init_logger
import luna.common.constants as const
import numpy as np
os.environ['OPENBLAS_NUM_THREADS'] = '1'
import pandas as pd
from joblib import Parallel, delayed
from medpy.io import load
from skimage.transform import resize
from pyspark.sql import SparkSession
from pyspark.sql import SQLContext
from pyspark.sql.functions import udf, lit, expr
from pyspark.sql.types import StringType
logger = init_logger()
APP_CFG='APP_CFG'
def generate_absolute_path_from_hdfs(absolute_hdfs_path_col, filename_col):
# strip any leading '/' so os.path.join does not treat the path as absolute and discard BASE_DIR
if absolute_hdfs_path_col[0] == '/':
absolute_hdfs_path_col = absolute_hdfs_path_col[1:]
return os.path.join(os.getenv('BASE_DIR'), absolute_hdfs_path_col, filename_col)
def process_patient_default(patient: pd.DataFrame) -> pd.DataFrame:
"""
Given a row with source and destination file paths for a single case, resamples the
acquisition (cubic interpolation) and the segmentation (nearest-neighbour) to the target
spacing and saves both as NumPy arrays.
:param patient: pandas DataFrame row with fields "preprocessed_scan_path" and "preprocessed_annotation_path"
:return: the patient row; the preprocessed paths are blanked if an input is missing or processing fails
"""
scan_absolute_hdfs_path = generate_absolute_path_from_hdfs(patient.scan_absolute_hdfs_path.item(), patient.scan_filename.item())
print("scan path", scan_absolute_hdfs_path)
annotation_absolute_hdfs_path = generate_absolute_path_from_hdfs(patient.annotation_absolute_hdfs_path.item(), patient.annotation_filename.item())
print("annot path", annotation_absolute_hdfs_path)
preprocessed_scan_path = patient.preprocessed_scan_path.item()
preprocessed_annotation_path = patient.preprocessed_annotation_path.item()
target_spacing = (patient.preprocessed_target_spacing_x, patient.preprocessed_target_spacing_y, patient.preprocessed_target_spacing_z)
if os.path.exists(preprocessed_scan_path) and os.path.exists(preprocessed_annotation_path):
print(preprocessed_scan_path + " and " + preprocessed_annotation_path + " already exists.")
logger.warning(preprocessed_scan_path + " and " + preprocessed_annotation_path + " already exists.")
return patient
if not os.path.exists(scan_absolute_hdfs_path):
print(scan_absolute_hdfs_path + " does not exist.")
logger.warning(scan_absolute_hdfs_path + " does not exist.")
patient['preprocessed_scan_path'] = ""
patient['preprocessed_annotation_path'] = ""
return patient
if not os.path.exists(annotation_absolute_hdfs_path):
print(annotation_absolute_hdfs_path + " does not exist.")
logger.warning(annotation_absolute_hdfs_path + " does not exist.")
patient['preprocessed_scan_path'] = ""
patient['preprocessed_annotation_path'] = ""
return patient
try:
img, img_header = load(scan_absolute_hdfs_path)
target_shape = calculate_target_shape(img, img_header, target_spacing)
img = resample_volume(img, 3, target_shape)
np.save(preprocessed_scan_path, img)
logger.info("saved img at " + preprocessed_scan_path)
print("saved img at " + preprocessed_scan_path)
seg, _ = load(annotation_absolute_hdfs_path)
seg = interpolate_segmentation_masks(seg, target_shape)
np.save(preprocessed_annotation_path, seg)
logger.info("saved seg at " + preprocessed_annotation_path)
print("saved seg at " + preprocessed_annotation_path)
except Exception as err:
logger.warning("failed to generate resampled volume.", err)
print("failed to generate resampled volume.", err)
patient['preprocessed_scan_path'] = ""
patient['preprocessed_annotation_path'] = ""
return patient
def interpolate_segmentation_masks(seg, target_shape):
"""
Use NN interpolation for segmentation masks by resampling boolean masks for each value present.
:param seg: as numpy.ndarray
:param target_shape: as tuple or list
:return: new segmentation as numpy.ndarray
"""
new_seg = np.zeros(target_shape).astype(int)
for roi in np.unique(seg):
if roi == 0:
continue
mask = resample_volume(seg == roi, 0, target_shape).astype(bool)
new_seg[mask] = int(roi)
return new_seg
def generate_preprocessed_filename(id, suffix, processed_dir):
"""
Generates target NumPy file path for preprocessed segmentation or acquisition.
:param id: record uuid of the case
:param suffix: "_annotation" or "_scan", depending on which item is being saved.
:param processed_dir: path to save .npy files.
:return: target file path
"""
file_name = "".join((processed_dir, str(id), suffix, ".npy"))
return file_name
def calculate_target_shape(volume, header, target_spacing):
"""
:param volume: as numpy.ndarray
:param header: ITK-SNAP header
:param target_spacing: as tuple or list
:return: target_shape as list
"""
src_spacing = header.get_voxel_spacing()
target_shape = [int(src_d * src_sp / tar_sp) for src_d, src_sp, tar_sp in
zip(volume.shape, src_spacing, target_spacing)]
return target_shape
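# Worked example (illustrative numbers only): a 512 x 512 x 100 volume with source
# spacing (0.7, 0.7, 2.5) mm resampled to a target spacing of (1.0, 1.0, 3.0) mm gives
# target_shape = [int(512*0.7/1.0), int(512*0.7/1.0), int(100*2.5/3.0)] = [358, 358, 83].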
def resample_volume(volume, order, target_shape):
"""
Resamples volume using order specified.
:param volume: as numpy.ndarray
:param order: 0 for NN (for segmentation), 3 for cubic (recommended for acquisition)
:param target_shape: as tuple or list
:return: Resampled volume as numpy.ndarray
"""
if order == 0:
anti_alias = False
else:
anti_alias = True
volume = resize(volume, target_shape,
order=order, clip=True, mode='edge',
preserve_range=True, anti_aliasing=anti_alias)
return volume
def get_dmp_from_scan(conn, query_id):
result = [ x.data() for x in conn.query(f"MATCH (patient:dmp_patient_id)-[:PX_TO_RAD]-(rad)-[:HAS_SCAN]-(scan) WHERE scan.value='{query_id}' RETURN patient, rad, scan") ]
if len(result) >= 1:
return result[0]['patient']['value']
return ""
@click.command()
@click.option('-q', '--query', default = None,
help = "where clause of SQL query to filter feature table, 'WHERE' does not need to be included, "
"but make sure to wrap with quotes to be interpretted correctly")
@click.option('-b', '--base_directory', type=click.Path(exists=True), required=True,
help="location to find scan/annotation tables")
@click.option('-d', '--destination_directory', type=click.Path(exists=False), default="",
help="location where feature table should be written. If not specified, destination_directory "
"defaults to base directory")
@click.option('-t', '--target_spacing', nargs=3, type=float, required=True,
help="target spacing for x,y and z dimensions")
@click.option('-n', '--feature_table_output_name', default="feature-table",
help="name of new feature table that is created.")
@click.option('-c', '--custom_preprocessing_script', default = None,
help="Path to python file containing custom 'process_patient' method. By default,"
" uses process_patient_default() for preprocessing")
@click.option('-f', '--config_file', default = 'config.yaml',
help="path to config file containing application configuration. See config.yaml.template")
def cli(base_directory,
destination_directory,
target_spacing,
query,
feature_table_output_name,
custom_preprocessing_script,
config_file):
"""
This module pre-processes the CE-CT acquisitions and associated segmentations and generates
a DataFrame tracking the file paths of the pre-processed items, stored as NumPy ndarrays.
Example: python preprocess_feature.py --base_directory {directory/to/tables} --target_spacing {x_spacing} {y_spacing} {z_spacing} --query "{sql where clause}" --feature_table_output_name {name-of-table-to-be-created}
"""
if len(destination_directory) == 0:
destination_directory = base_directory
if not os.path.exists(destination_directory):
os.makedirs(destination_directory)
with CodeTimer(logger, 'generate feature table'):
# setup env vars from arguments
os.environ['BASE_DIR'] = base_directory
# Setup Spark context
ConfigSet(name=APP_CFG, config_file=config_file)
spark = SparkConfig().spark_session(config_name=APP_CFG, app_name="preprocess_feature")
generate_feature_table(base_directory,
destination_directory,
target_spacing,
spark,
query,
feature_table_output_name,
custom_preprocessing_script)
def generate_feature_table(base_directory, destination_directory, target_spacing, spark, query, feature_table_output_name, custom_preprocessing_script):
try:
assert(os.getenv('BASE_DIR') != None)
except AssertionError as err:
logger.exception("env var BASE_DIR has not been set!")
raise err
annotation_table = os.path.join(base_directory, const.SCAN_ANNOTATION_TABLE)
scan_table = os.path.join(base_directory, const.SCAN_TABLE)
feature_table = os.path.join(destination_directory, "tables/"+str(feature_table_output_name)+"/")
feature_files = os.path.join(destination_directory, "features/"+str(feature_table_output_name)+"/")
# Load Annotation table and rename columns before merge
annot_df = spark.read.format("delta").load(annotation_table)
rename_annotation_columns = ["absolute_hdfs_path", "absolute_hdfs_host", "filename", "type","payload_number"]
for col in rename_annotation_columns:
annot_df = annot_df.withColumnRenamed(col,("annotation_"+col))
annot_df.show(truncate=False)
# Load Scan Table, filter by mhd [no zraw] and rename columns for merging
scan_df = spark.read.format("delta").load(scan_table)
rename_scan_columns = ["absolute_hdfs_path", "absolute_hdfs_host", "filename", "type","payload_number", "item_number"]
for col in rename_scan_columns:
scan_df = scan_df.withColumnRenamed(col,("scan_"+col))
scan_df.createOrReplaceTempView("scan")
scan_df = spark.sql("SELECT * from scan where scan_type='.mhd'")
scan_df.show(truncate=False)
# join scan and annotation tables
generate_preprocessed_filename_udf = udf(generate_preprocessed_filename, StringType())
df = annot_df.join(scan_df, ['SeriesInstanceUID'])
df = df.withColumn("preprocessed_annotation_path", lit(generate_preprocessed_filename_udf(df.annotation_record_uuid, lit('_annotation'), lit(feature_files) )))
df = df.withColumn("preprocessed_scan_path", lit(generate_preprocessed_filename_udf(df.scan_record_uuid, lit('_scan'), lit(feature_files) )))
# Add target spacing individually so they can be extracted during row processing
df = df.withColumn("preprocessed_target_spacing_x", lit(target_spacing[0]))
df = df.withColumn("preprocessed_target_spacing_y", lit(target_spacing[1]))
df = df.withColumn("preprocessed_target_spacing_z", lit(target_spacing[2]))
df = df.withColumn("feature_record_uuid", expr("uuid()"))
df.show(truncate=False)
# sql processing on joined table if specified
if query:
sql_query = "SELECT * from feature where " + str(query)
df.createOrReplaceTempView("feature")
try:
df = spark.sql(sql_query)
except Exception as err:
logger.error("Exception while running spark sql query \"{}\"".format(sql_query), err)
return
# If query doesn't return anything, do not proceed.
if df.count() == 0:
err_msg = "query \"{}\" has no match. Please revisit your query.".format(sql_query)
logger.error(err_msg)
return
# Resample segmentation and images
if not os.path.exists(feature_files):
os.makedirs(feature_files)
# Preprocess Features Using Pandas DF and applyInPandas() [Apache Arrow]:
if custom_preprocessing_script:
# use external preprocessing script
# add path to python os sys
sys.path.append(os.path.dirname(custom_preprocessing_script))
# import python file containing process_patient method (without the .py extension)
# Copyright (c) 2014-2019 The Khronos Group Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and/or associated documentation files (the "Materials"),
# to deal in the Materials without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Materials, and to permit persons to whom the
# Materials are furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Materials.
#
# MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
# STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
# HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
#
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
# IN THE MATERIALS.
# This header is automatically generated by the same tool that creates
# the Binary Section of the SPIR-V specification.
# Enumeration tokens for SPIR-V, in various styles:
# C, C++, C++11, JSON, Lua, Python, C#, D
#
# - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
# - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
# - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
# - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
# - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
# - C# will use enum classes in the Specification class located in the "Spv" namespace,
# e.g.: Spv.Specification.SourceLanguage.GLSL
# - D will have tokens under the "spv" module, e.g: spv.SourceLanguage.GLSL
#
# Some tokens act like mask values, which can be OR'd together,
# while others are mutually exclusive. The mask-like ones have
# "Mask" in their name, and a parallel enum that has the shift
# amount (1 << x) for each corresponding enumerant.
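#
# Illustrative example (added for clarity, not part of the generated header):
# a mask value is built by OR-ing "...Mask" entries, and each mask bit equals
# 1 shifted left by the matching "...Shift" enumerant, e.g.
#
#     operands = spv["ImageOperandsMask"]["Lod"] | spv["ImageOperandsMask"]["Offset"]  # 0x00000012
#     assert spv["ImageOperandsMask"]["Lod"] == 1 << spv["ImageOperandsShift"]["Lod"]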
spv = {
"MagicNumber": 0x07230203,
"Version": 0x00010500,
"Revision": 1,
"OpCodeMask": 0xFFFF,
"WordCountShift": 16,
"SourceLanguage": {
"Unknown": 0,
"ESSL": 1,
"GLSL": 2,
"OpenCL_C": 3,
"OpenCL_CPP": 4,
"HLSL": 5,
},
"ExecutionModel": {
"Vertex": 0,
"TessellationControl": 1,
"TessellationEvaluation": 2,
"Geometry": 3,
"Fragment": 4,
"GLCompute": 5,
"Kernel": 6,
"TaskNV": 5267,
"MeshNV": 5268,
"RayGenerationNV": 5313,
"IntersectionNV": 5314,
"AnyHitNV": 5315,
"ClosestHitNV": 5316,
"MissNV": 5317,
"CallableNV": 5318,
},
"AddressingModel": {
"Logical": 0,
"Physical32": 1,
"Physical64": 2,
"PhysicalStorageBuffer64": 5348,
"PhysicalStorageBuffer64EXT": 5348,
},
"MemoryModel": {
"Simple": 0,
"GLSL450": 1,
"OpenCL": 2,
"Vulkan": 3,
"VulkanKHR": 3,
},
"ExecutionMode": {
"Invocations": 0,
"SpacingEqual": 1,
"SpacingFractionalEven": 2,
"SpacingFractionalOdd": 3,
"VertexOrderCw": 4,
"VertexOrderCcw": 5,
"PixelCenterInteger": 6,
"OriginUpperLeft": 7,
"OriginLowerLeft": 8,
"EarlyFragmentTests": 9,
"PointMode": 10,
"Xfb": 11,
"DepthReplacing": 12,
"DepthGreater": 14,
"DepthLess": 15,
"DepthUnchanged": 16,
"LocalSize": 17,
"LocalSizeHint": 18,
"InputPoints": 19,
"InputLines": 20,
"InputLinesAdjacency": 21,
"Triangles": 22,
"InputTrianglesAdjacency": 23,
"Quads": 24,
"Isolines": 25,
"OutputVertices": 26,
"OutputPoints": 27,
"OutputLineStrip": 28,
"OutputTriangleStrip": 29,
"VecTypeHint": 30,
"ContractionOff": 31,
"Initializer": 33,
"Finalizer": 34,
"SubgroupSize": 35,
"SubgroupsPerWorkgroup": 36,
"SubgroupsPerWorkgroupId": 37,
"LocalSizeId": 38,
"LocalSizeHintId": 39,
"PostDepthCoverage": 4446,
"DenormPreserve": 4459,
"DenormFlushToZero": 4460,
"SignedZeroInfNanPreserve": 4461,
"RoundingModeRTE": 4462,
"RoundingModeRTZ": 4463,
"StencilRefReplacingEXT": 5027,
"OutputLinesNV": 5269,
"OutputPrimitivesNV": 5270,
"DerivativeGroupQuadsNV": 5289,
"DerivativeGroupLinearNV": 5290,
"OutputTrianglesNV": 5298,
"PixelInterlockOrderedEXT": 5366,
"PixelInterlockUnorderedEXT": 5367,
"SampleInterlockOrderedEXT": 5368,
"SampleInterlockUnorderedEXT": 5369,
"ShadingRateInterlockOrderedEXT": 5370,
"ShadingRateInterlockUnorderedEXT": 5371,
},
"StorageClass": {
"UniformConstant": 0,
"Input": 1,
"Uniform": 2,
"Output": 3,
"Workgroup": 4,
"CrossWorkgroup": 5,
"Private": 6,
"Function": 7,
"Generic": 8,
"PushConstant": 9,
"AtomicCounter": 10,
"Image": 11,
"StorageBuffer": 12,
"CallableDataNV": 5328,
"IncomingCallableDataNV": 5329,
"RayPayloadNV": 5338,
"HitAttributeNV": 5339,
"IncomingRayPayloadNV": 5342,
"ShaderRecordBufferNV": 5343,
"PhysicalStorageBuffer": 5349,
"PhysicalStorageBufferEXT": 5349,
},
"Dim": {
"Dim1D": 0,
"Dim2D": 1,
"Dim3D": 2,
"Cube": 3,
"Rect": 4,
"Buffer": 5,
"SubpassData": 6,
},
"SamplerAddressingMode": {
"None": 0,
"ClampToEdge": 1,
"Clamp": 2,
"Repeat": 3,
"RepeatMirrored": 4,
},
"SamplerFilterMode": {"Nearest": 0, "Linear": 1},
"ImageFormat": {
"Unknown": 0,
"Rgba32f": 1,
"Rgba16f": 2,
"R32f": 3,
"Rgba8": 4,
"Rgba8Snorm": 5,
"Rg32f": 6,
"Rg16f": 7,
"R11fG11fB10f": 8,
"R16f": 9,
"Rgba16": 10,
"Rgb10A2": 11,
"Rg16": 12,
"Rg8": 13,
"R16": 14,
"R8": 15,
"Rgba16Snorm": 16,
"Rg16Snorm": 17,
"Rg8Snorm": 18,
"R16Snorm": 19,
"R8Snorm": 20,
"Rgba32i": 21,
"Rgba16i": 22,
"Rgba8i": 23,
"R32i": 24,
"Rg32i": 25,
"Rg16i": 26,
"Rg8i": 27,
"R16i": 28,
"R8i": 29,
"Rgba32ui": 30,
"Rgba16ui": 31,
"Rgba8ui": 32,
"R32ui": 33,
"Rgb10a2ui": 34,
"Rg32ui": 35,
"Rg16ui": 36,
"Rg8ui": 37,
"R16ui": 38,
"R8ui": 39,
},
"ImageChannelOrder": {
"R": 0,
"A": 1,
"RG": 2,
"RA": 3,
"RGB": 4,
"RGBA": 5,
"BGRA": 6,
"ARGB": 7,
"Intensity": 8,
"Luminance": 9,
"Rx": 10,
"RGx": 11,
"RGBx": 12,
"Depth": 13,
"DepthStencil": 14,
"sRGB": 15,
"sRGBx": 16,
"sRGBA": 17,
"sBGRA": 18,
"ABGR": 19,
},
"ImageChannelDataType": {
"SnormInt8": 0,
"SnormInt16": 1,
"UnormInt8": 2,
"UnormInt16": 3,
"UnormShort565": 4,
"UnormShort555": 5,
"UnormInt101010": 6,
"SignedInt8": 7,
"SignedInt16": 8,
"SignedInt32": 9,
"UnsignedInt8": 10,
"UnsignedInt16": 11,
"UnsignedInt32": 12,
"HalfFloat": 13,
"Float": 14,
"UnormInt24": 15,
"UnormInt101010_2": 16,
},
"ImageOperandsShift": {
"Bias": 0,
"Lod": 1,
"Grad": 2,
"ConstOffset": 3,
"Offset": 4,
"ConstOffsets": 5,
"Sample": 6,
"MinLod": 7,
"MakeTexelAvailable": 8,
"MakeTexelAvailableKHR": 8,
"MakeTexelVisible": 9,
"MakeTexelVisibleKHR": 9,
"NonPrivateTexel": 10,
"NonPrivateTexelKHR": 10,
"VolatileTexel": 11,
"VolatileTexelKHR": 11,
"SignExtend": 12,
"ZeroExtend": 13,
},
"ImageOperandsMask": {
"MaskNone": 0,
"Bias": 0x00000001,
"Lod": 0x00000002,
"Grad": 0x00000004,
"ConstOffset": 0x00000008,
"Offset": 0x00000010,
"ConstOffsets": 0x00000020,
"Sample": 0x00000040,
"MinLod": 0x00000080,
"MakeTexelAvailable": 0x00000100,
"MakeTexelAvailableKHR": 0x00000100,
"MakeTexelVisible": 0x00000200,
"MakeTexelVisibleKHR": 0x00000200,
"NonPrivateTexel": 0x00000400,
"NonPrivateTexelKHR": 0x00000400,
"VolatileTexel": 0x00000800,
"VolatileTexelKHR": 0x00000800,
"SignExtend": 0x00001000,
"ZeroExtend": 0x00002000,
},
"FPFastMathModeShift": {
"NotNaN": 0,
"NotInf": 1,
"NSZ": 2,
"AllowRecip": 3,
"Fast": 4,
},
"FPFastMathModeMask": {
"MaskNone": 0,
"NotNaN": 0x00000001,
"NotInf": 0x00000002,
"NSZ": 0x00000004,
"AllowRecip": 0x00000008,
"Fast": 0x00000010,
},
"FPRoundingMode": {"RTE": 0, "RTZ": 1, "RTP": 2, "RTN": 3},
"LinkageType": {"Export": 0, "Import": 1},
"AccessQualifier": {"ReadOnly": 0, "WriteOnly": 1, "ReadWrite": 2},
"FunctionParameterAttribute": {
"Zext": 0,
"Sext": 1,
"ByVal": 2,
"Sret": 3,
"NoAlias": 4,
"NoCapture": 5,
"NoWrite": 6,
"NoReadWrite": 7,
},
"Decoration": {
"RelaxedPrecision": 0,
"SpecId": 1,
"Block": 2,
"BufferBlock": 3,
"RowMajor": 4,
"ColMajor": 5,
"ArrayStride": 6,
"MatrixStride": 7,
"GLSLShared": 8,
"GLSLPacked": 9,
"CPacked": 10,
"BuiltIn": 11,
"NoPerspective": 13,
"Flat": 14,
"Patch": 15,
"Centroid": 16,
"Sample": 17,
"Invariant": 18,
"Restrict": 19,
"Aliased": 20,
"Volatile": 21,
"Constant": 22,
"Coherent": 23,
"NonWritable": 24,
"NonReadable": 25,
"Uniform": 26,
"UniformId": 27,
"SaturatedConversion": 28,
"Stream": 29,
"Location": 30,
"Component": 31,
"Index": 32,
"Binding": 33,
"DescriptorSet": 34,
"Offset": 35,
"XfbBuffer": 36,
"XfbStride": 37,
"FuncParamAttr": 38,
"FPRoundingMode": 39,
"FPFastMathMode": 40,
"LinkageAttributes": 41,
"NoContraction": 42,
"InputAttachmentIndex": 43,
"Alignment": 44,
"MaxByteOffset": 45,
"AlignmentId": 46,
"MaxByteOffsetId": 47,
"NoSignedWrap": 4469,
"NoUnsignedWrap": 4470,
"ExplicitInterpAMD": 4999,
"OverrideCoverageNV": 5248,
"PassthroughNV": 5250,
"ViewportRelativeNV": 5252,
"SecondaryViewportRelativeNV": 5256,
"PerPrimitiveNV": 5271,
"PerViewNV": 5272,
"PerTaskNV": 5273,
"PerVertexNV": 5285,
"NonUniform": 5300,
"NonUniformEXT": 5300,
"RestrictPointer": 5355,
"RestrictPointerEXT": 5355,
"AliasedPointer": 5356,
"AliasedPointerEXT": 5356,
"CounterBuffer": 5634,
"HlslCounterBufferGOOGLE": 5634,
"HlslSemanticGOOGLE": 5635,
"UserSemantic": 5635,
"UserTypeGOOGLE": 5636,
},
"BuiltIn": {
"Position": 0,
"PointSize": 1,
"ClipDistance": 3,
"CullDistance": 4,
"VertexId": 5,
"InstanceId": 6,
"PrimitiveId": 7,
"InvocationId": 8,
"Layer": 9,
"ViewportIndex": 10,
"TessLevelOuter": 11,
"TessLevelInner": 12,
"TessCoord": 13,
"PatchVertices": 14,
"FragCoord": 15,
"PointCoord": 16,
"FrontFacing": 17,
"SampleId": 18,
"SamplePosition": 19,
"SampleMask": 20,
"FragDepth": 22,
"HelperInvocation": 23,
"NumWorkgroups": 24,
"WorkgroupSize": 25,
"WorkgroupId": 26,
"LocalInvocationId": 27,
"GlobalInvocationId": 28,
"LocalInvocationIndex": 29,
"WorkDim": 30,
"GlobalSize": 31,
"EnqueuedWorkgroupSize": 32,
"GlobalOffset": 33,
"GlobalLinearId": 34,
"SubgroupSize": 36,
"SubgroupMaxSize": 37,
"NumSubgroups": 38,
"NumEnqueuedSubgroups": 39,
"SubgroupId": 40,
"SubgroupLocalInvocationId": 41,
"VertexIndex": 42,
"InstanceIndex": 43,
"SubgroupEqMask": 4416,
"SubgroupEqMaskKHR": 4416,
"SubgroupGeMask": 4417,
"SubgroupGeMaskKHR": 4417,
"SubgroupGtMask": 4418,
"SubgroupGtMaskKHR": 4418,
"SubgroupLeMask": 4419,
"SubgroupLeMaskKHR": 4419,
"SubgroupLtMask": 4420,
"SubgroupLtMaskKHR": 4420,
"BaseVertex": 4424,
"BaseInstance": 4425,
"DrawIndex": 4426,
"DeviceIndex": 4438,
"ViewIndex": 4440,
"BaryCoordNoPerspAMD": 4992,
"BaryCoordNoPerspCentroidAMD": 4993,
"BaryCoordNoPerspSampleAMD": 4994,
"BaryCoordSmoothAMD": 4995,
"BaryCoordSmoothCentroidAMD": 4996,
"BaryCoordSmoothSampleAMD": 4997,
"BaryCoordPullModelAMD": 4998,
"FragStencilRefEXT": 5014,
"ViewportMaskNV": 5253,
"SecondaryPositionNV": 5257,
"SecondaryViewportMaskNV": 5258,
"PositionPerViewNV": 5261,
"ViewportMaskPerViewNV": 5262,
"FullyCoveredEXT": 5264,
"TaskCountNV": 5274,
"PrimitiveCountNV": 5275,
"PrimitiveIndicesNV": 5276,
"ClipDistancePerViewNV": 5277,
"CullDistancePerViewNV": 5278,
"LayerPerViewNV": 5279,
"MeshViewCountNV": 5280,
"MeshViewIndicesNV": 5281,
"BaryCoordNV": 5286,
"BaryCoordNoPerspNV": 5287,
"FragSizeEXT": 5292,
"FragmentSizeNV": 5292,
"FragInvocationCountEXT": 5293,
"InvocationsPerPixelNV": 5293,
"LaunchIdNV": 5319,
"LaunchSizeNV": 5320,
"WorldRayOriginNV": 5321,
"WorldRayDirectionNV": 5322,
"ObjectRayOriginNV": 5323,
"ObjectRayDirectionNV": 5324,
"RayTminNV": 5325,
"RayTmaxNV": 5326,
"InstanceCustomIndexNV": 5327,
"ObjectToWorldNV": 5330,
"WorldToObjectNV": 5331,
"HitTNV": 5332,
"HitKindNV": 5333,
"IncomingRayFlagsNV": 5351,
"WarpsPerSMNV": 5374,
"SMCountNV": | |
from deepmechanics.cell import QuadCell
from deepmechanics.utilities import make_array_unique, tensorize_1d, tensorize_2d
class Grid:
def __init__(self, spatial_dimensions):
self.spatial_dimensions = spatial_dimensions
self.base_cells = []
self._leaf_cells = []
self._active_leaf_cells = []
self._refinement_strategy = None
def generate(self):
pass
@property
def refinement_strategy(self):
if self._refinement_strategy is None:
raise ValueError("Refinement strategy is not initialized")
return self._refinement_strategy
@refinement_strategy.setter
def refinement_strategy(self, value):
self._refinement_strategy = value
@property
def leaf_cells(self):
self._leaf_cells.clear()
for cell in self.base_cells:
self._leaf_cells += cell.leaves
return self._leaf_cells
@property
def active_leaf_cells(self):
self._active_leaf_cells.clear()
for cell in self.base_cells:
self._active_leaf_cells += cell.active_leaves
return self._active_leaf_cells
def refine(self):
self.refinement_strategy.refine(self)
class PlanarCartesianGrid(Grid):
def __init__(self, x_start, y_start, x_end, y_end, resolution_x, resolution_y):
super().__init__(2)
self.x_start = x_start
self.y_start = y_start
self.x_end = x_end
self.y_end = y_end
self.resolution_x = resolution_x
self.resolution_y = resolution_y
self.generate()
def generate(self):
if self.base_cells:
raise ValueError("Grid already generated!")
dx = self.length_x / self.resolution_x
dy = self.length_y / self.resolution_y
for j in range(self.resolution_y):
for i in range(self.resolution_x):
x_start_cell = self.x_start + dx * i
x_end_cell = x_start_cell + dx
y_start_cell = self.y_start + dy * j
y_end_cell = y_start_cell + dy
self.base_cells.append(QuadCell(x_start_cell, y_start_cell, x_end_cell, y_end_cell))
def triangulate(self):
triangles = []
for i in range(len(self.active_leaf_cells)):
triangles.append([4*i, 4*i+1, 4*i+3])
triangles.append([4*i, 4*i+3, 4*i+2])
return triangles
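# Illustrative trace (not part of the original source): with two active leaf cells,
# whose corner vertices are numbered 0-3 and 4-7 respectively, triangulate() returns
# [[0, 1, 3], [0, 3, 2], [4, 5, 7], [4, 7, 6]], i.e. each quad is split into two triangles.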
@property
def top_base_cells(self):
return self.base_cells[-self.resolution_x:]
@property
def top_leaf_cells(self):
leaf_cells = []
for cell in self.top_base_cells:
leaf_cells += cell.top_leaves
return leaf_cells
@property
def bottom_base_cells(self):
return self.base_cells[:self.resolution_x]
@property
def bottom_leaf_cells(self):
leaf_cells = []
for cell in self.bottom_base_cells:
leaf_cells += cell.bottom_leaves
return leaf_cells
@property
def right_base_cells(self):
return self.base_cells[self.i_end::self.resolution_x]
@property
def right_leaf_cells(self):
leaf_cells = []
for cell in self.right_base_cells:
leaf_cells += cell.right_leaves
return leaf_cells
@property
def left_base_cells(self):
return self.base_cells[::self.resolution_x]
@property
def left_leaf_cells(self):
leaf_cells = []
for cell in self.left_base_cells:
leaf_cells += cell.left_leaves
return leaf_cells
@property
def length_x(self):
return self.x_end - self.x_start
@property
def length_y(self):
return self.y_end - self.y_start
@property
def i_end(self):
return self.resolution_x - 1
@property
def j_end(self):
return self.resolution_y - 1
@property
def integration_point_coords(self):
all_xs = []
all_ys = []
for cell in self.active_leaf_cells:
xs, ys = cell.integration_point_coords
all_xs += xs
all_ys += ys
return all_xs, all_ys
@property
def integration_point_weights(self):
weights = []
for cell in self.active_leaf_cells:
weights += cell.integration_point_weights
return weights
@property
def integration_point_jacobian_dets(self):
jacobian_dets = []
for cell in self.active_leaf_cells:
jacobian_dets += cell.integration_point_jacobian_dets
return jacobian_dets
@property
def top_edge_integration_point_coords(self):
all_xs = []
all_ys = []
for cell in self.top_base_cells:
xs, ys = cell.top_edge_integration_point_coords
all_xs += xs
all_ys += ys
return all_xs, all_ys
@property
def top_edge_integration_point_weights(self):
weights = []
for cell in self.top_base_cells:
weights += cell.top_edge_integration_point_weights
return weights
@property
def top_edge_integration_point_jacobian_dets(self):
jacobian_dets = []
for cell in self.top_base_cells:
jacobian_dets += cell.top_edge_integration_point_jacobian_dets
return jacobian_dets
@property
def bottom_edge_integration_point_coords(self):
all_xs = []
all_ys = []
for cell in self.bottom_base_cells:
xs, ys = cell.bottom_edge_integration_point_coords
all_xs += xs
all_ys += ys
return all_xs, all_ys
@property
def bottom_edge_integration_point_weights(self):
weights = []
for cell in self.bottom_base_cells:
weights += cell.bottom_edge_integration_point_weights
return weights
@property
def bottom_edge_integration_point_jacobian_dets(self):
jacobian_dets = []
for cell in self.bottom_base_cells:
jacobian_dets += cell.bottom_edge_integration_point_jacobian_dets
return jacobian_dets
@property
def right_edge_integration_point_coords(self):
all_xs = []
all_ys = []
for cell in self.right_base_cells:
xs, ys = cell.right_edge_integration_point_coords
all_xs += xs
all_ys += ys
return all_xs, all_ys
@property
def right_edge_integration_point_weights(self):
weights = []
for cell in self.right_base_cells:
weights += cell.right_edge_integration_point_weights
return weights
@property
def right_edge_integration_point_jacobian_dets(self):
jacobian_dets = []
for cell in self.right_base_cells:
jacobian_dets += cell.right_edge_integration_point_jacobian_dets
return jacobian_dets
@property
def left_edge_integration_point_coords(self):
all_xs = []
all_ys = []
for cell in self.left_base_cells:
xs, ys = cell.left_edge_integration_point_coords
all_xs += xs
all_ys += ys
return all_xs, all_ys
@property
def left_edge_integration_point_weights(self):
weights = []
for cell in self.left_base_cells:
weights += cell.left_edge_integration_point_weights
return weights
@property
def left_edge_integration_point_jacobian_dets(self):
jacobian_dets = []
for cell in self.left_base_cells:
jacobian_dets += cell.left_edge_integration_point_jacobian_dets
return jacobian_dets
@property
def top_coords(self):
all_xs = []
all_ys = []
for i in range(self.resolution_x):
cell = self.get_cell_at_indices(i, self.j_end)
for leaf in cell.leaves:
xs, ys = leaf.top_coords
if self.y_end in ys:
all_xs += xs
all_xs = make_array_unique(all_xs)
all_xs.sort()
all_ys = [self.y_end] * len(all_xs)
return all_xs, all_ys
@property
def bottom_coords(self):
all_xs = []
all_ys = []
for i in range(self.resolution_x):
cell = self.get_cell_at_indices(i, 0)
for leaf in cell.leaves:
xs, ys = leaf.bottom_coords
if self.y_start in ys:
all_xs += xs
all_xs = make_array_unique(all_xs)
all_xs.sort()
all_ys = [self.y_start] * len(all_xs)
return all_xs, all_ys
@property
def right_coords(self):
all_xs = []
all_ys = []
for j in range(self.resolution_y):
cell = self.get_cell_at_indices(self.i_end, j)
for leaf in cell.leaves:
xs, ys = leaf.right_coords
if self.x_end in xs:
all_ys += ys
all_ys = make_array_unique(all_ys)
all_ys.sort()
all_xs = [self.x_end] * len(all_ys)
return all_xs, all_ys
@property
def left_coords(self):
all_xs = []
all_ys = []
for j in range(self.resolution_y):
cell = self.get_cell_at_indices(0, j)
for leaf in cell.leaves:
xs, ys = leaf.left_coords
if self.x_start in xs:
all_ys += ys
all_ys = make_array_unique(all_ys)
all_ys.sort()
all_xs = [self.x_start] * len(all_ys)
return all_xs, all_ys
@property
def corner_coords(self):
all_xs = []
all_ys = []
for cell in self.active_leaf_cells:
xs, ys = cell.corner_coords
all_xs += xs
all_ys += ys
return all_xs, all_ys
def get_samples(self, filter=None, number_of_samples_x=100, number_of_samples_y=100):
all_xs = []
all_ys = []
dx = self.length_x / (number_of_samples_x - 1)
dy = self.length_y / (number_of_samples_y - 1)
for i in range(number_of_samples_x):
x = self.x_start + i * dx
for j in range(number_of_samples_y):
y = self.y_start + j * dy
if filter is None:
all_xs.append(x)
all_ys.append(y)
elif filter(x, y):
all_xs.append(x)
all_ys.append(y)
return all_xs, all_ys
def set_active_state_with_filter(self, filter, seeds_per_side=10):
for cell in self.leaf_cells:
cell.is_active = cell.is_inside(filter, seeds_per_side)
def _index_exists(self, i, j):
return 0 <= i <= self.i_end and 0 <= j <= self.j_end
def get_cell_at_indices(self, i, j):
if self._index_exists(i, j):
return self.base_cells[j * self.resolution_x + i]
raise ValueError("Indices ({},{}) are outside the grid".format(i, j))
def _point_is_inside_grid(self, x, y):
return self.x_start <= x <= self.x_end and self.y_start <= y <= self.y_end
def get_cell_indices_from_coords(self, x, y):
if self._point_is_inside_grid(x, y):
            # map the point to base-cell indices; clamp so points on the far edges land in the last cell
            i = min(int((x - self.x_start) / self.length_x * self.resolution_x), self.i_end)
            j = min(int((y - self.y_start) / self.length_y * self.resolution_y), self.j_end)
return i, j
raise ValueError("Point ({},{}) is outside the grid".format(x, y))
def get_cell_from_coords(self, x, y):
i, j = self.get_cell_indices_from_coords(x, y)
return self.get_cell_at_indices(i, j)
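# Illustrative usage sketch (added for clarity, not part of the original module). The
# concrete numbers are hypothetical; QuadCell is assumed to provide the interface used
# by the class above. The helper is never called, it only documents the intended API.
def _example_planar_grid_usage():
    # 4 x 2 base cells covering [0, 2] x [0, 1], so dx = dy = 0.5
    grid = PlanarCartesianGrid(0.0, 0.0, 2.0, 1.0, resolution_x=4, resolution_y=2)
    assert len(grid.base_cells) == 4 * 2
    # base cells are stored row by row, so index (i, j) maps to base_cells[j * resolution_x + i]
    cell = grid.get_cell_at_indices(1, 0)
    i, j = grid.get_cell_indices_from_coords(0.6, 0.3)  # expected (1, 0)
    assert grid.get_cell_from_coords(0.6, 0.3) is grid.get_cell_at_indices(i, j)
    return cell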
class TensorizedPlanarCartesianGrid(PlanarCartesianGrid):
def __init__(self, x_start, y_start, x_end, y_end, resolution_x, resolution_y):
super().__init__(x_start, y_start, x_end, y_end, resolution_x, resolution_y)
        # Cached values for efficiency
self._integration_point_coords = None
self._integration_point_weights = None
self._integration_point_jacobian_dets = None
self._integration_point_xs = None
self._integration_point_ys = None
self._top_edge_integration_point_coords = None
self._top_edge_integration_point_weights = None
self._top_edge_integration_point_jacobian_dets = None
self._top_edge_integration_point_xs = None
self._top_edge_integration_point_ys = None
self._bottom_edge_integration_point_coords = None
self._bottom_edge_integration_point_weights = None
self._bottom_edge_integration_point_jacobian_dets = None
self._bottom_edge_integration_point_xs = None
self._bottom_edge_integration_point_ys = None
self._right_edge_integration_point_coords = None
self._right_edge_integration_point_weights = None
self._right_edge_integration_point_jacobian_dets = None
self._right_edge_integration_point_xs = None
self._right_edge_integration_point_ys = None
self._left_edge_integration_point_coords = None
self._left_edge_integration_point_weights = None
self._left_edge_integration_point_jacobian_dets = None
self._left_edge_integration_point_xs = None
self._left_edge_integration_point_ys = None
self._samples_coords = None
self._samples_xs = None
self._samples_ys = None
@property
def integration_point_coords(self):
if self._integration_point_coords is None:
xs, ys = super().integration_point_coords
self._integration_point_coords = tensorize_2d(xs, ys)
return self._integration_point_coords
@property
def integration_point_weights(self):
if self._integration_point_weights is None:
weights = super().integration_point_weights
self._integration_point_weights = tensorize_1d(weights)
return self._integration_point_weights
@property
def integration_point_jacobian_dets(self):
if self._integration_point_jacobian_dets is None:
jacobian_dets = super().integration_point_jacobian_dets
self._integration_point_jacobian_dets = tensorize_1d(jacobian_dets)
return self._integration_point_jacobian_dets
@property
def integration_point_xs(self):
if self._integration_point_xs is None:
self._integration_point_xs = self.integration_point_coords[:, 0].view(-1, 1)
return self._integration_point_xs
@property
def integration_point_ys(self):
if self._integration_point_ys is None:
self._integration_point_ys = self.integration_point_coords[:, 1].view(-1, 1)
return self._integration_point_ys
@property
def integration_points_data(self):
return self.integration_point_coords, self.integration_point_weights, self.integration_point_jacobian_dets
@property
def top_edge_integration_point_coords(self):
if self._top_edge_integration_point_coords is None:
xs, ys = super().top_edge_integration_point_coords
self._top_edge_integration_point_coords = tensorize_2d(xs, ys)
return self._top_edge_integration_point_coords
@property
def top_edge_integration_point_weights(self):
if self._top_edge_integration_point_weights is None:
weights = super().top_edge_integration_point_weights
self._top_edge_integration_point_weights = tensorize_1d(weights)
return self._top_edge_integration_point_weights
@property
def top_edge_integration_point_jacobian_dets(self):
if self._top_edge_integration_point_jacobian_dets is None:
jacobian_dets = super().top_edge_integration_point_jacobian_dets
self._top_edge_integration_point_jacobian_dets = tensorize_1d(jacobian_dets)
return self._top_edge_integration_point_jacobian_dets
@property
def top_edge_integration_point_xs(self):
if self._top_edge_integration_point_xs is None:
self._top_edge_integration_point_xs = self.top_edge_integration_point_coords[:, 0].view(-1, 1)
return self._top_edge_integration_point_xs
@property
def top_edge_integration_point_ys(self):
if self._top_edge_integration_point_ys is None:
self._top_edge_integration_point_ys = self.top_edge_integration_point_coords[:, 1].view(-1, 1)
return self._top_edge_integration_point_ys
@property
def top_edge_integration_points_data(self):
return self.top_edge_integration_point_coords, self.top_edge_integration_point_weights, self.top_edge_integration_point_jacobian_dets
@property
def bottom_edge_integration_point_coords(self):
if self._bottom_edge_integration_point_coords is None:
xs, ys = super().bottom_edge_integration_point_coords
self._bottom_edge_integration_point_coords = tensorize_2d(xs, ys)
return self._bottom_edge_integration_point_coords
@property
def bottom_edge_integration_point_weights(self):
if self._bottom_edge_integration_point_weights is None:
weights = super().bottom_edge_integration_point_weights
self._bottom_edge_integration_point_weights = tensorize_1d(weights)
return self._bottom_edge_integration_point_weights
@property
def bottom_edge_integration_point_jacobian_dets(self):
if self._bottom_edge_integration_point_jacobian_dets is None:
jacobian_dets = super().bottom_edge_integration_point_jacobian_dets
self._bottom_edge_integration_point_jacobian_dets = tensorize_1d(jacobian_dets)
return self._bottom_edge_integration_point_jacobian_dets
@property
def bottom_edge_integration_point_xs(self):
if self._bottom_edge_integration_point_xs | |
# Repository: jihwanlee-alphago/aqt
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aqt.jax.compute_cost_utils."""
import logging
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from aqt.jax_legacy.jax import compute_cost_utils
from aqt.jax_legacy.jax import flax_layers as aqt_flax_layers
from aqt.jax_legacy.jax import get_bounds
from aqt.jax_legacy.jax import hlo_utils
from aqt.jax_legacy.jax import quant_config
from aqt.jax_legacy.jax import quantization
from aqt.jax_legacy.jax.quantization import QuantOps
from aqt.jax_legacy.jax.quantization import QuantType
from flax import linen as nn
from jax import random
from jax._src.lax import convolution as lax_convolution
from jax._src.lax import lax
from jax.nn import initializers
import jax.numpy as jnp
import numpy as onp
FLAGS = flags.FLAGS
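# Note on the expected cost numbers in the test cases below (a reading of the figures in
# this file, not a statement of the library's internals): the quadratic compute cost of a
# matmul/conv scales with the number of MACs times (weight_bits * act_bits), with
# unquantized bfloat16 counted as 16 bits; the "linear" variant appears to scale with the
# wider of the two operand precisions; the memory cost counts weight storage only
# (#weights * weight_bits), which is why the dynamic-matmul cases expect 0.
# For example, 'single_dense_layer_w8_a8' with input shape (1, 8) and 2 output classes
# gives 8 * 2 MACs and 8-bit weights/activations, hence 8 * 2 * (8 * 8).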
class ComputeCostUtilsTest(parameterized.TestCase):
def setUp(self):
super(ComputeCostUtilsTest, self).setUp()
self.rng_key = random.PRNGKey(0)
def compare_hlo_instructions(self, hlo_no_annotation, hlo_w_annotation):
"""Compares two HLO models to check if they only differ in metadata info."""
instrs_n = []
instrs_w = []
# gather instructions from both HLO models
for computation in hlo_no_annotation.computations:
for instr in computation.instructions:
instrs_n.append(instr)
for computation in hlo_w_annotation.computations:
for instr in computation.instructions:
instrs_w.append(instr)
self.assertEqual(len(instrs_n), len(instrs_w))
for i, _ in enumerate(instrs_n):
# check instructions with the opcode 'convolution'
# the metadata field for instrs_w and instrs_n should be different.
if (instrs_n[i].opcode == 'convolution' and
instrs_w[i].opcode == 'convolution'):
self.assertNotEqual(instrs_n[i].metadata, instrs_w[i].metadata)
# remove metadata op_type and op_name
instrs_n[i].metadata.op_type = ''
instrs_w[i].metadata.op_type = ''
instrs_n[i].metadata.op_name = ''
instrs_w[i].metadata.op_name = ''
# compare the rest of the instructions.
self.assertEqual(instrs_n[i], instrs_w[i])
class TestModelWith1Dense(nn.Module):
"""Test model with a single DenseAqt layer."""
@nn.compact
def __call__(self, inputs, hparams, num_classes, dtype=jnp.float32):
output = aqt_flax_layers.DenseAqt(
features=num_classes,
dtype=dtype,
train=False,
quant_context=quant_config.QuantContext(
update_bounds=False, collect_acts_stats=False),
paxis_name='batch',
hparams=hparams,
)(inputs, padding_mask=None)
return output
class TestModelWith1Conv(nn.Module):
"""Test model with a single ConvAqt layer."""
@nn.compact
def __call__(self,
inputs,
hparams,
kernel_size,
num_filters,
strides,
dtype=jnp.float32):
output = aqt_flax_layers.ConvAqt(
features=num_filters,
kernel_size=kernel_size,
strides=strides,
use_bias=False,
dtype=dtype,
train=False,
quant_context=quant_config.QuantContext(update_bounds=False),
paxis_name='batch',
hparams=hparams)(
inputs)
return output
class TestModelWith1DynamicMatmul(nn.Module):
"""Test model with a single dynamic matmul."""
@nn.compact
def __call__(self, lhs_act, rhs_act, lhs_prec, rhs_prec):
get_bounds_hyper = get_bounds.GetBounds.Hyper(
initial_bound=10.0,
stddev_coeff=0,
absdev_coeff=0,
mix_coeff=0,
granularity=quant_config.QuantGranularity.PER_TENSOR)
lhs_act_hparams = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=get_bounds_hyper,
prec=lhs_prec,
half_shift=False)
rhs_act_hparams = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=get_bounds_hyper,
prec=rhs_prec,
half_shift=False)
lhs_get_bounds_params = get_bounds.GetBounds.Params(
update_stats=False, update_bounds=False, module_name='lhs')
rhs_get_bounds_params = get_bounds.GetBounds.Params(
update_stats=False, update_bounds=False, module_name='rhs')
output = quantization.quantized_dynamic_dot_general(
lhs_act=lhs_act,
rhs_act=rhs_act,
lhs_act_hparams=lhs_act_hparams,
rhs_act_hparams=rhs_act_hparams,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=QuantType.AQT,
lhs_get_bounds_params=lhs_get_bounds_params,
rhs_get_bounds_params=rhs_get_bounds_params)
return output
@parameterized.named_parameters(
# TestModelWith1Dense
dict(
testcase_name='single_dense_layer_bfloat16',
modelclass=TestModelWith1Dense,
input_shapes=[(1, 8)],
model_kwargs={
'num_classes': 2,
'hparams': aqt_flax_layers.DenseAqt.HParams(
weight_prec=None,
quant_type=QuantType.FAKE_QUANT,
quant_act=None,
weight_quant_granularity=quant_config.QuantGranularity.PER_CHANNEL,
weight_half_shift=False
),
},
expected_compute_cost=8 * 2 * (16 * 16),
expected_compute_cost_ratio=1.0,
expected_compute_cost_linear=8 * 2 * (16),
expected_compute_cost_ratio_linear=1.0,
expected_memory_cost=8 * 2 * (16),
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_dense_layer_w8_a8',
modelclass=TestModelWith1Dense,
input_shapes=[(1, 8)],
model_kwargs={
'num_classes': 2,
'hparams': aqt_flax_layers.DenseAqt.HParams(
weight_prec=8,
quant_type=QuantType.FAKE_QUANT,
quant_act=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.POSITIVE,
prec=8,
bounds=1.0,
half_shift=False,
),
weight_quant_granularity=quant_config.QuantGranularity.PER_CHANNEL,
weight_half_shift=False
),
},
expected_compute_cost=8 * 2 * (8 * 8),
expected_compute_cost_ratio=0.25,
expected_compute_cost_linear=8 * 2 * (8),
expected_compute_cost_ratio_linear=0.5,
expected_memory_cost=8 * 2 * (8),
expected_memory_cost_ratio=0.5,
),
# TestModelWith1Conv
dict(
testcase_name='single_conv_layer_bfloat16',
modelclass=TestModelWith1Conv,
input_shapes=[(1, 8, 8, 3)],
model_kwargs={
'kernel_size': (3, 3),
'num_filters': 16,
'strides': (1, 1),
'hparams': aqt_flax_layers.ConvAqt.HParams(
weight_prec=None,
quant_type=QuantType.FAKE_QUANT,
quant_act=None,
weight_half_shift=False,
),
},
expected_compute_cost=(3 * 3) * (8 * 8) * 3 * 16 * (16 * 16),
expected_compute_cost_ratio=1.0,
expected_compute_cost_linear=(3 * 3) * (8 * 8) * 3 * 16 * (16),
expected_compute_cost_ratio_linear=1.0,
expected_memory_cost=(3 * 3) * 3 * 16 * (16),
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_conv_layer_bfloat16_strided',
modelclass=TestModelWith1Conv,
input_shapes=[(1, 8, 8, 3)],
model_kwargs={
'kernel_size': (3, 3),
'num_filters': 16,
'strides': (4, 2),
'hparams': aqt_flax_layers.ConvAqt.HParams(
weight_prec=None,
quant_type=QuantType.FAKE_QUANT,
quant_act=None,
weight_half_shift=False,
),
},
expected_compute_cost=(3 * 3) * ((8 / 4) * (8 / 2)) * 3 * 16 * (16 * 16),
expected_compute_cost_ratio=1.0,
expected_compute_cost_linear=(3 * 3) * ((8 / 4) * (8 / 2)) * 3 * 16 * (16),
expected_compute_cost_ratio_linear=1.0,
expected_memory_cost=(3 * 3) * 3 * 16 * (16),
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_conv_layer_bfloat16_3d',
modelclass=TestModelWith1Conv,
input_shapes=[(1, 8, 8, 8, 3)],
model_kwargs={
'kernel_size': (3, 3, 3),
'num_filters': 16,
'strides': (1, 1, 1),
'hparams': aqt_flax_layers.ConvAqt.HParams(
weight_prec=None,
quant_type=QuantType.FAKE_QUANT,
quant_act=None,
weight_half_shift=False,
),
},
expected_compute_cost=(3 * 3 * 3) * (8 * 8 * 8) * 3 * 16 * (16 * 16),
expected_compute_cost_ratio=1.0,
expected_compute_cost_linear=(3 * 3 * 3) * (8 * 8 * 8) * 3 * 16 * (16),
expected_compute_cost_ratio_linear=1.0,
expected_memory_cost=(3 * 3 * 3) * 3 * 16 * (16),
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_conv_layer_w4_a2',
modelclass=TestModelWith1Conv,
input_shapes=[(1, 8, 8, 3)],
model_kwargs={
'kernel_size': (3, 3),
'num_filters': 16,
'strides': (1, 1),
'hparams': aqt_flax_layers.ConvAqt.HParams(
weight_prec=4,
quant_type=QuantType.FAKE_QUANT,
quant_act=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.POSITIVE,
prec=2,
bounds=1.0,
half_shift=False,
),
weight_half_shift=False,
),
},
expected_compute_cost=(3 * 3) * (8 * 8) * 3 * 16 * (4 * 2),
expected_compute_cost_ratio=0.03125,
expected_compute_cost_linear=(3 * 3) * (8 * 8) * 3 * 16 * (4),
expected_compute_cost_ratio_linear=0.25,
expected_memory_cost=(3 * 3) * 3 * 16 * (4),
expected_memory_cost_ratio=0.25,
),
# TestModelWith1DynamicMatmul
dict(
testcase_name='single_dynamic_matmul_layer_bfloat16',
modelclass=TestModelWith1DynamicMatmul,
input_shapes=[(1, 8), (8, 1)],
model_kwargs={'lhs_prec': None,
'rhs_prec': None},
expected_compute_cost=8 * (16 * 16),
expected_compute_cost_ratio=1.0,
expected_compute_cost_linear=8 * (16),
expected_compute_cost_ratio_linear=1.0,
expected_memory_cost=0,
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_dynamic_matmul_layer_l8_r8',
modelclass=TestModelWith1DynamicMatmul,
input_shapes=[(1, 8), (8, 1)],
model_kwargs={'lhs_prec': 8,
'rhs_prec': 8},
expected_compute_cost=8 * (8 * 8),
expected_compute_cost_ratio=0.25,
expected_compute_cost_linear=8 * 8,
expected_compute_cost_ratio_linear=0.5,
expected_memory_cost=0,
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_dynamic_matmul_layer_l8_r4',
modelclass=TestModelWith1DynamicMatmul,
input_shapes=[(1, 8), (8, 1)],
model_kwargs={'lhs_prec': 8,
'rhs_prec': 4},
expected_compute_cost=8 * (8 * 4),
expected_compute_cost_ratio=0.125,
expected_compute_cost_linear=8 * (8),
expected_compute_cost_ratio_linear=0.5,
expected_memory_cost=0,
expected_memory_cost_ratio=1.0,
),
) # pylint: disable=line-too-long
def test_estimate_simple_model_cost(
self, modelclass, input_shapes, model_kwargs, expected_compute_cost,
expected_compute_cost_ratio, expected_compute_cost_linear,
expected_compute_cost_ratio_linear, expected_memory_cost,
expected_memory_cost_ratio):
module = modelclass()
input_shapes_with_type = [(sh, jnp.float32) for sh in input_shapes]
dummy_inputs = [
jnp.ones(input_shape, dtype=dtype)
for (input_shape, dtype) in input_shapes_with_type
]
init_state = module.init(random.PRNGKey(0), *dummy_inputs, **model_kwargs)
hlo_proto = hlo_utils.load_hlo_proto_from_model(module, init_state,
input_shapes,
**model_kwargs)
compute_result = compute_cost_utils.estimate_compute_cost(hlo_proto)
memory_result = compute_cost_utils.estimate_memory_cost(hlo_proto)
logging.info('compute cost result is %s', compute_result)
logging.info('memory cost result is %s', memory_result)
self.assertEqual(compute_result['compute_cost'], expected_compute_cost)
self.assertEqual(memory_result['memory_cost'], expected_memory_cost)
self.assertEqual(compute_result['compute_cost_ratio_to_bfloat16'],
expected_compute_cost_ratio)
self.assertEqual(memory_result['memory_cost_ratio_to_bfloat16'],
expected_memory_cost_ratio)
self.assertEqual(compute_result['compute_cost_linear'],
expected_compute_cost_linear)
self.assertEqual(compute_result['compute_cost_ratio_to_bfloat16_linear'],
expected_compute_cost_ratio_linear)
@parameterized.named_parameters(
# TestModelWith1Dense
dict(
testcase_name='single_dense_layer_bfloat16_batch_size',
modelclass=TestModelWith1Dense,
input_shape_per_sample=(16,),
model_kwargs={
'num_classes':
20,
'hparams':
aqt_flax_layers.DenseAqt.HParams(
weight_prec=None,
quant_act=None,
quant_type=QuantType.FAKE_QUANT,
weight_quant_granularity=quant_config.QuantGranularity
.PER_CHANNEL,
weight_half_shift=False)
},
),
# TestModelWith1Conv
dict(
testcase_name='single_conv_layer_bfloat16_batch_size',
modelclass=TestModelWith1Conv,
input_shape_per_sample=(16, 16, 3),
model_kwargs={
'kernel_size': (3, 3),
'num_filters':
16,
'strides': (2, 2),
'hparams':
aqt_flax_layers.ConvAqt.HParams(
weight_prec=None,
quant_act=None,
quant_type=QuantType.FAKE_QUANT,
weight_half_shift=False,
)
},
),
)
def test_batch_size_has_no_effect_on_cost(self, modelclass,
input_shape_per_sample,
model_kwargs):
expected_compute_cost = None
expected_memory_cost = None
batch_size_list = [32, 64, 128, 256, 512, 1024]
module = modelclass()
# Sweep over the batch size list
for batch_size in batch_size_list:
input_shape = (batch_size,) + input_shape_per_sample
init_state = module.init(
random.PRNGKey(0), jnp.ones(input_shape, jnp.float32), **model_kwargs)
hlo_proto = hlo_utils.load_hlo_proto_from_model(module, init_state,
[input_shape],
**model_kwargs)
del init_state
compute_result = compute_cost_utils.estimate_compute_cost(hlo_proto)
memory_result = compute_cost_utils.estimate_memory_cost(hlo_proto)
# Save the first cost and compare it with the rest
if expected_compute_cost is None:
expected_compute_cost = compute_result['compute_cost']
else:
self.assertEqual(compute_result['compute_cost'], expected_compute_cost)
if expected_memory_cost is None:
expected_memory_cost = memory_result['memory_cost']
else:
self.assertEqual(memory_result['memory_cost'], expected_memory_cost)
@parameterized.named_parameters(
dict(testcase_name='quant_8bit', weight_prec=8),
dict(testcase_name='quant_4bit', weight_prec=4),
)
def test_check_value_inside_and_outside_of_context_conv_general(
self, weight_prec):
original_op_name = 'conv_general_dilated'
# The 'name' in primitive should change in the context in 'flax_layers'
# if the context is enabled
self.assertEqual(original_op_name,
lax_convolution.conv_general_dilated_p.name)
with compute_cost_utils.ConvMetadataMonkeyPatch(
weight_prec=weight_prec, act_prec=None):
self.assertNotEqual(original_op_name,
lax_convolution.conv_general_dilated_p.name)
self.assertEqual(original_op_name,
lax_convolution.conv_general_dilated_p.name)
@parameterized.named_parameters(
dict(testcase_name='quant_8bit', weight_prec=8, acts_prec=8),
dict(testcase_name='quant_4bit', weight_prec=4, acts_prec=4),
)
def test_annotation_only_changes_hlo_metadata_conv(self, weight_prec,
acts_prec):
FLAGS.metadata_enabled = False
quant_act = quantization.QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.SYMMETRIC,
prec=acts_prec,
bounds=1.0,
half_shift=False)
input_shape = (1, 8, 8, 3)
module_no_annotation = aqt_flax_layers.ConvAqt(
features=4,
kernel_size=(3, 3),
padding='VALID',
paxis_name='batch',
quant_context=quant_config.QuantContext(update_bounds=False),
train=False,
hparams=aqt_flax_layers.ConvAqt.HParams(
weight_prec=weight_prec,
quant_act=quant_act,
quant_type=QuantType.FAKE_QUANT,
weight_half_shift=False),
kernel_init=initializers.ones,
bias_init=initializers.ones,
dtype=jnp.float32)
init_state = module_no_annotation.init(self.rng_key,
jnp.ones(input_shape, jnp.float32))
output_no_annotation = module_no_annotation.apply(init_state,
jnp.ones(input_shape))
hlo_no_annotation = hlo_utils.load_hlo_proto_from_model(
module_no_annotation, init_state, [input_shape])
del init_state
FLAGS.metadata_enabled = True
module_w_annotation = aqt_flax_layers.ConvAqt(
features=4,
kernel_size=(3, 3),
padding='VALID',
paxis_name='batch',
quant_context=quant_config.QuantContext(update_bounds=False),
train=False,
hparams=aqt_flax_layers.ConvAqt.HParams(
weight_prec=weight_prec,
quant_act=quant_act,
quant_type=QuantType.FAKE_QUANT,
weight_half_shift=False),
kernel_init=initializers.ones,
bias_init=initializers.ones,
dtype=jnp.float32)
init_state = module_w_annotation.init(self.rng_key,
jnp.ones(input_shape, jnp.float32))
output_w_annotation = module_w_annotation.apply(init_state,
jnp.ones(input_shape))
hlo_w_annotation = hlo_utils.load_hlo_proto_from_model(
module_w_annotation, init_state, [input_shape])
del init_state
onp.testing.assert_array_equal(output_no_annotation, output_w_annotation)
self.compare_hlo_instructions(hlo_no_annotation, hlo_w_annotation)
@parameterized.named_parameters(
dict(testcase_name='quant_8bit', weight_prec=8),
dict(testcase_name='quant_4bit', weight_prec=4),
)
def test_check_value_inside_and_outside_of_context_dot_general(
self, weight_prec):
original_op_name = 'dot_general'
# The 'name' in primitive should change in the context in 'flax_layers'
# if the context is enabled.
self.assertEqual(original_op_name, lax.dot_general_p.name)
with compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=None, rhs_prec=weight_prec, rhs_is_weight=True):
self.assertNotEqual(original_op_name, lax.dot_general_p.name)
self.assertEqual(original_op_name, lax.dot_general_p.name)
@parameterized.named_parameters(
dict(
testcase_name='quant_8bit',
weight_prec=8,
acts_prec=8,
),)
def test_annotation_only_changes_hlo_metadata_dense(self, weight_prec,
acts_prec):
FLAGS.metadata_enabled = False
quant_act = quantization.QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.SYMMETRIC,
prec=acts_prec,
bounds=1.0,
half_shift=False)
input_shape = (1, 16)
module_no_annotation = aqt_flax_layers.DenseAqt(
features=4,
use_bias=False,
quant_context=quant_config.QuantContext(
update_bounds=False, collect_acts_stats=False),
paxis_name='batch',
train=False,
| |
# Repository: Skar0/generalizedparity
import copy
from antichain import Antichain
from graph import Graph
import operations
from collections import deque, defaultdict
from attractors import init_out, attractor
DEBUG_PRINT = False
# TODO be careful of aliasing on the values in the lists: changing one somewhere might change another somewhere else
# The following functions are copied and adapted from psolC
def R_set_func(g, func, target_set, j):
"""
We compute the attractor of a set of node-priority pairs where the priority
represents the maximal priority seen so far.
"""
ascending_priorities = g.get_sorted_priorities_func(func)
v_out = init_out(g) # a counter for visited edges from each vertex
out = {(v, p): v_out[v] for v in g.get_nodes()
for p in ascending_priorities}
regions = defaultdict(lambda: -1)
adversary = operations.opponent(j)
# we keep a queue of newly found winning vertex-priority pairs
queue = deque(target_set)
while queue:
(node, priority) = queue.popleft()
for pred in g.get_predecessors(node):
pred_player = g.get_node_player(pred)
pred_priority = g.get_node_priority_function_i(pred, func)
if pred_priority > priority:
continue # cannot be a predecessor
if priority > g.get_node_priority_function_i(node, func):
options = [priority]
else:
assert (priority == g.get_node_priority_function_i(node, func))
options = filter(lambda x: x >= pred_priority and
x <= priority, ascending_priorities)
assert (len(options) > 0)
for p in options:
if regions[(pred, p)] == -1: # vertex-priority is undecided
if pred_player == j:
regions[(pred, p)] = j
if (pred, p) not in target_set:
queue.append((pred, p))
elif pred_player == adversary:
out[(pred, p)] -= 1
if out[(pred, p)] == 0:
regions[(pred, p)] = j
if (pred, p) not in target_set:
queue.append((pred, p))
# prepare output
W = set()
for n in g.get_nodes():
if regions[(n, g.get_node_priority_function_i(n, func))] == j:
W.add(n)
return W
def jfs_algo_func(g, func, j):
assert (j == 0 or j == 1)
j_priorities = filter(lambda x: (x % 2) == j,
g.get_sorted_priorities_func(func))
T = set([(v, p) for v in g.get_nodes()
for p in filter(lambda x: x >= g.get_node_priority_function_i(v, func),
j_priorities)])
next_F = R_set_func(g, func, T, j)
F = set()
while next_F != F:
F = next_F
T = set([(v, p) for v in F
for p in filter(lambda x: x >= g.get_node_priority_function_i(v, func),
j_priorities)])
next_F = R_set_func(g, func, T, j)
next_F = next_F & F
return F
def psolC_func(g, func, W1, W2):
safe_episodes = jfs_algo_func(g, func, 0)
subgame = g
if len(safe_episodes) > 0:
A, complement = attractor(subgame, safe_episodes, 0)
W1.extend(A)
subgame = subgame.subgame(complement)
subgame, W1, W2 = psolC_func(subgame, func, W1, W2)
safe_episodes = jfs_algo_func(subgame, func, 1)
if len(safe_episodes) > 0:
A, complement = attractor(subgame, safe_episodes, 1)
W2.extend(A)
subgame = subgame.subgame(complement)
subgame, W1, W2 = psolC_func(subgame, func, W1, W2)
return subgame, W1, W2
def psolC_generalized(g, W0, W1):
# base case : game is empty
if g.get_nodes() == []:
if DEBUG_PRINT: print("Base case return")
return g, W0, W1
# else retrieve useful information on the game
nbr_func = g.get_nbr_priority_functions() # number of functions
priorities = [[] for z in xrange(nbr_func)] # setup list containing list of priorities for each function
even_priorities = [[] for z in xrange(nbr_func)] # setup list containing list of even priorities for each function
# first, retrieve all priorities and put them in the lists of priorities for each function
for node in g.nodes.iterkeys():
for func in range(nbr_func):
priorities[func].append(g.get_node_priority_function_i(node, func + 1)) # function are numbered 1 to k
# sort priorities and create the lists containing only the even priorities
for func in range(nbr_func):
priorities[func] = sorted(set(priorities[func]), reverse=False) # change into set to remove duplicates and sort
even_priorities[func] = filter(lambda x: x % 2 == 0, priorities[func]) # keep the sorted even priorities
# if there are no even priorities according to one of the functions, the game is completely won by player 1
# return empty game and all nodes added to W1
if len(even_priorities[func]) == 0:
W1.extend(g.nodes.keys())
return Graph(), W0, W1
if DEBUG_PRINT:
print("Priorities " + str(priorities))
print("Even priorities " + str(even_priorities))
    # handle odd priorities: compute player-1 safe episodes (jfs algorithm, as in psolC) for each priority function
for i in range(1, nbr_func + 1):
safe_episodes = jfs_algo_func(g, i, 1)
if len(safe_episodes) > 0:
A, complement = attractor(g, safe_episodes, 1)
W1.extend(A)
subgame = g.subgame(complement)
return psolC_generalized(subgame, W0, W1)
# handle even priorities
w = truc(g, nbr_func, even_priorities, priorities)
if len(w) > 0:
A, complement = attractor(g, w, 0)
W0.extend(A)
subgame = g.subgame(complement)
return psolC_generalized(subgame, W0, W1)
return g, W0, W1
def truc(g, nbr_func, even_priorities, priorities):
"""
Compute the fixpoint with candidate set F and target set T
:param g:
:type g:
:param nbr_func:
:type nbr_func:
:param even_priorities:
:type even_priorities:
:return:
:rtype:
"""
    # priorities are sorted in ascending order, so the last element of each list is the maximal priority
    max_values = [l[-1] for l in priorities]
#print("max priorities "+str(max_even_priorities))
empty_set = set()
cache = set()
t = set(g.nodes.keys())
while cache != t and t != empty_set:
cache = t
att = compute_fixpoint(g, t, nbr_func, even_priorities, max_values)
res = set()
for elem in init_nodes(g, t, nbr_func):
if att.contains_element(elem):
res.add(elem[0])
t = t.intersection(res)
return t
def init_nodes(graph, nodes, nbr_func):
res = []
for node in nodes:
init = [0] * (nbr_func + 1)
init[0] = node
for i in range(1, nbr_func + 1):
init[i] = graph.get_node_priority_function_i(node, i)
res.append(init)
return res
def intersector(x, y):
"""
Intersection between two memory values x = m' and y = m.
"""
parity_x = x % 2
parity_y = y % 2
if parity_x == 1:
if parity_y == 1:
return min(x, y)
else:
return y
if parity_x == 0:
if parity_y == 0:
return max(x, y)
else:
return x
def comparator(x, y):
"""
Comparison between two memory values x = m' and y = m. We want to check whether x <= y
"""
parity_x = x % 2
parity_y = y % 2
if parity_x == 1:
if parity_y == 1:
return x <= y
else:
return False
if parity_x == 0:
if parity_y == 0:
return x >= y
else:
return True
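# Illustrative sketch (added for clarity, not part of the original solver): concrete
# values of the memory-value order defined by comparator/intersector above. Under this
# order even values sit below odd ones, larger even values are smaller, and intersector
# behaves as a meet (greatest lower bound). The helper is never called.
def _example_memory_value_order():
    assert comparator(3, 5)        # odd vs odd: numerical <=
    assert comparator(4, 2)        # even vs even: order is reversed, so 4 <= 2 here
    assert comparator(2, 3)        # an even value is below any odd value
    assert not comparator(3, 2)
    assert intersector(3, 5) == 3  # two odd values: numerical min
    assert intersector(2, 4) == 4  # two even values: numerical max
    assert intersector(1, 2) == 2  # odd with even: the even value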
def intersector_generalized(x, y):
"""
Intersection between two elements [v', m_1', ... m'_k] [v, m_1, ... m_k] is possible iff v = v' (else, elements are
incomparable and function yields -1). Then we just apply intersection between each memory value.
"""
if x[0] != y[0]:
return -1
else:
nbr_functions = len(x)
res = [x[0]]
for func in range(1, nbr_functions):
res.append(intersector(x[func], y[func]))
return res
def comparator_generalized(x, y):
"""
Comparison between two elements [v', m_1', ... m'_k] [v, m_1, ... m_k] is possible iff v = v' (else, elements are
incomparable and function yields False). Then we just compare each memory value.
"""
if x[0] != y[0]:
return False
else:
nbr_functions = len(x)
for func in range(1, nbr_functions):
if not comparator(x[func], y[func]):
return False
return True
def down_generalized(element, priorities, node, nbr_functions, max_values):
"""
Computes the largest m = [m_1, ..., m_k] such that up(m, priorities) <= m' = element[1:]. Then we add node to
obtain [node, m]. When computing down, priorities is a tuple of size k which gives the encountered priority
according to each priority function. Max_values records the maximum value to know when a memory value is not
defined.
"""
# print(element, priorities, node, nbr_functions, max_values)
# resulting node
res = [0] * (nbr_functions + 1)
res[0] = node
# for each priority function (numbered from 1 to k)
for func in range(1, nbr_functions + 1):
encountered_priority_p = priorities[func - 1]
# if priority encountered is even
if encountered_priority_p % 2 == 0:
m_prim = element[func]
if encountered_priority_p < m_prim:
res[func] = m_prim
else:
res[func] = max(encountered_priority_p - 1, 0)
else:
m_prim = element[func]
if encountered_priority_p <= m_prim:
res[func] = m_prim
else:
if encountered_priority_p != max_values[func - 1]:
res[func] = encountered_priority_p + 1
else:
return -1
return res
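# Illustrative sketch (added for clarity; inputs are hypothetical): direct evaluations of
# down_generalized for a single priority function (nbr_functions = 1, maximal priority 5),
# following the branches implemented above. The helper is never called.
def _example_down_generalized():
    assert down_generalized([7, 2], (4,), 3, 1, [5]) == [3, 3]  # even priority 4 not below memory 2 -> 4 - 1
    assert down_generalized([7, 6], (3,), 2, 1, [5]) == [2, 6]  # odd priority 3 <= memory 6 -> memory kept
    assert down_generalized([7, 0], (3,), 2, 1, [5]) == [2, 4]  # odd priority 3 > memory 0 -> 3 + 1
    assert down_generalized([7, 3], (5,), 2, 1, [5]) == -1      # maximal odd priority exceeded -> undefined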
def create_start_antichain(starting_nodes, nbr_func, even_values):
    # TODO this is a crude creation adding every possibility; we could directly add the max elements, i.e. the max
# even value for each
start_antichain = Antichain(comparator_generalized, intersector_generalized)
# create the antichain of maximal elements of the safe set
# every counter in every tuple has the maximal value
for node in starting_nodes:
temp = [0] * (nbr_func + 1)
temp[0] = node
for | |
print('no dot found')
continue
gloss = Gloss.objects.select_related().get(pk=pk)
# This is no longer allowed. The column is skipped.
# Updating the lemma idgloss is a special procedure, not only because it has relations to other parts of
        # the database, but also because it can only be evaluated after reviewing all lemma idgloss translations
lemma_idgloss_key_prefix = "Lemma ID Gloss ("
if fieldname.startswith(lemma_idgloss_key_prefix):
language_name = fieldname[len(lemma_idgloss_key_prefix):-1]
if gloss not in lemmaidglosstranslations_per_gloss:
lemmaidglosstranslations_per_gloss[gloss] = {}
lemmaidglosstranslations_per_gloss[gloss][language_name] = new_value
# compare new value to existing value
language_name_column = settings.DEFAULT_LANGUAGE_HEADER_COLUMN['English']
languages = Language.objects.filter(**{language_name_column:language_name})
if languages:
language = languages[0]
lemma_idglosses = gloss.lemma.lemmaidglosstranslation_set.filter(language=language)
if lemma_idglosses:
lemma_idgloss_string = lemma_idglosses[0].text
else:
# lemma not set
lemma_idgloss_string = ''
if lemma_idgloss_string != new_value and new_value != 'None' and new_value != '':
error_string = 'ERROR: Attempt to update Lemma ID Gloss translations: ' + new_value
# print('error string: ', error_string)
if error:
error.append(error_string)
else:
error = [error_string]
messages.add_message(request, messages.ERROR, ('Attempt to update Lemma ID Gloss translations.'))
continue # avoid default field update
# Updating the annotation idgloss is a special procedure, because it has relations to other parts of the
# database
annotation_idgloss_key_prefix = "Annotation ID Gloss ("
if fieldname.startswith(annotation_idgloss_key_prefix):
language_name_column = settings.DEFAULT_LANGUAGE_HEADER_COLUMN['English']
language_name = fieldname[len(annotation_idgloss_key_prefix):-1]
languages = Language.objects.filter(**{language_name_column:language_name})
if languages:
language = languages[0]
annotation_idglosses = gloss.annotationidglosstranslation_set.filter(language=language)
if annotation_idglosses:
annotation_idgloss = annotation_idglosses[0]
annotation_idgloss.text = new_value
annotation_idgloss.save()
continue
keywords_key_prefix = "Keywords ("
# Updating the keywords is a special procedure, because it has relations to other parts of the database
if fieldname.startswith(keywords_key_prefix):
language_name_column = settings.DEFAULT_LANGUAGE_HEADER_COLUMN['English']
language_name = fieldname[len(keywords_key_prefix):-1]
languages = Language.objects.filter(**{language_name_column:language_name})
if languages:
language = languages[0]
language_code_2char = language.language_code_2char
update_keywords(gloss, "keywords_" + language_code_2char, new_value)
gloss.save()
continue
if fieldname == 'SignLanguages':
new_human_value_list = [v.strip() for v in new_value.split(',')]
update_signlanguage(gloss,None,new_human_value_list)
gloss.save()
continue
if fieldname == 'Dialects':
new_human_value_list = [v.strip() for v in new_value.split(',')]
update_dialect(gloss,None,new_human_value_list)
gloss.save()
continue
if fieldname == 'Dataset':
            # this has already been checked for existence and permission in the previous step
# get dataset identifier
if new_value == 'None':
# don't allow the user to erase the current dataset, this should have already been caught
print('csv import make changes error: gloss ', gloss.id, ' attempt to set dataset to empty')
continue
else:
# the existence of the new dataset should have already been tested
new_dataset = Dataset.objects.get(acronym=new_value)
try:
gloss_lemma = gloss.lemma
except:
# this error should not happen
print('csv import make changes error: gloss ', gloss.id, ' gloss.lemma is empty, cannot set dataset')
continue
# this could have an unwanted side effect on the Lemma translations?
gloss_lemma.dataset = new_dataset
gloss_lemma.save()
continue
if fieldname == 'Sequential Morphology':
new_human_value_list = [v.strip() for v in new_value.split(',')]
update_sequential_morphology(gloss,None,new_human_value_list)
continue
if fieldname == 'Simultaneous Morphology':
new_human_value_list = [v.strip() for v in new_value.split(',')]
update_simultaneous_morphology(gloss,None,new_human_value_list)
continue
if fieldname == 'Blend Morphology':
new_human_value_list = [v.strip() for v in new_value.split(',')]
update_blend_morphology(gloss,None,new_human_value_list)
continue
if fieldname == 'Relations to other signs':
new_human_value_list = [v.strip() for v in new_value.split(',')]
subst_relations(gloss,None,new_human_value_list)
continue
if fieldname == 'Relations to foreign signs':
new_human_value_list = [v.strip() for v in new_value.split(',')]
subst_foreignrelations(gloss,None,new_human_value_list)
continue
if fieldname == 'Tags':
new_human_value_list = [v.strip().replace(' ','_') for v in new_value.split(',')]
update_tags(gloss,None,new_human_value_list)
continue
if fieldname == 'Notes':
subst_notes(gloss,None,new_value)
continue
with override(settings.LANGUAGE_CODE):
#Replace the value for bools
if fieldname in Gloss._meta.get_fields() and Gloss._meta.get_field(fieldname).__class__.__name__ == 'NullBooleanField':
if new_value in ['true','True', 'TRUE']:
new_value = True
elif new_value == 'None' or new_value == 'Neutral':
new_value = None
else:
new_value = False
#Remember this for renaming the video later
if fieldname == 'idgloss':
video_path_before = settings.WRITABLE_FOLDER+gloss.get_video_path()
#The normal change and save procedure
setattr(gloss,fieldname,new_value)
gloss.save()
#Also update the video if needed
if fieldname == 'idgloss':
video_path_after = settings.WRITABLE_FOLDER+gloss.get_video_path()
if os.path.isfile(video_path_before):
os.rename(video_path_before,video_path_after)
stage = 2
#Show uploadform
else:
stage = 0
if stage and not changes and not error:
# no changes were found in the input file. print a message as feedback
# this is needed in order to have output that can be tested for in the unit tests
messages.add_message(request, messages.INFO, ('No changes were found.'))
return render(request,'dictionary/import_csv_update.html',{'form':uploadform,'stage':stage,'changes':changes,
'creation':creation,
'gloss_already_exists':gloss_already_exists,
'error':error,
'dataset_languages':dataset_languages,
'selected_datasets':selected_datasets,
'translation_languages_dict': translation_languages_dict,
'seen_datasets': seen_datasets,
'SHOW_DATASET_INTERFACE_OPTIONS': settings.SHOW_DATASET_INTERFACE_OPTIONS})
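# Illustrative sketch of the CSV layout that import_csv_lemmas below expects, based on the
# column handling in the function (the dataset acronym and language names are hypothetical):
#
#   Lemma ID,Dataset,Lemma ID Gloss (English),Lemma ID Gloss (Dutch)
#   1234,ExampleDS,HOUSE,HUIS
#
# The first row must contain the headers, 'Lemma ID' must be numerical, and 'Dataset' must
# name a single dataset that the user both may change and has currently selected.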
def import_csv_lemmas(request):
user = request.user
import guardian
user_datasets = guardian.shortcuts.get_objects_for_user(user,'change_dataset',Dataset)
user_datasets_names = [ dataset.acronym for dataset in user_datasets ]
selected_datasets = get_selected_datasets_for_user(user)
dataset_languages = Language.objects.filter(dataset__in=selected_datasets).distinct()
translation_languages_dict = {}
# this dictionary is used in the template, it maps each dataset to a list of tuples (English name of dataset, language_code_2char)
for dataset_object in user_datasets:
translation_languages_dict[dataset_object] = []
for language in dataset_object.translation_languages.all():
language_name = getattr(language, settings.DEFAULT_LANGUAGE_HEADER_COLUMN['English'])
language_tuple = (language_name, language.language_code_2char)
translation_languages_dict[dataset_object].append(language_tuple)
seen_datasets = []
seen_dataset_names = []
# fatal errors are duplicate column headers, data in columns without headers
# column headers that do not correspond to database fields
# non-numerical lemma ids
# non-existent dataset or no permission for dataset
# attempt to update lemmas in multiple datasets in the same csv
# missing Dataset column
# missing Lemma required for the dataset
uploadform = signbank.dictionary.forms.CSVUploadForm
changes = []
error = []
earlier_updates_same_csv = []
earlier_updates_lemmaidgloss = {}
#Propose changes
if len(request.FILES) > 0:
fatal_error = False
csv_text = request.FILES['file'].read().decode('UTF-8')
csv_lines = re.compile('[\r\n]+').split(csv_text) # split the csv text on any combination of new line characters
keys = {} # in case something goes wrong in header row
for nl, line in enumerate(csv_lines):
#The first line contains the keys
if nl == 0:
keys = line.strip().split(',')
num_keys = len(keys)
continue
elif len(line) == 0:
continue
values = csv.reader([line]).__next__()
value_dict = {}
for nv,value in enumerate(values):
try:
if keys[nv]:
if keys[nv] in value_dict.keys():
e = 'Duplicate header column found: ' + keys[nv]
error.append(e)
fatal_error = True
break
value_dict[keys[nv]] = value
elif value:
# empty column header
e = 'Row '+str(nl + 1) + ': Extra data found in column without header: ' + value
error.append(e)
fatal_error = True
break
except IndexError:
e = 'Row '+str(nl + 1) + ': Index error in column: ' + str(nv)
error.append(e)
fatal_error = True
break
if fatal_error:
break
if 'Lemma ID' in value_dict:
try:
pk = int(value_dict['Lemma ID'])
except ValueError:
e = 'Row '+str(nl + 1) + ': Lemma ID must be numerical: ' + str(value_dict['Lemma ID'])
error.append(e)
fatal_error = True
else:
e = 'Lemma ID required to update lemmas.'
error.append(e)
fatal_error = True
if fatal_error:
break
if 'Dataset' not in value_dict:
e1 = 'The Dataset column is required.'
error.append(e1)
break
# construct list of allowed columns
required_columns = ['Lemma ID', 'Dataset']
dataset_name = value_dict['Dataset'].strip()
# catch possible empty values for dataset, primarily for pretty printing error message
if dataset_name == '' or dataset_name == None or dataset_name == 0 or dataset_name == 'NULL':
e_dataset_empty = 'Row ' + str(nl + 1) + ': The Dataset is missing.'
error.append(e_dataset_empty)
break
if dataset_name not in seen_dataset_names:
try:
dataset = Dataset.objects.get(acronym=dataset_name)
except:
print('exception trying to get dataset object')
# An error message should be returned here, the dataset does not exist
e_dataset_not_found = 'Row '+str(nl + 1) + ': Dataset %s' % value_dict['Dataset'].strip() + ' does not exist.'
error.append(e_dataset_not_found)
fatal_error = True
break
if dataset_name not in user_datasets_names:
e3 = 'Row '+str(nl + 1) + ': You are not allowed to change dataset %s.' % value_dict['Dataset'].strip()
error.append(e3)
fatal_error = True
break
if dataset not in selected_datasets:
e3 = 'Row '+str(nl + 1) + ': Please select the dataset %s.' % value_dict['Dataset'].strip()
error.append(e3)
fatal_error = True
break
if seen_datasets:
# already seen a dataset
if dataset in seen_datasets:
pass
else:
# seen more than one dataset
e3 = 'Row ' + str(nl + 1) + ': Seen more than one dataset: %s.' % value_dict['Dataset'].strip()
error.append(e3)
fatal_error = True
break
else:
seen_datasets.append(dataset)
seen_dataset_names.append(dataset_name)
# saw the first dataset
if fatal_error:
break
# The Lemma ID Gloss may already exist.
lemmaidglosstranslations = {}
contextual_error_messages_lemmaidglosstranslations = []
for language in dataset.translation_languages.all():
language_name = getattr(language, settings.DEFAULT_LANGUAGE_HEADER_COLUMN['English'])
column_name = "Lemma ID Gloss (%s)" % language_name
required_columns.append(column_name)
if column_name in value_dict:
lemma_idgloss_value = value_dict[column_name].strip()
# also stores empty values
lemmaidglosstranslations[language] = lemma_idgloss_value
# if we get to here, | |
dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arr, k: np.triu_indices_from(arr, k=k)
jnp_fun = lambda arr, k: jnp.triu_indices_from(arr, k=k)
args_maker = lambda: [rng(shape, dtype), k]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
for k in [-1, 0, 1]))
def testTrilIndicesFrom(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arr, k: np.tril_indices_from(arr, k=k)
jnp_fun = lambda arr, k: jnp.tril_indices_from(arr, k=k)
args_maker = lambda: [rng(shape, dtype), k]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_n={}".format(ndim, n),
"ndim": ndim, "n": n}
for ndim in [0, 1, 4]
for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
np.testing.assert_equal(np.diag_indices(n, ndim),
jnp.diag_indices(n, ndim))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "arr_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)
),
"dtype": dtype, "shape": shape}
for dtype in default_dtypes
for shape in [(1,1), (2,2), (3,3), (4,4), (5,5)]))
def testDiagIndicesFrom(self, dtype, shape):
rng = jtu.rand_default(self.rng())
np_fun = np.diag_indices_from
jnp_fun = jnp.diag_indices_from
args_maker = lambda : [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.diag(arg, k)
jnp_fun = lambda arg: jnp.diag(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in all_shapes
for k in range(-4, 4)))
def testDiagFlat(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
# numpy has inconsistencies for scalar values
# https://github.com/numpy/numpy/issues/16477
# jax differs in that it treats scalars values as length-1 arrays
np_fun = lambda arg: np.diagflat(np.atleast_1d(arg), k)
jnp_fun = lambda arg: jnp.diagflat(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a1_shape={}_a2_shape2={}".format(
jtu.format_shape_dtype_string(a1_shape, dtype),
jtu.format_shape_dtype_string(a2_shape, dtype)),
"dtype": dtype, "a1_shape": a1_shape, "a2_shape": a2_shape}
for dtype in default_dtypes
for a1_shape in one_dim_array_shapes
for a2_shape in one_dim_array_shapes))
def testPolyMul(self, a1_shape, a2_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1, arg2: np.polymul(arg1, arg2)
jnp_fun_np = lambda arg1, arg2: jnp.polymul(arg1, arg2, trim_leading_zeros=True)
jnp_fun_co = lambda arg1, arg2: jnp.polymul(arg1, arg2)
args_maker = lambda: [rng(a1_shape, dtype), rng(a2_shape, dtype)]
tol = {np.float16: 2e-1, np.float32: 5e-2, np.float64: 1e-13}
self._CheckAgainstNumpy(np_fun, jnp_fun_np, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun_co, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
"dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
"axis2": axis2}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in [a for a in range(-len(shape), len(shape))
if a % len(shape) != axis1 % len(shape)]
for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.diagonal(arg, offset, axis1, axis2)
jnp_fun = lambda arg: jnp.diagonal(arg, offset, axis1, axis2)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(np.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
np_fun = lambda: np.identity(n, dtype)
jnp_fun = lambda: jnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_period={}_left={}_right={}".format(
jtu.format_shape_dtype_string(shape, dtype), period, left, right),
"shape": shape, "dtype": dtype,
"period": period, "left": left, "right": right}
for shape in nonempty_shapes
for period in [None, 0.59]
for left in [None, 0]
for right in [None, 1]
for dtype in default_dtypes
# following types lack precision for meaningful tests
if dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16]
))
def testInterp(self, shape, dtype, period, left, right):
rng = jtu.rand_default(self.rng(), scale=10)
kwds = dict(period=period, left=left, right=right)
np_fun = partial(np.interp, **kwds)
jnp_fun = partial(jnp.interp, **kwds)
args_maker = lambda: [rng(shape, dtype), np.sort(rng((20,), dtype)), np.linspace(0, 1, 20)]
# skip numpy comparison for integer types with period specified, because numpy
# uses an unstable sort and so results differ for duplicate values.
if not (period and np.issubdtype(dtype, np.integer)):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol={np.float32: 2E-4})
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x1={}_x2={}_x1_rng={}".format(
jtu.format_shape_dtype_string(x1_shape, x1_dtype),
jtu.format_shape_dtype_string(x2_shape, np.int32),
x1_rng_factory_id),
"x1_shape": x1_shape, "x1_dtype": x1_dtype,
"x2_shape": x2_shape, "x1_rng_factory": x1_rng_factory,
"x2_rng_factory": x2_rng_factory}
for x1_rng_factory_id, x1_rng_factory in
enumerate([jtu.rand_some_inf_and_nan, jtu.rand_some_zero])
for x2_rng_factory in [partial(jtu.rand_int, low=-1075, high=1024)]
for x1_shape, x2_shape in filter(_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(array_shapes, 2))
for x1_dtype in default_dtypes))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testLdexp(self, x1_shape, x1_dtype, x2_shape, x1_rng_factory, x2_rng_factory):
# integer types are converted to float64 in numpy's implementation
if (x1_dtype not in [jnp.bfloat16, np.float16, np.float32]
and not config.x64_enabled):
self.skipTest("Only run float64 testcase when float64 is enabled.")
x1_rng = x1_rng_factory(self.rng())
x2_rng = x2_rng_factory(self.rng())
np_fun = lambda x1, x2: np.ldexp(x1, x2)
np_fun = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(np_fun)
jnp_fun = lambda x1, x2: jnp.ldexp(x1, x2)
args_maker = lambda: [x1_rng(x1_shape, x1_dtype),
x2_rng(x2_shape, np.int32)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_rng_factory={}".format(
jtu.format_shape_dtype_string(shape, dtype), rng_factory_id),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for rng_factory_id, rng_factory in enumerate([
jtu.rand_some_inf_and_nan,
jtu.rand_some_zero,
partial(jtu.rand_not_small, offset=1e8),
])
for shape in all_shapes
for dtype in default_dtypes))
def testFrexp(self, shape, dtype, rng_factory):
# integer types are converted to float64 in numpy's implementation
if (dtype not in [jnp.bfloat16, np.float16, np.float32]
and not config.x64_enabled):
self.skipTest("Only run float64 testcase when float64 is enabled.")
rng = rng_factory(self.rng())
np_fun = lambda x: np.frexp(x)
jnp_fun = lambda x: jnp.frexp(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=np.issubdtype(dtype, np.inexact))
self._CompileAndCheck(jnp_fun, args_maker)
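# A minimal sketch of the frexp/ldexp round trip these two tests rely on: frexp
# decomposes x into a mantissa in [0.5, 1) and an integer exponent, and ldexp
# reassembles them, so for example
#
#     m, e = jnp.frexp(8.0)          # m == 0.5, e == 4, since 8 = 0.5 * 2**4
#     jnp.ldexp(m, e)                # -> 8.0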
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype),
out_dtype, offset, axis1, axis2),
"dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
"axis1": axis1, "axis2": axis2}
for dtype in default_dtypes
for out_dtype in [None] + number_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in range(-len(shape), len(shape))
if (axis1 % len(shape)) != (axis2 % len(shape))
for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2):
rng = jtu.rand_default(self.rng())
def np_fun(arg):
if out_dtype == jnp.bfloat16:
return np.trace(arg, offset, axis1, axis2, np.float32).astype(jnp.bfloat16)
else:
return np.trace(arg, offset, axis1, axis2, out_dtype)
jnp_fun = lambda arg: jnp.trace(arg, offset, axis1, axis2, out_dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}_v={}_side={}".format(
jtu.format_shape_dtype_string(ashape, dtype),
jtu.format_shape_dtype_string(vshape, dtype),
side), "ashape": ashape, "vshape": vshape, "side": side,
"dtype": dtype}
for ashape in [(15,), (16,), (17,)]
for vshape in [(), (5,), (5, 5)]
for side in ['left', 'right']
for dtype in default_dtypes
))
def testSearchsorted(self, ashape, vshape, side, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [np.sort(rng(ashape, dtype)), rng(vshape, dtype)]
np_fun = lambda a, v: np.searchsorted(a, v, side=side)
jnp_fun = lambda a, v: jnp.searchsorted(a, v, side=side)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_bins={}_right={}_reverse={}".format(
jtu.format_shape_dtype_string(xshape, dtype),
jtu.format_shape_dtype_string(binshape, dtype),
right, reverse), "xshape": xshape, "binshape": binshape,
"right": right, "reverse": reverse, "dtype": dtype}
for xshape in [(20,), (5, 4)]
for binshape in [(1,), (5,)]
for right in [True, False]
for reverse in [True, False]
for dtype in default_dtypes
))
def testDigitize(self, xshape, binshape, right, reverse, dtype):
order = jax.ops.index[::-1] if reverse else jax.ops.index[:]
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(xshape, dtype), jnp.sort(rng(binshape, dtype))[order]]
np_fun = lambda x, bins: np.digitize(x, bins, right=right)
jnp_fun = lambda x, bins: jnp.digitize(x, bins, right=right)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
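# Note (sketch): for monotonically increasing bins, digitize is essentially searchsorted
# with the sides swapped, which is why the two tests above mirror each other:
#
#     np.digitize(x, bins, right=False) == np.searchsorted(bins, x, side='right')
#     np.digitize(x, bins, right=True)  == np.searchsorted(bins, x, side='left')
#
# e.g. both np.digitize(2.0, [1.0, 2.0, 3.0]) and
# np.searchsorted([1.0, 2.0, 3.0], 2.0, side='right') give 2.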
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_array={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
"shape": shape, "dtypes": dtypes, "array_input": array_input}
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 5)]
for array_input in [True, False]))
def testColumnStack(self, shape, dtypes, array_input):
rng = jtu.rand_default(self.rng())
if array_input:
args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
else:
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(np.column_stack)
jnp_fun = jnp.column_stack
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_array={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis, array_input),
"shape": shape, "axis": axis, "dtypes": dtypes, "array_input": array_input}
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for array_input in | |
name in svm_metrics:
exec('{}_cv_train = np.mean({}_cv_train)'.format(name, name))
exec('mean{}_shuffle_train.append({}_cv_train)'.format(name, name))
exec('{}_cv_test = np.mean({}_cv_test)'.format(name, name))
exec('mean{}_shuffle_test.append({}_cv_test)'.format(name, name))
# Average over all shuffles to get the final result for this parameter combination
for name in svm_metrics:
exec('mean{}_shuffle_train = np.mean(mean{}_shuffle_train)'.format(name, name))
exec('mean{}_weight_train.append(mean{}_shuffle_train)'.format(name, name))
exec('mean{}_shuffle_test = np.mean(mean{}_shuffle_test)'.format(name, name))
exec('mean{}_weight_test.append(mean{}_shuffle_test)'.format(name, name))
# For brevity, store the train/test metric means in dicts before returning them
train_means = {}
test_means = {}
for name in svm_metrics:
exec("train_means['{}'] = mean{}_weight_train".format(name, name))
exec("test_means['{}'] = mean{}_weight_test".format(name, name))
# Store the names of the weight combinations in a dict
weight_name = {}
weight_name['weight1'] = weight1_col
weight_name['weight2'] = weight2_col
# Save the metric grid above as CSV via a DataFrame
train_pd, train_ndarray = [], []
test_pd, test_ndarray = [], []
for name in svm_metrics:
exec("train_ndarray.append(train_means['{}'])".format(name))
exec("test_ndarray.append(test_means['{}'])".format(name))
train_ndarray = np.array(train_ndarray)
test_ndarray = np.array(test_ndarray)
train_pd = pd.DataFrame(train_ndarray, index=svm_metrics, columns=['weight1='+str(round(i, 2)) for i in weight_name['weight1']])
test_pd = pd.DataFrame(test_ndarray, index=svm_metrics, columns=['weight1='+str(round(i, 2)) for i in weight_name['weight1']])
train_pd.to_csv(os.path.join(self.path, 'grid_result_train.csv'))
test_pd.to_csv(os.path.join(self.path, 'grid_result_test.csv'))
return train_means, test_means, weight_name
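# Note: the exec()-based bookkeeping used throughout this class builds variable names such
# as accuracy_cv_train on the fly. A dict-based sketch of the same accumulation pattern
# (the names below are illustrative, not the ones used in this file) avoids exec(), which
# cannot reliably rebind local variables inside a Python 3 function:
#
#     cv_train = {name: [] for name in svm_metrics}          # per-fold scores
#     cv_test = {name: [] for name in svm_metrics}
#     ...
#     for name in svm_metrics:
#         cv_train[name].append(fold_scores_train[name])     # one value per fold
#         cv_test[name].append(fold_scores_test[name])
#     ...
#     train_means = {name: float(np.mean(v)) for name, v in cv_train.items()}
#     test_means = {name: float(np.mean(v)) for name, v in cv_test.items()}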
def mksvm3(self, para, kernel_dict, svm_metrics, shuffle_time=100, outer=10):
'''Multi-kernel SVM, for three modalities only. Grid search applies only to the fusion coefficients; the SVM parameters must be set in advance.
Inputs:
outer: number of cross-validation folds used within each shuffle
shuffle_time: number of shuffles to run
kernel_dict: dict containing:
'kernel_type1': type of the first kernel; supports linear, poly, rbf
'kernel_type2': type of the second kernel; supports linear, poly, rbf
'kernel_type3': type of the third kernel; supports linear, poly, rbf
'kernel_weight1': fusion coefficient of the first kernel
'kernel_weight2': fusion coefficient of the second kernel
'kernel_weight3': fusion coefficient of the third kernel; note that kernel_weight1, kernel_weight2 and kernel_weight3 must sum to 1
para: dict containing:
'kernel1': parameter of the first kernel (none for linear, degree for poly, gamma for rbf)
'kernel2': parameter of the second kernel (none for linear, degree for poly, gamma for rbf)
'kernel3': parameter of the third kernel (none for linear, degree for poly, gamma for rbf)
'C': the SVM regularization parameter C
svm_metrics: list of metrics to compute from the SVM output; currently supports accuracy, precision, recall, f1, sensitivity, specificity.
Full names are required; abbreviations are not accepted.
Outputs:
train_means: dict keyed by the metrics in svm_metrics; each value is a list holding, for each of the shuffle_time shuffles, the cross-validated
mean of that metric on the training set. After it is returned, the overall average can be obtained with mean().
train_std: like the dict above, but holding the standard deviation over all shuffles instead of the mean
test_means: like the dict above, but holding the means on the test set
test_std: like the dict above, but holding the standard deviations on the test set
roc_dict: dict with the lists needed to plot ROC curves, containing:
tpr_train: list, cross-validated mean tpr of each shuffle on the training set
tpr_test: cross-validated mean tpr of each shuffle on the test set
tpr_list_train: 2-D list; the tpr of every cross-validation fold on the training set, one list per shuffle
tpr_list_test: 2-D list; the tpr of every cross-validation fold on the test set, one list per shuffle
fpr_train: list, the fpr grid used on the training set (actually an evenly spaced sequence of user-defined length)
fpr_test: list, the fpr grid used on the test set (actually an evenly spaced sequence of user-defined length)
auc_list_train: list of the AUC obtained for each shuffle on the training set
auc_list_test: list of the AUC obtained for each shuffle on the test set
auc_train: float, mean AUC over all shuffles on the training set
auc_test: float, mean AUC over all shuffles on the test set
The first four dicts are mainly for plotting each metric against the shuffle index: the means give the metric curves and the stds can shade a standard-deviation band.
The really useful entries of roc_dict are tpr_train, tpr_test, fpr_train and fpr_test; averaging each of them gives everything needed to draw the ROC curve.
auc_list_* can be used to plot AUC against shuffle; the remaining entries are rarely needed and are kept just in case.
'''
# Write the SVM parameters to a txt file
svm_shuffle_path = os.path.join(self.path, 'mkl_svm')
os.makedirs(svm_shuffle_path, exist_ok=True)
svm_txt = open(os.path.join(self.path, 'mkl_svm_result.txt'), 'w')
svm_txt.write('Multi-Kernel Support Vector Machine parameters set:\n')
svm_txt.write('C value: % s' % para['C'])
svm_txt.write('\nShuffle time: % s' % shuffle_time)
svm_txt.write('\nCross validation-fold: % s' % outer)
svm_txt.write('\nsvm metrics: % s\n' % svm_metrics)
svm_txt.write('\n---------------------------------------------\n')
svm_txt.write('Kernel-1 parameter set:')
svm_txt.write('\nKernel type: % s' % kernel_dict['kernel_type1'])
svm_txt.write('\nKernel weight: % s' % kernel_dict['kernel_weight1'])
svm_txt.write('\nKernel parameter: % s' % para['kernel1'])
svm_txt.write('\n---------------------------------------------\n')
svm_txt.write('Kernel-2 parameter set:\n')
svm_txt.write('\nKernel type: % s' % kernel_dict['kernel_type2'])
svm_txt.write('\nKernel weight: % s' % kernel_dict['kernel_weight2'])
svm_txt.write('\nKernel parameter: % s' % para['kernel2'])
svm_txt.write('\n---------------------------------------------\n')
svm_txt.write('Kernel-3 parameter set:\n')
svm_txt.write('\nKernel type: % s' % kernel_dict['kernel_type3'])
svm_txt.write('\nKernel weight: % s' % kernel_dict['kernel_weight3'])
svm_txt.write('\nKernel parameter: % s' % para['kernel3'])
# Initialize the per-shuffle lists for each metric
for name in svm_metrics:
exec('train_{}_means = []'.format(name))
exec('train_{}_std = []'.format(name))
exec('test_{}_means = []'.format(name))
exec('test_{}_std = []'.format(name))
# Define fpr directly as an evenly spaced sequence
meanfpr_shuffle_train = np.linspace(0, 1, 100)
meanfpr_shuffle_test = np.linspace(0, 1, 100)
# Initialize tpr and auc as empty; in the end the tpr lists have the same length as the fpr grid, and auc has one entry per shuffle
tpr_shuffle_train = []
auc_list_train = []
tpr_shuffle_test = []
auc_list_test = []
for i in range(shuffle_time):
outer_cv = StratifiedKFold(n_splits=outer, shuffle=True, random_state=i)
# Initialize the per-fold lists for each metric within cross-validation
for name in svm_metrics:
exec('{}_cv_train = []'.format(name))
exec('{}_cv_test = []'.format(name))
# Initialize the per-fold tpr lists for cross-validation
tpr_fold_train = []
tpr_fold_test = []
# Cross-validation loop for this shuffle
for train, test in outer_cv.split(self.X1, self.y):
# Split the data into training and test sets; mat1/mat2 are the train/test feature matrices and mod1, mod2, mod3 are the three modalities
mod1_mat1 = self.X1[train]
mod1_mat2 = self.X1[test]
mod2_mat1 = self.X2[train]
mod2_mat2 = self.X2[test]
mod3_mat1 = self.X3[train]
mod3_mat2 = self.X3[test]
y_train = self.y[train]
y_test = self.y[test]
# Fetch the fusion coefficients of the three kernels
weight1 = kernel_dict['kernel_weight1']
weight2 = kernel_dict['kernel_weight2']
weight3 = kernel_dict['kernel_weight3']
# Compute the kernel matrices; first create a kernel instance for each modality
kernel_mod1 = kernel(kernel_type=kernel_dict['kernel_type1'],
kernel_para=para['kernel1'])
kernel_mod2 = kernel(kernel_type=kernel_dict['kernel_type2'],
kernel_para=para['kernel2'])
kernel_mod3 = kernel(kernel_type=kernel_dict['kernel_type3'],
kernel_para=para['kernel3'])
# Compute the training and test kernel matrices for mod1
mod1_train = kernel_mod1.calckernel(mod1_mat1)
mod1_test = kernel_mod1.calckernel(mod1_mat1, mod1_mat2)
# Compute the training and test kernel matrices for mod2
mod2_train = kernel_mod2.calckernel(mod2_mat1)
mod2_test = kernel_mod2.calckernel(mod2_mat1, mod2_mat2)
# Compute the training and test kernel matrices for mod3
mod3_train = kernel_mod3.calckernel(mod3_mat1)
mod3_test = kernel_mod3.calckernel(mod3_mat1, mod3_mat2)
# Fuse the kernel matrices of the three modalities
train_kernel = weight1 * mod1_train + weight2 * mod2_train + weight3 * mod3_train
test_kernel = weight1 * mod1_test + weight2 * mod2_test + weight3 * mod3_test
# X_corr = pd.DataFrame(train_kernel)
# corr_mat = X_corr.corr()
# fig, axe = plt.subplots(figsize=(12, 9))
# sns.heatmap(corr_mat, ax=axe)
# fig.savefig(os.path.join('feature_corr_heatmap% s.png' % i))
# SVM training
svm = SVC(kernel='precomputed', C=para['C'], probability=True)
svm.fit(train_kernel, y_train)
# Get predictions on the training and test sets
pred_train = svm.predict(train_kernel)
prob_train = svm.predict_proba(train_kernel)
pred_test = svm.predict(test_kernel)
prob_test = svm.predict_proba(test_kernel)
mics = classifier_mics(y_train, pred_train, prob_train,
y_test, pred_test, prob_test, path='mkl_svm')
# Compute the metrics for this fold
accuracy_train, precision_train, recall_train, f1_train = mics.mics_sum_train()
accuracy_test, precision_test, recall_test, f1_test = mics.mics_sum_test()
sensitivity_train, sensitivity_test = mics.sensitivity()
specificity_train, specificity_test = mics.specificity()
# All metrics are computed, but only those requested in svm_metrics are appended to the lists
for name in svm_metrics:
exec('{}_cv_train.append({}_train)'.format(name, name))
exec('{}_cv_test.append({}_test)'.format(name, name))
# Compute fpr and tpr
fpr_train, tpr_train, thres_train = roc_curve(y_train, prob_train[:, 1])
fpr_test, tpr_test, thres_test = roc_curve(y_test, prob_test[:, 1])
# Interpolate tpr onto the fixed fpr grid (same length as meanfpr_shuffle_train/test)
tpr_fold_train.append(np.interp(meanfpr_shuffle_train, fpr_train, tpr_train))
tpr_fold_test.append(np.interp(meanfpr_shuffle_test, fpr_test, tpr_test))
tpr_fold_train[-1][0] = 0.0
tpr_fold_test[-1][0] = 0.0
# Average over all cross-validation folds
meantpr_fold_train = np.mean(tpr_fold_train, axis=0)
meantpr_fold_test = np.mean(tpr_fold_test, axis=0)
tpr_shuffle_train.append(meantpr_fold_train)
tpr_shuffle_test.append(meantpr_fold_test)
auc_train = auc(meanfpr_shuffle_train, meantpr_fold_train)
auc_test = auc(meanfpr_shuffle_test, meantpr_fold_test)
auc_list_train.append(auc_train)
auc_list_test.append(auc_test)
# For every shuffle, compute the mean and std of each metric and store them in train/test_{}_means and train/test_{}_std
for name in svm_metrics:
# Store the cross-validated mean of each metric on the training set
exec('{}_cv_train = np.array({}_cv_train)'.format(name, name))
exec("train_{}_means.append({}_cv_train.mean())".format(name, name))
# Store the cross-validated std of each metric on the training set
exec("train_{}_std.append({}_cv_train.std())".format(name, name))
# Store the cross-validated mean of each metric on the test set
exec('{}_cv_test = np.array({}_cv_test)'.format(name, name))
exec("test_{}_means.append({}_cv_test.mean())".format(name, name))
# Store the cross-validated std of each metric on the test set
exec("test_{}_std.append({}_cv_test.std())".format(name, name))
# Average the results over all shuffles
meantpr_shuffle_train = np.mean(tpr_shuffle_train, axis=0)
meantpr_shuffle_test = np.mean(tpr_shuffle_test, axis=0)
final_auc_train = auc(meanfpr_shuffle_train, meantpr_shuffle_train)
final_auc_test = auc(meanfpr_shuffle_test, meantpr_shuffle_test)
# For brevity, store the train/test metric means and stds in dicts before returning them
train_means = {}
train_std = {}
test_means = {}
test_std = {}
for name in svm_metrics:
exec("train_means['{}'] = train_{}_means".format(name, name))
exec("train_std['{}'] = train_{}_std".format(name, name))
exec("test_means['{}'] = test_{}_means".format(name, name))
exec("test_std['{}'] = test_{}_std".format(name, name))
# For brevity, collect the variables needed to plot ROC curves in a single dict
roc_dict = {}
roc_dict['tpr_train'] = meantpr_shuffle_train
roc_dict['tpr_test'] = meantpr_shuffle_test
roc_dict['tpr_list_train'] = tpr_shuffle_train
roc_dict['tpr_list_test'] = tpr_shuffle_test
roc_dict['fpr_train'] = meanfpr_shuffle_train
roc_dict['fpr_test'] = meanfpr_shuffle_test
roc_dict['auc_list_train'] = auc_list_train
roc_dict['auc_list_test'] = auc_list_test
roc_dict['auc_train'] = final_auc_train
roc_dict['auc_test'] = final_auc_test
# Write the results to the txt file
for name in svm_metrics:
svm_txt.write('\n---------------------------------------------\n')
exec("svm_txt.write('Train set {} mean value: % s' % np.mean(train_means['{}']))".format(name, name))
svm_txt.write('\n')
exec("svm_txt.write('Train set {} max value: % s' % np.max(train_means['{}']))".format(name, name))
svm_txt.write('\n')
exec("svm_txt.write('Train set {} min value: % s' % np.min(train_means['{}']))".format(name, name))
svm_txt.write('\n---------------------------------------------\n')
exec("svm_txt.write('Test set {} mean value: % s' % np.mean(test_means['{}']))".format(name, name))
svm_txt.write('\n')
exec("svm_txt.write('Test set {} max value: % s' % np.max(test_means['{}']))".format(name, name))
svm_txt.write('\n')
exec("svm_txt.write('Test set {} min value: % s' % np.min(test_means['{}']))".format(name, name))
svm_txt.write('\n---------------------------------------------\n')
svm_txt.write('\nTrain set AUC mean value: % s' % np.mean(roc_dict['auc_list_train']))
svm_txt.write('\nTrain set AUC max value: % s' % np.max(roc_dict['auc_list_train']))
svm_txt.write('\nTrain set AUC min value: % s' % np.min(roc_dict['auc_list_train']))
svm_txt.write('\n---------------------------------------------\n')
svm_txt.write('\nTest set AUC mean value: % s' % np.mean(roc_dict['auc_list_test']))
svm_txt.write('\nTest set AUC max value: % s' % np.max(roc_dict['auc_list_test']))
svm_txt.write('\nTest set AUC min value: % s' % np.min(roc_dict['auc_list_test']))
return train_means, train_std, test_means, test_std, roc_dict
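# Hedged usage sketch for mksvm3 (the instance name `model` and every value below are
# illustrative, not defaults; the three weights just have to sum to 1):
#
#     kernel_dict = {'kernel_type1': 'rbf', 'kernel_type2': 'linear', 'kernel_type3': 'poly',
#                    'kernel_weight1': 0.5, 'kernel_weight2': 0.3, 'kernel_weight3': 0.2}
#     para = {'kernel1': 0.01, 'kernel2': None, 'kernel3': 3, 'C': 1.0}
#     train_means, train_std, test_means, test_std, roc_dict = model.mksvm3(
#         para, kernel_dict, ['accuracy', 'sensitivity', 'specificity'],
#         shuffle_time=10, outer=5)
#     # roc_dict['fpr_test'] / roc_dict['tpr_test'] give the mean test ROC curve,
#     # and roc_dict['auc_test'] its AUC.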
def mksvm3_grid(self, para, kernel_dict, svm_metrics, shuffle_time=100, cv=10, grid_num=10):
'''Multi-kernel SVM, for three modalities only. Grid search applies only to the fusion coefficients; the SVM parameters must be set in advance.
Inputs:
grid_num: number of grid values between 0 and 1, e.g. grid_num=10 gives the 10 values 0 to 0.9
cv: number of cross-validation folds used for each grid parameter combination
shuffle_time: number of shuffles to run
Outputs:
train_means: dict; for each metric in svm_metrics, the mean over all shuffles on the training set
test_means: dict; for each metric in svm_metrics, the mean over all shuffles on the test set
weight_name: dict with keys weight1, weight2 and weight3; each holds the list of fusion coefficients of the corresponding modality, in matching order
'''
# Initialize the metric lists for each weight combination
for name in svm_metrics:
exec('mean{}_weight_train = []'.format(name))
exec('mean{}_weight_test = []'.format(name))
# Initialize the lists that record each weight combination
weight1_col = []
weight2_col = []
weight3_col = []
# Build an evenly spaced sequence of coefficients from grid_num
# grid_list = np.arange(0, 1, 1/grid_num)
# Loop over the fusion coefficients
for weight1 in list(np.arange(0, 1, 1/grid_num)):
for weight2 in list(np.arange(0, 1-weight1, 1/grid_num)):
# Assign the kernel-fusion coefficients for the three modalities
# weight1 = weight1 * 0.1
# weight2 = weight2 * 0.1
weight3 = 1 - weight1 - weight2
# Record the fusion coefficients
weight1_col.append(weight1)
weight2_col.append(weight2)
weight3_col.append(weight3)
# Initialize according to the metrics requested in svm_metrics
for name in svm_metrics:
exec('mean{}_shuffle_train = []'.format(name))
exec('mean{}_shuffle_test = []'.format(name))
# Start the shuffles for this weight combination
for i in range(shuffle_time):
outer = StratifiedKFold(n_splits=cv, shuffle=True, random_state=i)
# Initialize the per-fold metric lists for this shuffle's cross-validation
for name in svm_metrics:
exec('{}_cv_train = []'.format(name))
exec('{}_cv_test = []'.format(name))
# Cross-validation loop for this shuffle
for train, test in outer.split(self.X1, self.y):
# Split the data into training and test sets; mat1/mat2 are the train/test feature matrices and mod1, mod2, mod3 are the three modalities
mod1_mat1 = self.X1[train]
mod1_mat2 = self.X1[test]
mod2_mat1 = self.X2[train]
mod2_mat2 = self.X2[test]
mod3_mat1 = self.X3[train]
mod3_mat2 = self.X3[test]
y_train = self.y[train]
y_test = self.y[test]
# Compute the kernel matrices; first create a kernel instance for each modality
kernel_mod1 = kernel(kernel_type=kernel_dict['kernel_type1'],
kernel_para=para['kernel1'])
kernel_mod2 = kernel(kernel_type=kernel_dict['kernel_type2'],
kernel_para=para['kernel2'])
kernel_mod3 = kernel(kernel_type=kernel_dict['kernel_type3'],
kernel_para=para['kernel3'])
# Compute the training and test kernel matrices for mod1
mod1_train = kernel_mod1.calckernel(mod1_mat1)
mod1_test = kernel_mod1.calckernel(mod1_mat1, mod1_mat2)
# Compute the training and test kernel matrices for mod2
mod2_train = kernel_mod2.calckernel(mod2_mat1)
mod2_test = kernel_mod2.calckernel(mod2_mat1, mod2_mat2)
# Compute the training and test kernel matrices for mod3
mod3_train = kernel_mod3.calckernel(mod3_mat1)
mod3_test = kernel_mod3.calckernel(mod3_mat1, mod3_mat2)
# Fuse the kernel matrices of the three modalities
train_kernel = weight1 * mod1_train + weight2 * mod2_train + weight3 * mod3_train
test_kernel = weight1 * mod1_test + weight2 * mod2_test + weight3 * mod3_test
# SVM training
svm = SVC(kernel='precomputed', C=para['C'], probability=True)
svm.fit(train_kernel, y_train)
# Get predictions on the training and test sets
pred_train = svm.predict(train_kernel)
prob_train = svm.predict_proba(train_kernel)
pred_test = svm.predict(test_kernel)
prob_test = svm.predict_proba(test_kernel)
mics = classifier_mics(y_train, pred_train, prob_train,
y_test, pred_test, prob_test, path=self.path)
# Compute the metrics for this fold
accuracy_train, precision_train, recall_train, f1_train = mics.mics_sum_train()
accuracy_test, precision_test, recall_test, f1_test = mics.mics_sum_test()
sensitivity_train, sensitivity_test = mics.sensitivity()
specificity_train, specificity_test = mics.specificity()
# All metrics are computed, but only those requested in svm_metrics are appended to the lists
for name in svm_metrics:
exec('{}_cv_train.append({}_train)'.format(name, name))
exec('{}_cv_test.append({}_test)'.format(name, name))
# After cross-validation, average the folds as the result of this run and append it to the shuffle list
for name in svm_metrics:
exec('{}_cv_train = np.mean({}_cv_train)'.format(name, name))
exec('mean{}_shuffle_train.append({}_cv_train)'.format(name, name))
exec('{}_cv_test = np.mean({}_cv_test)'.format(name, name))
exec('mean{}_shuffle_test.append({}_cv_test)'.format(name, name))
# Average over all shuffles to get the final result for this parameter combination
for name in svm_metrics:
exec('mean{}_shuffle_train = np.mean(mean{}_shuffle_train)'.format(name, name))
exec('mean{}_weight_train.append(mean{}_shuffle_train)'.format(name, name))
exec('mean{}_shuffle_test = np.mean(mean{}_shuffle_test)'.format(name, name))
exec('mean{}_weight_test.append(mean{}_shuffle_test)'.format(name, name))
# For brevity, store the train/test metric means in dicts before returning them
train_means = {}
test_means = {}
for name in svm_metrics:
exec("train_means['{}'] = mean{}_weight_train".format(name, name))
exec("test_means['{}'] = mean{}_weight_test".format(name, name))
# Store the names of the weight combinations in a dict
weight_name = {}
weight_name['weight1'] = weight1_col
weight_name['weight2'] = weight2_col
weight_name['weight3'] = weight3_col
# Save the metric grid above as CSV via a DataFrame
train_pd, train_ndarray = [], []
test_pd, test_ndarray = [], []
for | |
'''
olympics.py
A command line program used to query data from the related olympics database.
Code by <NAME>, 21 October 2021
Credits: <NAME> - psycopg2-sample.py
For use in the "olympics" assignment from Carleton's
CS 257 Software Design class, Fall 2021.
'''
import argparse
import config
import psycopg2
def get_parsed_arguments():
'''
Gets arguments from command line.
'''
# Help descriptions for each argument and the argparser.
arg_parse_description = '''Finds information about the athletes registered under a specific NOC (National Olympic Committee), the athletes who have participated in a given event, the athletes who participated in a certain year, and the athletes who have medaled.
Additionally, finds the number of gold medals each NOC has won.
The -e, -m, -n, and -y flags can all be combined in any order. The -g flag can only be modified by the -e and -y flags.'''
noc_help = 'Queries the olympics database for every athlete from a given NOC'
gold_help = 'Queries the olympics database for every NOC sorted by the number of gold medals won'
event_help = 'Queries the olympics database for every athlete that has participated in the given event. When used with -n, restricts the query to all athletes from a certain NOC who have also participated in the specified event. When used with -g, restricts the query to all medals won by an NOC in the specified event.'
year_help = 'Queries the olympics database for every athlete that participated in the given year. When used with -n, restricts the query to all athletes from a certain NOC who participated in the given year. When used with -g, restricts the query to all medals won by each NOC in a certain year. When used with -e, restricts the query to all athletes from a certain event who participated in the given year.'
medal_help = 'Queries the olympics database for every athlete that has medaled, sorted by the number of medals won. When used with -n, -e, or -y restricts the query to all athletes who have medaled.'
# Creates the argument parser and all flags for the program (-n/--noc, -g/--gold, -e/--event, -y/--year, -m/--medal).
parser = argparse.ArgumentParser(description=arg_parse_description)
parser.add_argument('-n', '--noc', metavar='NOC_CODE', nargs=1, type=str, help=noc_help)
parser.add_argument('-g', '--gold', action='store_true', help=gold_help)
parser.add_argument('-e', '--event', metavar='EVENT_NAME', nargs=1, type=str, help=event_help)
parser.add_argument('-y', '--year', metavar='YEAR', nargs=1, type=int, help=year_help)
parser.add_argument('-m', '--medal', action='store_true', help=medal_help)
parsed_arguments = parser.parse_args()
# Prevents the -g flag from being used with the -n or -m flag
if parsed_arguments.gold and (parsed_arguments.noc or parsed_arguments.medal):
parser.error('-g/--gold cannot be used with -n/--noc or -m/--medal')
return parsed_arguments
def form_variable_query(noc_code, event, medal, year):
query = 'SELECT '
fields = ['athletes.athlete_name', 'noc_regions.code', 'noc_regions.region', 'events.event', 'sports.sport', 'games.title', 'medals.medal']
tables = ['athletes', 'athletes_biometrics', 'athletes_super_table', 'noc_regions', 'events', 'sports', 'games', 'medals']
where_statements = ['athletes_biometrics.athletes_id = athletes.id', 'athletes_super_table.athletes_biometrics_id = athletes_biometrics.athletes_id', 'athletes_super_table.noc_id = noc_regions.id', 'athletes_super_table.event_id = events.id', 'events.sport_id = sports.id', 'athletes_super_table.games_id = games.id', 'athletes_super_table.medal_id = medals.id']
# The commented-out lines in the if statements below work and would normally be included; they are left out here for clarity, so the query output shows that the correct rows are returned.
if noc_code:
#fields.remove('noc_regions.code')
fields.remove('noc_regions.region')
where_statements.append('noc_regions.code LIKE \'{noc_code}\'')
if event:
fields.remove('sports.sport')
#fields.remove('events.event')
where_statements.append('events.event LIKE \'{event_name}\'')
if year:
#fields.remove('games.title')
where_statements.append('cast(games.year AS TEXT) LIKE cast(\'{games_year}\' AS TEXT)')
if medal:
where_statements.append('medals.medal NOT LIKE \'NA\'')
for item in fields:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'FROM '
for item in tables:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'WHERE '
for item in where_statements:
query += item + '\nAND '
# Removes the last 'AND' from the query string
query = query[:-4]
# Orders the list by the type of medals won (adapted from https://stackoverflow.com/questions/6332043/sql-order-by-multiple-values-in-specific-order)
if medal:
query += '''ORDER BY
CASE
WHEN medals.medal = \'Gold\' THEN 1
WHEN medals.medal = \'Silver\' THEN 2
WHEN medals.medal = \'Bronze\' THEN 3
END'''
query += ';'
print(query)
return query
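# Rough shape of the SQL produced by form_variable_query (a sketch; the seven join
# conditions are abbreviated). For noc_code=True, event=False, medal=True, year=False
# it is approximately:
#
#     SELECT athletes.athlete_name, noc_regions.code, events.event, sports.sport,
#            games.title, medals.medal
#     FROM athletes, athletes_biometrics, athletes_super_table, noc_regions,
#          events, sports, games, medals
#     WHERE <join conditions>
#     AND noc_regions.code LIKE '{noc_code}'
#     AND medals.medal NOT LIKE 'NA'
#     ORDER BY CASE WHEN medals.medal = 'Gold' THEN 1 ... END;
#
# The {noc_code}/{event_name}/{games_year} placeholders are filled in later with
# str.format() by run_variable_query.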
def form_golden_query(event, year):
query = 'SELECT '
fields = ['COUNT(medals.medal)', 'noc_regions.region']
tables = ['medals', 'athletes_super_table', 'noc_regions']
where_statements = ['athletes_super_table.medal_id = medals.id', 'medals.medal LIKE \'Gold\'', 'athletes_super_table.noc_id = noc_regions.id']
if event:
tables.append('events')
tables.append('sports')
where_statements.append('athletes_super_table.event_id = events.id')
where_statements.append('events.sport_id = sports.id')
where_statements.append('events.event LIKE \'{event_name}\'')
if year:
tables.append('games')
where_statements.append('athletes_super_table.games_id = games.id')
where_statements.append('cast(games.year AS TEXT) LIKE cast(\'{games_year}\' AS TEXT)')
for item in fields:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'FROM '
for item in tables:
query += item + ', '
# Removes the last comma from the query string
query = query[:-2] + '\n'
query += 'WHERE '
for item in where_statements:
query += item + '\nAND '
# Removes the last 'AND' from the query string
query = query[:-4]
query += 'GROUP BY noc_regions.region\n'
query += 'ORDER BY COUNT(medals.medal) DESC, noc_regions.region;'
return query
def run_variable_query(cursor, noc_code='', event_name='', medal=False, games_year=0):
noc = False
if noc_code != '':
noc = True
event = False
if event_name != '':
event = True
year = False
if games_year != 0:
year = True
query = form_variable_query(noc, event, medal, year)
try:
cursor.execute(query.format(noc_code=noc_code, event_name=event_name, games_year=games_year))
except Exception as e:
print(e)
exit()
def run_golden_query(cursor, event_name='', games_year=0):
event = False
if event_name != '':
event = True
year = False
if games_year != 0:
year = True
query = form_golden_query(event, year)
try:
cursor.execute(query.format(event_name=event_name, games_year=games_year))
except Exception as e:
print(e)
exit()
def fix_single_quotes(broken_string):
temp_string_array = broken_string.split('\'')
fixed_string = ''
for substring in temp_string_array:
fixed_string += substring + '%'
fixed_string = fixed_string[:-1]
return fixed_string
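# Example (sketch) of what fix_single_quotes does:
#
#     fix_single_quotes("cote d'ivoire")   # -> "cote d%ivoire"
#
# The apostrophe becomes a '%' LIKE-wildcard, so the value can be embedded in the
# single-quoted LIKE patterns built above without breaking the SQL string.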
def user_input_identifier(cursor, input_string, field, table):
query = 'SELECT {table}.{primary_field}'
if table == 'noc_regions':
query += ', noc_regions.region'
elif table == 'events':
query += ', sports.sport'
query += ' FROM {table}'
if table == 'events':
query += ', sports'
query += ' WHERE cast({table}.{primary_field} AS TEXT) ILIKE cast(\'%{input_string}%\' AS TEXT)'
if table == 'noc_regions':
query += ' OR noc_regions.region ILIKE \'%{input_string}%\''
query += ' GROUP BY noc_regions.code, noc_regions.region'
elif table == 'events':
query += ' AND events.sport_id = sports.id'
query += ';'
try:
cursor.execute(query.format(primary_field=field, table=table, input_string=input_string))
except Exception as e:
print(e)
exit()
""" print(query.format(primary_field=field, table=table, input_string=input_string))
print(cursor.rowcount)
exit() """
if cursor.rowcount == 0:
print('That string is not present in the appropriate table. Please run the program again.')
exit()
if cursor.rowcount == 1:
# Exactly one match: return the primary field of that single row
temp_query_list = cursor.fetchall()
return temp_query_list[0][0]
else:
print('Did you mean one of the following?')
if table == 'noc_regions':
print(' Code' + ' ' + 'Region')
print('=' * 30)
elif table == 'events':
print(' Events' + ' ' * (54) + 'Sports')
print('=' * 100)
else:
print(field)
print('=' * 30)
cursor_items = []
line_count = 1
for row in cursor:
if len(row) == 2:
if table == 'noc_regions':
string_to_print = row[0] + ' ' + row[1]
elif table == 'events':
string_to_print = row[0] + ' ' * (60 - len(row[0])) + row[1]
print(str(line_count) + ' ' * (4 - len(str(line_count))) + string_to_print)
else:
print(str(line_count) + ' ' * (4 - len(str(line_count))) + row[0])
cursor_items.append(row[0])
line_count += 1
print()
input_clarifier = input('Which {field} did you mean? (Please enter the field\'s number.) '.format(field=field))
try:
return cursor_items[int(input_clarifier) - 1]
except Exception as e:
print('Error. Invalid input given.')
exit()
def main():
# Connect to the database (database connection code from <NAME>'s psycopg2-sample.py)
try:
connection = psycopg2.connect(database=config.database, user=config.user, password=config.password)
except Exception as e:
print(e)
exit()
try:
cursor = connection.cursor()
except Exception as e:
print(e)
exit()
arguments = get_parsed_arguments()
noc_code = ''
if arguments.noc:
noc_code = user_input_identifier(cursor, arguments.noc[0], 'code', 'noc_regions')
print(noc_code)
event_name = ''
if arguments.event:
event_name = fix_single_quotes(user_input_identifier(cursor, arguments.event[0], 'event', 'events'))
print(event_name)
games_year = 0
if arguments.year:
games_year = user_input_identifier(cursor, arguments.year[0], 'year', 'games')
print(games_year)
medal = False
if arguments.medal:
medal = True
try:
if arguments.gold:
#query = | |
# Chess by <NAME>
WHITE = 'white'
BLACK = 'black'
import random
class Chess:
def __init__(self):
self.turn = WHITE
self.board = {}
self.initialize_board()
self.all_moves = []
self.promotion_required = False
self.promotion_pos = (-1,-1)
def initialize_board(self):
for i in range(8):
self.board[(1,i)] = Pawn(BLACK)
self.board[(6,i)] = Pawn(WHITE)
piece_order = [Rook,Knight,Bishop,Queen,King,Bishop,Knight,Rook]
for i in range(8):
self.board[(0,i)] = piece_order[i](BLACK)
self.board[(7,i)] = piece_order[i](WHITE)
# Append each piece's starting position to its pos_list
for key in self.board:
self.board[key].pos_list.append(key)
def print_board(self, board=None):
board_view = [[' ' for _ in range(8)] for _ in range(8)]
if board is None:
board = self.board
for pos in board:
board_view[pos[0]][pos[1]] = board[pos].uni_char
print('\n A B C D E F G H')
for i, row in enumerate(board_view):
print(str(8-i) + ' ' + str(row) + ' ' + str(8-i))
print(' A B C D E F G H\n')
def move_piece_basic(self, target, destination):
# Move a piece at position 'target' to position 'destination'
# Don't move if destination position is already occupied
if destination in self.board:
print('Destination ' + str(destination) + ' already occupied.')
return
if target not in self.board:
print('No piece located at ' + str(target) + '.')
return
self.board[target].pos_list.append(destination) # Append the destination to the pos_list
self.board[destination] = self.board[target] # Move the piece to a new position
del self.board[target]
self.all_moves.append(destination)
def do_move(self, move):
# Complete a full move
# Returns (start_pos, end_pos, captured_piece(rook_init_pos if castle is True),
# captured_piece_pos (rook_final_pos if castle is True), castle)
captured = None
if not move[1][1]:
# Normal move
if move[1][0] in self.board:
captured = self.board[move[1][0]]
del self.board[move[1][0]]
self.move_piece_basic(move[0], move[1][0])
return (move[0], move[1][0], captured, move[1][0], False)
else:
# Special move
if self.board[move[0]].name == 'pawn':
# En Passant
captured = self.board[move[1][1]]
del self.board[move[1][1]]
self.move_piece_basic(move[0], move[1][0])
return (move[0], move[1][0], captured, move[1][1], False)
if self.board[move[0]].name == 'king':
# Castling
rook_init_pos = (move[0][0],7) if move[1][0][1] == 6 else (move[0][0],0)
self.move_piece_basic(move[0], move[1][0])
self.move_piece_basic(rook_init_pos, move[1][1])
return (move[0], move[1][0], rook_init_pos, move[1][1], True)
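# Sketch of the move encoding consumed by do_move (inferred from the code above; squares
# are (row, col) with row 0 = black's back rank, so the white pawn push a2-a4 is square
# (6, 0) to (4, 0)):
#
#     ((6, 0), ((4, 0), None))       # normal move or capture: special_info is falsy
#     (start, (dest, captured_sq))   # en passant: special_info is the captured pawn's square
#     (start, (dest, rook_dest))     # castling: special_info is the rook's destination square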
def undo_move(self, target, destination):
# Move a piece from 'target' to 'destination'
# Only use to undo moves
self.board[destination] = self.board[target]
del self.board[target]
del self.board[destination].pos_list[-1]
del self.all_moves[-1]
def can_see_king(self, color):
# Tests if any piece of the given color can see the opposite color king
piece_list = [self.board[pos] for pos in self.board if self.board[pos].color == color]
opposite_king_pos = next(pos for pos in self.board if self.board[pos].name == 'king' and self.board[pos].color != color)
for piece in piece_list:
if opposite_king_pos in [move[0] for move in piece.available_moves(self.board, self.all_moves)]:
return True
return False
def is_in_check(self, color):
# Checks if player specified by 'color' is in check
if color == WHITE:
return self.can_see_king(BLACK)
return self.can_see_king(WHITE)
def play_in_terminal(self):
# Main method to play the game
print('\nWelcome to chess.\n\
Move a piece by entering its starting position immediately\n\
followed by its final position (e.g. \'a2a4\' or \'G8F6\'). Enter\n\
\'q\' or \'quit\' to quit. Enter \'h\' or \'help\' for help.')
while True:
quit = False
self.print_board()
if not self.has_move(self.turn):
if self.is_in_check(self.turn):
winner = 'White' if self.turn == BLACK else 'Black'
print('%s wins!' % winner)
break
print('Stalemate!')
break
# Get a move and ensure it is valid
while True:
move = self.get_move()
if move is None:
quit = True
break
if move == 'help':
moves = self.get_available_moves(self.turn)
print('\n%s\'s available moves:' % self.turn.capitalize())
print(moves)
print()
continue
if move[0] not in self.board:
print('There is no piece in that space.')
continue
if self.board[move[0]].color != self.turn:
print('You must move a ' + self.turn + ' piece.')
continue
selected_piece_available_moves = self.board[move[0]].available_moves(self.board, self.all_moves)
valid_dests = [m[0] for m in selected_piece_available_moves]
if not move[1] in valid_dests:
print('Invalid move. Try again.')
continue
dest_plus_info = next(m for m in selected_piece_available_moves if m[0] == move[1])
move = (move[0], dest_plus_info)
if not self.is_valid_move(move):
print('Invalid move. Try again.')
continue
break
if quit: break
# Move pieces and remove captured pieces from the board
self.do_move(move)
# Promotion
promo_row = 0 if self.turn == WHITE else 7
pos = self.all_moves[-1]
if pos[0] == promo_row and self.board[pos].name == 'pawn':
piece_class = self.get_promo_input()
old_pawn = self.board[pos]
del self.board[pos]
self.board[pos] = piece_class(old_pawn.color)
self.board[pos].pos_list = old_pawn.pos_list
self.turn = WHITE if self.turn == BLACK else BLACK
def get_move(self):
try:
move = input(self.turn.capitalize() + '\'s turn. Enter move: ')
move = move.lower()
if move == 'q' or move == 'quit':
return None
if move == 'h' or move == 'help':
return 'help'
return ((8-int(move[1]), ord(move[0])-97), (8-int(move[3]), ord(move[2])-97))
except:
print('Error decoding input. Please try again.')
return self.get_move()
def has_move(self, color):
# Checks if player specified by 'color' has a valid move
# Get all possible moves specified by each pieces available_moves method
moves = []
for pos in self.board:
if self.board[pos].color == color:
piece_moves = self.board[pos].available_moves(self.board, self.all_moves)
for move in piece_moves:
moves.append((pos, move))
for move in moves:
if self.is_valid_move(move):
return True
return False
def is_valid_move(self, move):
# Tests if a move will put the player in check or not
# move is of the form (start_pos, (destination, special_info))
pos = move[0]
destination = move[1][0]
if not move[1][1] or self.board[pos].name != 'king': # If move is not a castling move
# Get the correct piece and place to ressurect (captured piece)
if move[1][1] and self.board[pos].name == 'pawn':
piece_to_ressurect = self.board[move[1][1]]
place_to_ressurect = move[1][1]
else:
piece_to_ressurect = None if destination not in self.board else self.board[destination]
place_to_ressurect = destination
# Remove captured piece if applicable
if piece_to_ressurect:
del self.board[place_to_ressurect]
self.move_piece_basic(pos, destination) # Move piece
in_check = self.is_in_check(self.board[destination].color) # Check whether the mover would be in check
self.undo_move(destination, pos) # Undo move
# Ressurect the captured piece
if piece_to_ressurect:
self.board[place_to_ressurect] = piece_to_ressurect
return not in_check
# If move is castling
rook_init_pos = (pos[0],7) if move[1][0][1] == 6 else (pos[0],0)
self.move_piece_basic(pos, destination) # Move king
self.move_piece_basic(rook_init_pos, move[1][1]) # Move rook
in_check = self.is_in_check(self.board[destination].color) # Check whether the mover would be in check
self.undo_move(move[1][1], rook_init_pos) # Unmove rook
self.undo_move(destination, pos) # Unmove king
if in_check:
return False
# Check if king moves through a position where it would be in check
castling_dir = 1 if move[1][0][1] == 6 else -1
self.move_piece_basic(pos, (pos[0], pos[1]+castling_dir))
in_check = self.is_in_check(self.board[(pos[0], pos[1]+castling_dir)].color) # Check whether the king would be in check on the square it passes through
self.undo_move((pos[0], pos[1]+castling_dir), pos)
return not in_check
def get_promo_input(self):
try:
piece = input('What would you like to promote your pawn to? ').lower()
return {'q':Queen, 'queen':Queen, 'k':Knight, 'knight':Knight, 'r':Rook, \
'rook':Rook, 'b':Bishop, 'bishop':Bishop}[piece]
except:
print('Invalid response. Enter \'queen\', \'knight\', \'rook\', or \'bishop\'.')
return self.get_promo_input()
def get_available_moves(self, color):
moves = []
for pos in self.board:
if self.board[pos].color == color:
piece_moves = self.board[pos].available_moves(self.board, self.all_moves)
for m in piece_moves:
moves.append((pos, m))
# Delete invalid moves
for i, move in reversed(list(enumerate(moves))):
if not self.is_valid_move(move):
del moves[i]
if not moves:
print('No available moves.')
return
moves = [(move[0], move[1][0]) for move in moves]
for i, move in enumerate(moves):
moves[i] = (chr(move[0][1]+97).upper() + str(8-move[0][0]) + ' to ' + \
chr(move[1][1]+97).upper() + str(8-move[1][0]))
return moves
########################################################################################
def play_turn(self, move=None, promotion=None):
'''
Used for playing a turn and returning data
move is of the form (start_pos, end_pos)
ERROR CODES:
(1, winner) A player won the game
(2, 'stalemate') Stalemate
(3, 'quit') Signal to quit the game
(4, 'promotion') Need to promote a pawn before the next move
(5, 'promo failed') Promotion failed
(6, 'promo successful') Promotion successful
(7, available moves) Return a list of available moves for the current turn
(-2, 'no piece') No piece in that space
(-3, 'wrong color') Wrong color selected
(-1, 'invalid') Invalid move
(-4, 'move error') Error decoding move
(0, color) Successful turn by 'color'
'''
if type(move) == str:
try:
move = move.lower()
if move == 'q' or move == 'quit':
return (self.board, 3, 'quit')
if move == 'h' or move == 'help':
return (self.board, 7, self.get_available_moves(self.turn))
move = ((8-int(move[1]), ord(move[0])-97), (8-int(move[3]), ord(move[2])-97))
except:
return (self.board, -4, 'move error')
if self.promotion_required:
if not promotion:
return (self.board, 5, 'promo failed')
try:
promo_piece = {'q':Queen, 'queen':Queen, 'k':Knight, 'knight':Knight, \
'r':Rook, 'rook':Rook, 'b':Bishop, 'bishop':Bishop}[promotion.lower()]
except:
return (self.board, 5, 'promo failed')
old_pawn = self.board[self.promotion_pos]
del self.board[self.promotion_pos]
self.board[self.promotion_pos] = promo_piece(old_pawn.color)
self.board[self.promotion_pos].pos_list = old_pawn.pos_list
self.promotion_required = False
self.turn = WHITE if self.turn == BLACK else BLACK
return (self.board, 6, 'promo successful')
if move is None:
return (self.board, 3, 'quit')
if move[0] not in self.board:
return (self.board, -2, 'no piece')
if self.board[move[0]].color != self.turn:
return (self.board, -3, 'wrong color')
selected_piece_available_moves = self.board[move[0]].available_moves(self.board, self.all_moves)
valid_dests = [m[0] for m in selected_piece_available_moves]
if not move[1] in valid_dests:
return (self.board, -1, 'invalid')
dest_plus_info = next(m for m in selected_piece_available_moves if m[0] == move[1])
move = (move[0], dest_plus_info)
if not self.is_valid_move(move):
return (self.board, -1, 'invalid')
# Move pieces and remove captured pieces from the board
self.do_move(move)
# Promotion
promo_row = 0 if self.turn == WHITE else 7
pos = self.all_moves[-1]
if pos[0] == promo_row and self.board[pos].name == 'pawn':
self.promotion_required = True
self.promotion_pos = pos
return (self.board, 4, 'promotion')
self.turn = WHITE if self.turn == BLACK else BLACK
if not self.has_move(self.turn):
if self.is_in_check(self.turn):
winner = WHITE if self.turn == BLACK else BLACK
return (self.board, 1, winner)
return (self.board, 2, 'stalemate')
return (self.board, 0, WHITE if self.turn == BLACK else BLACK)
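# Hedged usage sketch of the play_turn API (this driver loop is illustrative and not part
# of the class):
#
#     game = Chess()
#     board, code, info = game.play_turn('e2e4')
#     if code == 4:                                   # a pawn reached the last rank
#         board, code, info = game.play_turn(promotion='queen')
#     elif code in (1, 2, 3):                         # win / stalemate / quit
#         print(info)
#     elif code < 0:                                  # bad input, ask the player again
#         pass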
def get_board(self):
return self.board
def get_turn(self):
return self.turn
def get_promotion(self):
return self.promotion_required
def get_random_move(self, color):
moves = []
for pos in self.board:
if self.board[pos].color == color:
piece_moves = self.board[pos].available_moves(self.board, self.all_moves)
for m in piece_moves:
moves.append((pos, m))
if not moves:
return None
random.shuffle(moves)
move = moves.pop()
while not self.is_valid_move(move):
if len(moves) == 0:
return None
random.shuffle(moves)
move = moves.pop()
return (move[0], move[1][0])
def get_smart_move(self, color):
if self.promotion_required:
return None
# Get all potential moves
moves = []
for pos in self.board:
if self.board[pos].color == color:
piece_moves = self.board[pos].available_moves(self.board, self.all_moves)
for move in piece_moves:
moves.append((pos, move))
# Delete invalid moves
for i, move in reversed(list(enumerate(moves))):
if not self.is_valid_move(move):
del moves[i]
# Get moves in which a piece is captured
capture_moves = []
for move in moves:
if move[1][0] in self.board:
capture_moves.append(move)
# Get moves in which the opposing king is put in check
check_moves = []
for move in moves:
pos_start, pos_final, captured, captured_pos, castle = self.do_move(move)
if self.can_see_king(color):
check_moves.append(move)
if castle:
self.undo_move(captured_pos, captured)
self.undo_move(pos_final, pos_start)
else:
self.undo_move(pos_final, pos_start)
if captured:
self.board[captured_pos] = captured
# Get moves which capture and put the opposing king in check
capture_and_check_moves = []
for move in capture_moves:
if move in check_moves:
capture_and_check_moves.append(move)
# Return a random move from the best available list
if capture_and_check_moves:
random.shuffle(capture_and_check_moves)
move = capture_and_check_moves.pop()
return (move[0], move[1][0])
if check_moves:
random.shuffle(check_moves)
move = check_moves.pop()
return (move[0], move[1][0])
if capture_moves:
random.shuffle(capture_moves)
move = capture_moves.pop()
return (move[0], move[1][0])
if not moves:
return | |
isinstance(idx, tuple) else idx
for elem_idx, elem in enumerate(idx):
# boolean arrays in tuple (cross-indices) must be 1-Dimensional
if elem is not None and elem.dtype.kind == 'b' and \
elem.size != self.shape[elem_idx]:
raise IndexError(
"boolean index array for axis {:} must have "
"size {:}.".format(elem_idx, self.shape[elem_idx]))
def _check_index_slice(self, elem_idx, elem):
"""Check slice index bounds.
Parameters
----------
elem_idx : int
Target axis for slice index.
elem : slice
Slice index to check.
Raises
------
IndexError : if slice operation is out of array's bounds.
"""
if elem.start is not None:
# The trigger is always 0 except when elem.start and
# self.shape[elem_idx] have the same modulus (absolute value)
# but opposite sign. Slices behave differently for
# negatives and positives
trig = abs(elem.start) == self.shape[elem_idx] and \
elem.start != self.shape[elem_idx]
if abs(elem.start) > self.shape[elem_idx] + trig - 1:
raise IndexError(
"start element of index {:}, slice({:}, {:}, {:}),"
" is out of bounds.".format(elem_idx, elem.start,
elem.step, elem.stop))
if elem.stop is not None:
if elem.stop > self.shape[elem_idx]:
raise IndexError(
"stop element of index {:}, slice({:}, {:}, {:}), "
"is out of bounds.".format(elem_idx, elem.start,
elem.step, elem.stop))
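# Worked example of the bounds rule above, for self.shape[elem_idx] == 5:
#   start =  4 -> trig = 0, 4 > 5 + 0 - 1 is False -> allowed (last element)
#   start =  5 -> trig = 0, 5 > 5 + 0 - 1 is True  -> IndexError
#   start = -5 -> trig = 1, 5 > 5 + 1 - 1 is False -> allowed (refers to element 0)
# i.e. a negative start may reach -shape, while a positive start must stay below shape.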
def __getitem__(self, idx):
"""Redefinition of the get operation."""
if is_list_of_lists(idx):
# Natively supported for multi-dimensional (not flat) arrays
# The list of lists must be passed as a tuple
return self.__class__(
np.ndarray.__getitem__(
self.atleast_2d().tondarray(), tuple(idx)))
# Check index for all other cases
idx = self._check_index(idx)
# We are ready for numpy
return self.__class__(np.ndarray.__getitem__(self.tondarray(), idx))
def __setitem__(self, idx, value):
"""Redefinition of the set operation."""
# Check for setitem value
if isinstance(value, CDense):
if value.is_vector_like and value.ndim > 1:
# We transform vector-like arrays of 2 or more dims to vectors
# in order to always perform the set operation correctly
value = value.ravel()
value = value.tondarray()
elif not (is_scalar(value) or is_bool(value)):
raise TypeError("{:} cannot be used for setting "
"a CDense.".format(type(value)))
if is_list_of_lists(idx):
# Natively supported for multi-dimensional (not flat) arrays
# The list of lists must be passed as a tuple
np.ndarray.__setitem__(
self.atleast_2d().tondarray(), tuple(idx), value)
return
# Check index for all other cases
idx = self._check_index(idx)
# We are ready for numpy
np.ndarray.__setitem__(self.tondarray(), idx, value)
# ------------------------------------ #
# # # # # # SYSTEM OVERLOADS # # # # # #
# -------------------------------------#
def __add__(self, other):
"""Element-wise addition.
Parameters
----------
other : CDense or scalar or bool
Element to add to current array. If a CDense, element-wise
addition will be performed. If scalar or boolean, the element
will be sum to each array element.
Returns
-------
array : CDense
Array after addition.
"""
if is_scalar(other) or is_bool(other) or isinstance(other, CDense):
return self.__class__(
np.add(self.tondarray(), self._buffer_to_builtin(other)))
else:
return NotImplemented
def __radd__(self, other):
"""Element-wise (inverse) addition.
Parameters
----------
other : scalar or bool
Element to add to current array.
The element will be sum to each array element.
Returns
-------
array : CDense
Array after addition.
"""
if is_scalar(other) or is_bool(other):
return self.__class__(np.add(other, self.tondarray()))
else:
return NotImplemented
def __sub__(self, other):
"""Element-wise subtraction.
Parameters
----------
other : CDense or scalar or bool
Element to subtract to current array. If a CDense, element-wise
subtraction will be performed. If scalar or boolean, the element
will be subtracted to each array element.
Returns
-------
array : CDense
Array after subtraction.
"""
if is_scalar(other) or is_bool(other) or isinstance(other, CDense):
return self.__class__(
np.subtract(self.tondarray(), self._buffer_to_builtin(other)))
else:
return NotImplemented
def __rsub__(self, other):
"""Element-wise (inverse) subtraction.
Parameters
----------
other : scalar or bool
Element to subtract to current array.
The element will be subtracted to each array element.
Returns
-------
array : CDense
Array after subtraction.
"""
if is_scalar(other) or is_bool(other):
return self.__class__(np.subtract(other, self.tondarray()))
else:
return NotImplemented
def __mul__(self, other):
"""Element-wise product.
Parameters
----------
other : CDense or scalar or bool
Element to multiplied to current array. If a CDense, element-wise
product will be performed. If scalar or boolean, the element
will be multiplied to each array element.
Returns
-------
array : CDense
Array after product.
"""
if is_scalar(other) or is_bool(other) or isinstance(other, CDense):
return self.__class__(
np.multiply(self.tondarray(), self._buffer_to_builtin(other)))
else:
return NotImplemented
def __rmul__(self, other):
"""Element-wise (inverse) product.
Parameters
----------
other : scalar or bool
Element to multiplied to current array.
The element will be multiplied to each array element.
Returns
-------
array : CDense
Array after product.
"""
if is_scalar(other) or is_bool(other):
return self.__class__(np.multiply(other, self.tondarray()))
else:
return NotImplemented
def __truediv__(self, other):
"""Element-wise true division.
Parameters
----------
other : CDense or scalar or bool
Element to divided to current array. If a CDense, element-wise
division will be performed. If scalar or boolean, the element
will be divided to each array element.
Returns
-------
array : CDense
Array after division.
"""
if is_scalar(other) or is_bool(other) or isinstance(other, CDense):
return self.__class__(
np.true_divide(self.tondarray(),
self._buffer_to_builtin(other)))
else:
return NotImplemented
def __rtruediv__(self, other):
"""Element-wise (inverse) true division.
Parameters
----------
other : scalar or bool
Element to divided to current array.
The element will be divided to each array element.
Returns
-------
array : CDense
Array after division.
"""
if is_scalar(other) or is_bool(other):
return self.__class__(np.true_divide(other, self.tondarray()))
else:
return NotImplemented
def __floordiv__(self, other):
"""Element-wise floor division.
Parameters
----------
other : CDense or scalar or bool
Element to divided to current array. If a CDense, element-wise
division will be performed. If scalar or boolean, the element
will be divided to each array element.
Returns
-------
array : CDense
Array after division.
"""
# Result of Numpy floor division is not reliable
# (nan in place of inf, etc.)... Let's floor the truediv result
out_truediv = self.__truediv__(other)
if out_truediv is NotImplemented:
return NotImplemented
else: # Return the integer part of the truediv result
return out_truediv.floor()
def __rfloordiv__(self, other):
"""Element-wise (inverse) floor division.
Parameters
----------
other : scalar or bool
Element to divided to current array.
The element will be divided to each array element.
Returns
-------
array : CDense
Array after division.
"""
# Result of Numpy floor division is not reliable
# (nan in place of inf, etc.)... Let's floor the truediv result
return self.__floordiv__(other)
def __abs__(self):
"""Returns array elements without sign.
Returns
-------
array : CDense
Array with the corresponding elements without sign.
"""
return self.__class__(np.abs(self.tondarray()))
def __neg__(self):
"""Returns array elements with negated sign.
Returns
-------
array : CDense
Array with the corresponding elements with negated sign.
"""
return self.__class__(np.negative(self.tondarray()))
def __pow__(self, power):
"""Element-wise power.
Parameters
----------
power : CDense or scalar or bool
Power to use. If scalar or boolean, each array element will be
elevated to power. If a CDense, each array element will be
elevated to the corresponding element of the input array.
Returns
-------
array : CDense
Array after power.
"""
if is_scalar(power) or is_bool(power) or isinstance(power, CDense):
return self.__class__(
self.tondarray().__pow__(self._buffer_to_builtin(power)))
else:
return NotImplemented
def __rpow__(self, power):
"""Element-wise (inverse) power.
Parameters
----------
power : scalar or bool
Power to use. Each array element will be elevated to power.
Returns
-------
array : CDense
Array after power.
"""
if is_scalar(power) or is_bool(power):
return self.__class__(self.tondarray().__rpow__(power))
else:
return NotImplemented
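# Returning NotImplemented above (instead of raising) lets Python try the other operand's
# reflected method. A minimal, self-contained illustration of the same dispatch pattern
# (the Meters class is purely illustrative):
#
#     class Meters:
#         def __init__(self, v): self.v = v
#         def __add__(self, other):
#             if isinstance(other, (int, float)):
#                 return Meters(self.v + other)
#             return NotImplemented          # lets other.__radd__(self) be tried
#         __radd__ = __add__
#
#     (3 + Meters(2)).v                      # -> 5, resolved via Meters.__radd__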
def __eq__(self, other):
"""Element-wise == operator.
Parameters
----------
other : CDense or scalar or bool
Element to be compared. If a CDense, element-wise
comparison will be performed. If scalar or boolean,
the element will be compared to each array element.
Returns
-------
array : CDense
Boolean array with comparison result.
"""
if is_scalar(other) or is_bool(other) or isinstance(other, CDense):
return self.__class__(
self.tondarray() == self._buffer_to_builtin(other))
else:
return NotImplemented
def __lt__(self, other):
"""Element-wise < operator.
Parameters
----------
other : CDense or scalar or bool
Element to be compared. If a CDense, element-wise
comparison will be performed. If scalar or boolean,
the element will be compared to each array element.
Returns
-------
array : CDense
Boolean array with comparison result.
"""
if is_scalar(other) or is_bool(other) or isinstance(other, CDense):
return self.__class__(
self.tondarray() < self._buffer_to_builtin(other))
else:
return NotImplemented
def __le__(self, other):
"""Element-wise <= operator.
Parameters
----------
other : CDense or scalar or bool
Element to be compared. If a CDense, element-wise
comparison will be performed. If scalar or | |
from problog.program import PrologString, PrologFile, LogicProgram
from problog.logic import Term, Constant, Clause, AnnotatedDisjunction, Not
from problog.engine import ClauseDB, DefaultEngine
from problog.tasks.sample import sample
import random
import re
def create_observations(processed_model_filename, n=2):
"""
Create n examples from processed_model_filename each observing all queried terms.
:param processed_model_filename: The filename of the problog program to observe from.
:type processed_model_filename: str
:param n: The amount of examples to create/sample.
:type n: int
:return: n examples (interpretations) and their utility. Each example is a tuple of observations and their utility.
The observations are represented as a list of tuples, each consisting of a queried Term and their truth-value in this
example. The utility of an example is the sum of the utilities of each term in the example.
:rtype: list[tuple[list[(Term, bool)], int]]
"""
model = PrologFile(processed_model_filename)
# Prepare dict Term to utility
engine = DefaultEngine()
db = engine.prepare(model)
def convert(t):
if isinstance(t, Not) or str(t.functor) == 'not' or str(t.functor) == '\+':
return t.args[0], False
else:
return t, True
term_to_utility = {convert(q[0]): q[1] for q in engine.query(db, Term('utility', None, None))}
# Construct examples
examples_list = list() # type: list[tuple[list[(Term, bool)], int]]
for example in sample(model, n=n, format='dict'):
# Convert to list of observed terms: (Term, True/False)
observations = list(example.items())
# Calculate utility
utility = 0
for observation in observations:
utility_obs = term_to_utility.get(observation)
utility_obs = utility_obs.compute_value() if utility_obs is not None else 0
utility += utility_obs
# Store result
examples_list.append((observations, utility))
return examples_list
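# Hedged usage sketch (added for illustration; "model.pl" is a hypothetical
# ProbLog file defining query/1 and utility/2 facts):
def _example_create_observations(model_path="model.pl"):
    """Sample five interpretations and print each one with its utility."""
    for observations, utility in create_observations(model_path, n=5):
        print(utility, [(str(term), truth) for term, truth in observations])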
def get_term_to_utilities(filename):
"""
Get a dictionary mapping each term to its utility.
:param filename: The file (path) from which to extract the terms and utilities.
:type filename: str
:return: A mapping of term to utility.
:rtype: dict[Term, float]
"""
pl = PrologFile(filename)
engine = DefaultEngine()
terms_to_utilities = {q[0]: q[1].compute_value() for q in engine.query(pl, Term('utility', None, None))}
return terms_to_utilities
def query_all_terms(db):
"""
Change the given database to query all its terms.
:param db: The database to extract the terms from and to add the query(terms) to.
:type db: ClauseDB
"""
term_set = get_terms(db)
for term in term_set:
db.add_fact(Term('query', term))
def add_utility_terms_to_db(db, nb_utility_nodes, samples_per_utility_node):
"""
Add nb_utility_nodes new utility nodes, each with a positive and a negative utility. The rules that make a utility node
true are based on random samples from the model. Each utility node gets samples_per_utility_node rules, which are not necessarily unique.
:param db: The database to add to.
:type db: ClauseDB
:param nb_utility_nodes: The amount of utility facts to add.
:type nb_utility_nodes: int
:param samples_per_utility_node: The amount of rules to add per introduced utility fact. These rules are
constructed by sampling the model.
:type samples_per_utility_node: int
:return: db extended with utility facts and rules to make those new utility facts true.
:rtype: ClauseDB
"""
db_new = db.extend()
def observation_to_str(observation):
not_str = "" if observation[1] else "\+"
return "{}{}".format(not_str, str(observation[0]))
for i in range(0, nb_utility_nodes):
util_term = Term('util_node', Constant(i))
# Add utility facts
utility_pos = random.randint(-50, 50)
utility_neg = random.randint(-50, 50)
db_new.add_fact(Term('utility', util_term, Constant(utility_pos)))
db_new.add_fact(Term('utility', Not('\\+', util_term), Constant(utility_neg)))
# Add rules
for example in sample(db, n=samples_per_utility_node, format='dict'):
observations = list(example.items())
pl_str = "{} :- {}".format(str(util_term), observation_to_str(observations[0]))
for observation in observations[1:]:
pl_str += ", {}".format(observation_to_str(observation))
pl_str += "."
for statement in PrologString(pl_str):
db_new += statement
return db_new
def add_utilities_to_db(db, next_random_utility, prob_of_utility_for_pos=0.8, prob_of_utility_for_neg=0.3):
"""
Add utilities for the terms in db for which there is a query(term) fact. Utilities are added for both the positive
and negative term with a given probability. The utility to assign is determined by next_random_utility, a function
taking the Term and a boolean as argument.
:param db: The database to add utilities to
:type db: ClauseDB
:param next_random_utility: A function which provides the next random utility to assign to a term. This function
takes two arguments, a Term and a boolean. The Term is the term for which we want to assign a value. The boolean
denotes whether it is a positive term.
:type next_random_utility: function[Term,boolean]
:param prob_of_utility_for_pos: The probability that a term should have a utility.
:type prob_of_utility_for_pos: float
:param prob_of_utility_for_neg: The probability that the negation of a term should have a utility.
:type prob_of_utility_for_neg: float
"""
engine = DefaultEngine()
queries = [q[0] for q in engine.query(db, Term('query', None))]
for query in queries:
if random.random() <= prob_of_utility_for_pos:
utility = next_random_utility(query, True)
db.add_fact(Term("utility", query, Constant(utility)))
if random.random() <= prob_of_utility_for_neg:
utility = next_random_utility(query, False)
db.add_fact(Term("utility", Not('\+', query), Constant(utility)))
def add_unknown_utilities_to_db(db_source, db_target, prob_of_unknown=0.3):
"""
Add utilities and unknown utilities to the target database. For each utility term in the source database: with
probability prob_of_unknown, the term has an unknown probability in the target database. With a probability of
1-prob_of_unknown, the term has the same utility in the target database as in the source database.
:param db_source: The source database
:type db_source: ClauseDB
:param db_target: The target database
:type db_target: ClauseDB
:param prob_of_unknown: Probability that a utility from the source db becomes an unknown utility in the target db.
:type prob_of_unknown: float
"""
engine = DefaultEngine()
queries = [(q[0], q[1]) for q in engine.query(db_source, Term('utility', None, None))]
for query in queries:
if random.random() <= prob_of_unknown:
db_target.add_fact(Term('utility', query[0], Term('t', -1)))
#for statement in PrologString("utility({},t(_)).".format(query[0])):
# db_target += statement
else:
db_target.add_fact(Term('utility', query[0], query[1]))
def add_decisions_to_db(db, decision_prob=0.10):
"""
Add decisions to a database by changing leaves into decisions. The resulting db is a new db based on the
input db and the added decisions. If fewer than 4 decisions were added, we also add a decision for each AD with more
than 2 values (with a probability of decision_prob).
:param db: The database to start from.
:type db: ClauseDB
:param decision_prob: The probability that an AD with more than 2 values introduces a decision when less than 4
decisions were added.
:type decision_prob: float
:return: The new database and the number of decisions added to that database
:rtype: ClauseDB, int
"""
nb_of_decisions = 0
remove_list = list()
true_terms = set()
true_terms_statement = dict()
# Add decisions
temp_db = db.extend()
temp_db_pl = temp_db.to_prolog()
for term in db:
if isinstance(term, Clause) and str(term.args[1]) == 'true':
true_terms.add(term.args[0])
true_terms_statement[term.args[0]] = term
elif isinstance(term, AnnotatedDisjunction) and term.args[1] in true_terms and len(term.args[0]) == 2:
remove_list.append(str(term) + ".")
remove_list.append(str(true_terms_statement[term.args[1]]) + ".")
temp_db_pl += "\n?::dec_{}.".format(nb_of_decisions)
temp_db_pl += "\n{} :- dec_{}.".format(str(term.args[0][0]), nb_of_decisions)
temp_db_pl += "\n{} :- \\+dec_{}.".format(str(term.args[0][1]), nb_of_decisions)
nb_of_decisions += 1
elif not isinstance(term, Clause) and not isinstance(term, AnnotatedDisjunction) and term.functor != 'query' \
and term.functor != 'utility' and term.functor != 'evidence':
temp_db_pl += "\n" + str(term.with_probability(Term('?'))) + "."
remove_list.append(str(term) + ".")
nb_of_decisions += 1
# invent more decisions if there are only few.
if nb_of_decisions <= 3:
for term in db:
if isinstance(term, AnnotatedDisjunction) and (not term.args[1] in true_terms) and random.random() <= decision_prob:
temp_db_pl += "\n?::dec_{}.".format(nb_of_decisions)
rand_index = random.randint(0, len(term.args[0])-1)
temp_db_pl += "\n{} :- dec_{}.".format(str(term.args[0][rand_index]), nb_of_decisions)
nb_of_decisions += 1
# Remove old probabilistic lines.
for rem_str in remove_list:
temp_db_pl = temp_db_pl.replace(rem_str, "")
program = PrologString(temp_db_pl)
engine = DefaultEngine(label_all=True, keep_order=True)
db_new = engine.prepare(program)
return db_new, nb_of_decisions
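# Hedged end-to-end sketch (illustrative; "program.pl" is a hypothetical path, and
# query_all_terms relies on get_terms(), which is defined elsewhere in this module):
def _example_build_decision_problem(path="program.pl"):
    """Prepare a program, query all terms, then add random utilities and decisions."""
    engine = DefaultEngine(label_all=True, keep_order=True)
    db = engine.prepare(PrologFile(path))
    query_all_terms(db)
    add_utilities_to_db(db, lambda term, positive: random.randint(-50, 50))
    return add_decisions_to_db(db)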
def add_decisions_to_db_archived(db, decision_prob=0.10):
"""
Add decisions to a database by changing leaves into decisions. The resulting db is a new db based on the
input db and the added decisions. If fewer than 4 decisions were added, we also add a decision for each AD with more
than 2 values (with a probability of decision_prob).
:param db: The database to start from.
:type db: ClauseDB
:param decision_prob: The probability that an AD with more than 2 values introduces a decision when less than 4
decisions were added.
:type decision_prob: float
:return: The new database and the number of decisions added to that database
:rtype: ClauseDB, int
"""
nb_of_decisions = 0
remove_list = list()
true_terms = set()
true_terms_statement = dict()
# Add decisions
temp_db = db.extend()
for term in db:
if isinstance(term, Clause) and str(term.args[1]) == 'true':
true_terms.add(term.args[0])
true_terms_statement[term.args[0]] = term
elif isinstance(term, AnnotatedDisjunction) and term.args[1] in true_terms and len(term.args[0]) == 2:
remove_list.append(str(term) + ".")
remove_list.append(str(true_terms_statement[term.args[1]]) + ".")
new_pl_str = "?::dec_{}.\n".format(nb_of_decisions)
new_pl_str += "{} :- dec_{}.\n".format(str(term.args[0][0]), nb_of_decisions)
new_pl_str += "{} :- \\+dec_{}.".format(str(term.args[0][1].with_probability(None)), nb_of_decisions)
for statement in PrologString(new_pl_str):
temp_db += statement
legend_loc='best', **kwargs):
"""Plot HOLE profiles :math:`R(\zeta)` in a 1D graph.
Lines are colored according to the specified ``color`` or
drawn from the color map ``cmap``. One line is
plotted for each trajectory frame.
Parameters
----------
frames: array-like, optional
Frames to plot. If ``None``, plots all of them.
Default: ``None``
color: str or array-like, optional
Color or colors for the plot. If ``None``, colors are
drawn from ``cmap``. Default: ``None``
cmap: str, optional
color map to make colors for the plot if ``color`` is
not given. Names should be from the ``matplotlib.pyplot.cm``
module. Default: 'viridis'
linestyle: str or array-like, optional
Line style for the plot. Default: '-'
y_shift : float, optional
displace each :math:`R(\zeta)` profile by ``y_shift`` in the
:math:`y`-direction for clearer visualization. Default: 0.0
label : bool or string, optional
If ``False`` then no legend is
displayed. Default: ``True``
ax : :class:`matplotlib.axes.Axes`
If no `ax` is supplied or set to ``None`` then the plot will
be added to the current active axes. Default: ``None``
legend_loc : str, optional
Location of the legend. Default: 'best'
kwargs : `**kwargs`
All other `kwargs` are passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
ax : :class:`~matplotlib.axes.Axes`
Axes with the plot, either `ax` or the current axes.
"""
if not self.profiles:
raise ValueError('No profiles available. Try calling run()')
if ax is None:
fig, ax = plt.subplots()
fcl = self._process_plot_kwargs(frames=frames,
color=color, cmap=cmap, linestyle=linestyle)
for i, (frame, c, ls) in enumerate(zip(*fcl)):
profile = self.profiles[frame]
dy = i*y_shift
ax.plot(profile.rxn_coord, profile.radius+dy, color=c,
linestyle=ls, zorder=-frame, label=str(frame),
**kwargs)
ax.set_xlabel(r"Pore coordinate $\zeta$ ($\AA$)")
ax.set_ylabel(r"HOLE radius $R$ ($\AA$)")
if label == True:
ax.legend(loc=legend_loc)
return ax
def plot3D(self, frames=None,
color=None, cmap='viridis',
linestyle='-', ax=None, r_max=None,
ylabel='Frames', **kwargs):
"""Stacked 3D graph of profiles :math:`R(\zeta)`.
Lines are colored according to the specified ``color`` or
drawn from the color map ``cmap``. One line is
plotted for each trajectory frame.
Parameters
----------
frames: array-like, optional
Frames to plot. If ``None``, plots all of them.
Default: ``None``
color: str or array-like, optional
Color or colors for the plot. If ``None``, colors are
drawn from ``cmap``. Default: ``None``
cmap: str, optional
color map to make colors for the plot if ``color`` is
not given. Names should be from the ``matplotlib.pyplot.cm``
module. Default: 'viridis'
linestyle: str or array-like, optional
Line style for the plot. Default: '-'
r_max : float, optional
only display radii up to ``r_max``. If ``None``, all radii are
plotted. Default: ``None``
ax : :class:`matplotlib.axes.Axes`
If no `ax` is supplied or set to ``None`` then the plot will
be added to the current active axes. Default: ``None``
ylabel : str, optional
Y-axis label. Default: 'Frames'
**kwargs :
All other `kwargs` are passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
ax : :class:`~mpl_toolkits.mplot3d.Axes3D`
Axes with the plot, either `ax` or the current axes.
"""
if not self.profiles:
raise ValueError('No profiles available. Try calling run()')
from mpl_toolkits.mplot3d import Axes3D
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
fcl = self._process_plot_kwargs(frames=frames,
color=color, cmap=cmap,
linestyle=linestyle)
for frame, c, ls in zip(*fcl):
profile = self.profiles[frame]
if r_max is None:
radius = profile.radius
rxn_coord = profile.rxn_coord
else:
# does not seem to work with masked arrays but with nan hack!
# http://stackoverflow.com/questions/4913306/python-matplotlib-mplot3d-how-do-i-set-a-maximum-value-for-the-z-axis
rxn_coord = profile.rxn_coord
radius = profile.radius.copy()
radius[radius > r_max] = np.nan
ax.plot(rxn_coord, frame*np.ones_like(rxn_coord), radius,
color=c, linestyle=ls, zorder=-frame, label=str(frame),
**kwargs)
ax.set_xlabel(r"Pore coordinate $\zeta$ ($\AA$)")
ax.set_ylabel(ylabel)
ax.set_zlabel(r"HOLE radius $R$ ($\AA$)")
plt.tight_layout()
return ax
def over_order_parameters(self, order_parameters, frames=None):
"""Get HOLE profiles sorted over order parameters ``order_parameters``.
Parameters
----------
order_parameters: array-like or string
Sequence or text file containing order parameters (float
numbers) corresponding to the frames in the trajectory. Must
be same length as trajectory.
frames: array-like, optional
Selected frames to return. If ``None``, returns all of them.
Default: ``None``
Returns
-------
collections.OrderedDict
sorted dictionary of {order_parameter:profile}
"""
if not self.profiles:
raise ValueError('No profiles available. Try calling run()')
if isinstance(order_parameters, six.string_types):
try:
order_parameters = np.loadtxt(order_parameters)
except IOError:
raise ValueError('Data file not found: {}'.format(order_parameters))
except (ValueError, TypeError):
msg = ('Could not parse given file: {}. '
'`order_parameters` must be array-like '
'or a filename with array data '
'that can be read by np.loadtxt')
raise ValueError(msg.format(order_parameters))
order_parameters = np.asarray(order_parameters)
if len(order_parameters) != len(self._trajectory):
msg = ('The number of order parameters ({}) must match the '
'length of the trajectory ({} frames)')
raise ValueError(msg.format(len(order_parameters),
len(self._trajectory)))
if frames is None:
frames = self.frames
else:
frames = np.asarray(util.asiterable(frames))
idx = np.argsort(order_parameters[frames])
sorted_frames = frames[idx]
profiles = OrderedDict()
for frame in sorted_frames:
profiles[order_parameters[frame]] = self.profiles[frame]
return profiles
def plot_order_parameters(self, order_parameters,
aggregator=min,
frames=None,
color='blue',
linestyle='-', ax=None,
ylabel=r'Minimum HOLE pore radius $r$ ($\AA$)',
xlabel='Order parameter',
**kwargs):
r"""Plot HOLE radii over order parameters. This function needs
an ``aggregator`` function to reduce the ``radius`` array to a
single value, e.g. ``min``, ``max``, or ``np.mean``.
Parameters
----------
order_parameters: array-like or string
Sequence or text file containing order parameters (float
numbers) corresponding to the frames in the trajectory. Must
be same length as trajectory.
aggregator: callable, optional
Function applied to the radius array of each profile to
reduce it to one representative value. Default: ``min``
frames: array-like, optional
Frames to plot. If ``None``, plots all of them.
Default: ``None``
color: str or array-like, optional
Color for the plot. Default: 'blue'
linestyle: str or array-like, optional
Line style for the plot. Default: '-'
ax : :class:`matplotlib.axes.Axes`
If no `ax` is supplied or set to ``None`` then the plot will
be added to the current active axes. Default: ``None``
xlabel : str, optional
X-axis label. Default: 'Order parameter'
ylabel : str, optional
Y-axis label. Default: 'Minimum HOLE pore radius $r$ ($\AA$)'
**kwargs :
All other `kwargs` are passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
ax : :class:`~matplotlib.axes.Axes`
Axes with the plot, either `ax` or the current axes.
"""
op_profiles = self.over_order_parameters(order_parameters,
frames=frames)
if ax is None:
fig, ax = plt.subplots()
data = [[x, aggregator(p.radius)] for x, p in op_profiles.items()]
arr = np.array(data)
ax.plot(arr[:, 0], arr[:, 1], color=color, linestyle=linestyle)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def gather(self, frames=None, flat=False):
"""Gather the fields of each profile recarray together.
Parameters
----------
frames: int or iterable of ints, optional
Profiles to include by frame. If ``None``, includes
all frames. Default: ``None``
flat: bool, optional
Whether to flatten the list of field arrays into a
single array. Default: ``False``
Returns
-------
dict
dictionary of fields
"""
if frames is None:
frames = self.frames
frames = util.asiterable(frames)
profiles = [self.profiles[k] for k in frames]
rxncoords = [p.rxn_coord for p in profiles]
radii = [p.radius for p in profiles]
cen_line_Ds = [p.cen_line_D for p in profiles]
if flat:
rxncoords = np.concatenate(rxncoords)
radii = np.concatenate(radii)
cen_line_Ds = np.concatenate(cen_line_Ds)
dct = {'rxn_coord': rxncoords,
'radius': radii,
'cen_line_D': cen_line_Ds}
return dct
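# Hedged usage sketch (comment only; construction of the analysis object is
# assumed, and run() must have been called so that self.profiles is populated):
#
#     fields = analysis.gather(flat=True)
#     # -> {'rxn_coord': ..., 'radius': ..., 'cen_line_D': ...}
#     radii_per_bin, edges = analysis.bin_radii(bins=50)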
def bin_radii(self, frames=None, bins=100, range=None):
"""Collects the pore radii into bins by reaction coordinate.
Parameters
----------
frames: int or iterable of ints, optional
Profiles to include by frame. If ``None``, includes
all frames. Default: ``None``
bins: int or iterable of edges, optional
If bins is an int, it defines the number of equal-width bins in the
given range. If bins is a sequence, it defines a monotonically
increasing array of bin edges, including the rightmost edge,
allowing for non-uniform bin widths. Default: 100
range : (float, float), optional
The lower and upper range of the bins.
If not provided, ``range`` is simply ``(a.min(), a.max())``,
where ``a`` is the array of reaction coordinates.
Values outside the range are ignored. The first element of the range must be less than or equal to the second.
Returns
-------
list of arrays of floats
List of radii present in each bin
array of (float, float)
Edges of each bin
"""
agg = self.gather(frames=frames, flat=True)
coords = agg['rxn_coord']
if not util.iterable(bins):
if range is None:
range = (coords.min(), coords.max())
xmin, xmax = range
if xmin == xmax:
xmin -= 0.5
xmax += 0.5
bins = np.linspace(xmin, xmax, bins+1, endpoint=True)
else:
bins = np.asarray(bins)
bins = bins[np.argsort(bins)]
idx = np.argsort(coords)
coords = coords[idx]
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import fnmatch
import numpy as np
import random
import math
import re
import csv
#
# PseudoLMGenerator
#
class PseudoLMGenerator(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "PseudoLMGenerator" # TODO make this more human readable by adding spaces
self.parent.categories = ["SlicerMorph.Geometric Morphometrics"]
self.parent.dependencies = []
self.parent.contributors = ["<NAME> (UW), <NAME> (UW)"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
This module samples and projects semi-landmark points from a spherical surface to a model.
"""
self.parent.helpText += self.getDefaultModuleDocumentationLink()
self.parent.acknowledgementText = """
This module was developed by <NAME> and <NAME>, through a NSF ABI Development grant, "An Integrated Platform for Retrieval, Visualization and Analysis of
3D Morphology From Digital Biological Collections" (Award Numbers: 1759883 (Murat Maga), 1759637 (<NAME>), 1759839 (Douglas Boyer)).
https://nsf.gov/awardsearch/showAward?AWD_ID=1759883&HistoricalAwards=false
""" # replace with organization, grant and thanks.
#
# PseudoLMGeneratorWidget
#
class PseudoLMGeneratorWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def onSelect(self):
self.getPointNumberButton.enabled = bool ( self.modelSelector.currentNode() )
self.projectionFactor.enabled = True
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# Instantiate and connect widgets ...
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Input Parameters"
self.layout.addWidget(parametersCollapsibleButton)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
#
# Select base mesh
#
self.modelSelector = slicer.qMRMLNodeComboBox()
self.modelSelector.nodeTypes = ( ("vtkMRMLModelNode"), "" )
self.modelSelector.selectNodeUponCreation = False
self.modelSelector.addEnabled = False
self.modelSelector.removeEnabled = False
self.modelSelector.noneEnabled = True
self.modelSelector.showHidden = False
self.modelSelector.setMRMLScene( slicer.mrmlScene )
parametersFormLayout.addRow("Base mesh: ", self.modelSelector)
#
# Set spacing tolerance
#
self.spacingTolerance = ctk.ctkSliderWidget()
self.spacingTolerance.singleStep = .1
self.spacingTolerance.minimum = 0
self.spacingTolerance.maximum = 10
self.spacingTolerance.value = 4
self.spacingTolerance.setToolTip("Set tolerance of spacing as a percentage of the image diagonal")
parametersFormLayout.addRow("Spacing tolerance: ", self.spacingTolerance)
#
# Parameters Area
#
templateCollapsibleButton = ctk.ctkCollapsibleButton()
templateCollapsibleButton.text = "Template Geometry"
self.layout.addWidget(templateCollapsibleButton)
# Layout within the dummy collapsible button
templateFormLayout = qt.QFormLayout(templateCollapsibleButton)
#
# Select geometry of template
#
self.OriginalType=qt.QRadioButton()
self.OriginalType.setChecked(True)
self.EllipseType=qt.QRadioButton()
self.SphereType=qt.QRadioButton()
templateFormLayout.addRow("Original Geometry", self.OriginalType)
templateFormLayout.addRow("Ellipse", self.EllipseType)
templateFormLayout.addRow("Sphere", self.SphereType)
#
# Set template scale factor
#
self.scaleFactor = ctk.ctkSliderWidget()
self.scaleFactor.enabled = False
self.scaleFactor.singleStep = 1
self.scaleFactor.minimum = 75
self.scaleFactor.maximum = 150
self.scaleFactor.value = 110
self.scaleFactor.setToolTip("Set template scale factor as a percentage of the image diagonal")
templateFormLayout.addRow("Template scale factor : ", self.scaleFactor)
#
# Set max projection factor
#
self.projectionFactor = ctk.ctkSliderWidget()
self.projectionFactor.enabled = False
self.projectionFactor.singleStep = 1
self.projectionFactor.minimum = 1
self.projectionFactor.maximum = 200
self.projectionFactor.value = 200
self.projectionFactor.setToolTip("Set maximum projection as a percentage of the image diagonal")
templateFormLayout.addRow("Maximum projection factor : ", self.projectionFactor)
#
# Run Area
#
samplingCollapsibleButton = ctk.ctkCollapsibleButton()
samplingCollapsibleButton.text = "Run Sampling"
self.layout.addWidget(samplingCollapsibleButton)
# Layout within the dummy collapsible button
samplingFormLayout = qt.QFormLayout(samplingCollapsibleButton)
#
# Get Subsample Rate Button
#
self.getPointNumberButton = qt.QPushButton("Get subsample number")
self.getPointNumberButton.toolTip = "Output the number of points sampled on the template shape."
self.getPointNumberButton.enabled = False
samplingFormLayout.addRow(self.getPointNumberButton)
#
# Subsample Information
#
self.subsampleInfo = qt.QPlainTextEdit()
self.subsampleInfo.setPlaceholderText("Subsampling information")
self.subsampleInfo.setReadOnly(True)
samplingFormLayout.addRow(self.subsampleInfo)
#
# Apply sphere button
#
self.applySphereButton = qt.QPushButton("Generate template")
self.applySphereButton.toolTip = "Generate sampled template shape."
self.applySphereButton.enabled = False
samplingFormLayout.addRow(self.applySphereButton)
#
# Project Points button
#
self.projectPointsButton = qt.QPushButton("Project points to surface")
self.projectPointsButton.toolTip = "Project the points from the sampled template to the model surface."
self.projectPointsButton.enabled = False
samplingFormLayout.addRow(self.projectPointsButton)
#
# Clean Points button
#
self.cleanButton = qt.QPushButton("Enforce spatial sampling rate")
self.cleanButton.toolTip = "Remove points spaced closer than the user-specified tolerance."
self.cleanButton.enabled = False
samplingFormLayout.addRow(self.cleanButton)
# connections
self.modelSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.onSelect)
self.OriginalType.connect('toggled(bool)', self.onToggleModel)
self.getPointNumberButton.connect('clicked(bool)', self.onGetPointNumberButton)
self.applySphereButton.connect('clicked(bool)', self.onApplySphereButton)
self.projectPointsButton.connect('clicked(bool)', self.onProjectPointsButton)
self.cleanButton.connect('clicked(bool)', self.onCleanButton)
# Add vertical spacer
self.layout.addStretch(1)
def cleanup(self):
pass
def onToggleModel(self):
self.scaleFactor.enabled = not self.OriginalType.isChecked()
def onGetPointNumberButton(self):
logic = PseudoLMGeneratorLogic()
spacingPercentage = self.spacingTolerance.value/100
scaleFactor = self.scaleFactor.value/100
if self.EllipseType.isChecked():
self.templatePolyData = logic.generateEllipseTemplate(self.modelSelector.currentNode(), spacingPercentage, scaleFactor)
elif self.SphereType.isChecked():
self.templatePolyData = logic.generateSphereTemplate(self.modelSelector.currentNode(), spacingPercentage, scaleFactor)
else:
self.templatePolyData = logic.generateOriginalGeometryTemplate(self.modelSelector.currentNode(), spacingPercentage)
self.subsampleInfo.insertPlainText(f'The subsampled template has a total of {self.templatePolyData.GetNumberOfPoints()} points. \n')
# enable next step
self.applySphereButton.enabled = True
def onApplySphereButton(self):
logic = PseudoLMGeneratorLogic()
# set up MRML node for sphere polyData and visualize
self.templateNode = logic.addTemplateToScene(self.templatePolyData)
#self.templateLMNode = logic.getTemplateLandmarks(self.templatePolyData)
# update visualization
#self.templateLMNode.GetDisplayNode().SetPointLabelsVisibility(False)
# blue=[0,0,1]
#self.templateLMNode.GetDisplayNode().SetSelectedColor(blue)
# enable next step
self.projectPointsButton.enabled = True
def onProjectPointsButton(self):
logic = PseudoLMGeneratorLogic()
isOriginalGeometry = self.OriginalType.isChecked()
maxProjection = self.projectionFactor.value/100
if isOriginalGeometry:
self.projectedLM = logic.runPointProjection(self.modelSelector.currentNode(), self.modelSelector.currentNode(), self.templateNode.GetPolyData().GetPoints(), maxProjection, isOriginalGeometry)
else:
self.projectedLM = logic.runPointProjection(self.templateNode, self.modelSelector.currentNode(), self.templateNode.GetPolyData().GetPoints(), maxProjection, isOriginalGeometry)
if self.projectedLM.GetNumberOfFiducials() == self.templateNode.GetPolyData().GetNumberOfPoints():
# update visualization
#self.templateLMNode.SetDisplayVisibility(False)
self.templateNode.SetDisplayVisibility(False)
self.projectedLM.GetDisplayNode().SetPointLabelsVisibility(False)
red=[1,0,0]
self.projectedLM.GetDisplayNode().SetSelectedColor(red)
# enable next step
self.cleanButton.enabled = True
else:
print("Error: projected point number does not match template point number")
self.cleanButton.enabled = False
def onCleanButton(self):
logic = PseudoLMGeneratorLogic()
spacingPercentage = self.spacingTolerance.value/100
self.sphericalSemiLandmarks = logic.runCleaning(self.projectedLM, self.templateNode, spacingPercentage)
# update visualization
self.projectedLM.SetDisplayVisibility(False)
for i in range(self.sphericalSemiLandmarks.GetNumberOfFiducials()):
self.sphericalSemiLandmarks.SetNthFiducialLocked(i,True)
self.sphericalSemiLandmarks.GetDisplayNode().SetPointLabelsVisibility(False)
green=[0,1,0]
self.sphericalSemiLandmarks.GetDisplayNode().SetSelectedColor(green)
#confirm number of cleaned points is in the expected range
self.subsampleInfo.insertPlainText(f'After filtering there are {self.sphericalSemiLandmarks.GetNumberOfFiducials()} semi-landmark points. \n')
#
# PseudoLMGeneratorLogic
#
class PseudoLMGeneratorLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def runCleaningOriginalGeometry(self, projectedLM, sphere, spacingPercentage):
# Convert projected surface points to a VTK array for transform
p=[0,0,0]
targetPoints = vtk.vtkPoints()
for i in range(projectedLM.GetNumberOfFiducials()):
projectedLM.GetMarkupPoint(0,i,p)
targetPoints.InsertNextPoint(p)
pointPD=vtk.vtkPolyData()
pointPD.SetPoints(targetPoints)
glyphFilter=vtk.vtkGlyph3D()
glyphFilter.SetInputData(pointPD)
glyphFilter.Update()
glyphPD = glyphFilter.GetOutput()
cleanFilter=vtk.vtkCleanPolyData()
cleanFilter.SetTolerance(spacingPercentage)
cleanFilter.SetInputData(glyphPD)
cleanFilter.Update()
outputPoints = cleanFilter.GetOutput()
sphereSampleLMNode= slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsFiducialNode',"sphericalSampledLandmarks")
for i in range(outputPoints.GetNumberOfPoints()):
point = outputPoints.GetPoint(i)
sphereSampleLMNode.AddFiducialFromArray(point)
return sphereSampleLMNode
def runCleaning(self, projectedLM, sphere, spacingPercentage):
# Convert projected surface points to a VTK array for transform
p=[0,0,0]
targetPoints = vtk.vtkPoints()
for i in range(projectedLM.GetNumberOfFiducials()):
projectedLM.GetMarkupPoint(0,i,p)
targetPoints.InsertNextPoint(p)
# Set up a transform between the sphere and the points projected to the surface
transform = vtk.vtkThinPlateSplineTransform()
transform.SetSourceLandmarks( sphere.GetPolyData().GetPoints() )
transform.SetTargetLandmarks( targetPoints )
transform.SetBasisToR()
# Apply the transform to the sphere to create a model with spherical topology
transformNode=slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTransformNode","TPS")
transformNode.SetAndObserveTransformToParent(transform)
sphere.SetAndObserveTransformNodeID(transformNode.GetID())
slicer.vtkSlicerTransformLogic().hardenTransform(sphere)
sphere.SetDisplayVisibility(False)
# Clean up semi-landmarks within radius
filter=vtk.vtkCleanPolyData()
filter.SetToleranceIsAbsolute(False)
filter.SetTolerance(spacingPercentage/2)
filter.SetInputData(sphere.GetPolyData())
filter.Update()
cleanPolyData=filter.GetOutput()
# Create a landmark node from the cleaned polyData
sphereSampleLMNode= slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsFiducialNode',"sphericalSampledLandmarks")
for i in range(cleanPolyData.GetNumberOfPoints()):
point = cleanPolyData.GetPoint(i)
sphereSampleLMNode.AddFiducialFromArray(point)
return sphereSampleLMNode
def runPointProjection(self, sphere, model, spherePoints, maxProjectionFactor, isOriginalGeometry):
maxProjection = (model.GetPolyData().GetLength()) * maxProjectionFactor
# project landmarks from template to model
projectedPoints = self.projectPointsPolydata(sphere.GetPolyData(), model.GetPolyData(), spherePoints, maxProjection)
projectedLMNode= slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsFiducialNode',"projectedLM")
if(isOriginalGeometry):
for i in range(projectedPoints.GetNumberOfPoints()):
point = projectedPoints.GetPoint(i)
projectedLMNode.AddFiducialFromArray(point)
return projectedLMNode
else:
#project landmarks from model to model external surface
projectedPointsExternal = self.projectPointsPolydata(model.GetPolyData(), model.GetPolyData(), projectedPoints, maxProjection)
for i in range(projectedPointsExternal.GetNumberOfPoints()):
point = projectedPointsExternal.GetPoint(i)
projectedLMNode.AddFiducialFromArray(point)
return projectedLMNode
def projectPointsPolydata(self, sourcePolydata, targetPolydata, originalPoints, rayLength):
print("original points: ", originalPoints.GetNumberOfPoints())
#set up polydata for projected points to return
projectedPointData = vtk.vtkPolyData()
projectedPoints = vtk.vtkPoints()
projectedPointData.SetPoints(projectedPoints)
#set up locater for intersection with normal vector rays
obbTree = vtk.vtkOBBTree()
obbTree.SetDataSet(targetPolydata)
obbTree.BuildLocator()
#set up point locator for finding surface normals and closest point
pointLocator = vtk.vtkPointLocator()
pointLocator.SetDataSet(sourcePolydata)
pointLocator.BuildLocator()
targetPointLocator = vtk.vtkPointLocator()
targetPointLocator.SetDataSet(targetPolydata)
targetPointLocator.BuildLocator()
#get surface normal from each landmark point
rayDirection=[0,0,0]
normalArray = sourcePolydata.GetPointData().GetArray("Normals")
if(not normalArray):
print("no normal array, calculating....")
normalFilter=vtk.vtkPolyDataNormals()
normalFilter.ComputePointNormalsOn()
normalFilter.SetInputData(sourcePolydata)
normalFilter.Update()
normalArray = normalFilter.GetOutput().GetPointData().GetArray("Normals")
if(not normalArray):
print("Error: no normal array")
return projectedPointData
for index in range(originalPoints.GetNumberOfPoints()):
originalPoint= originalPoints.GetPoint(index)
# get ray direction from closest normal
closestPointId = pointLocator.FindClosestPoint(originalPoint)
rayDirection = normalArray.GetTuple(closestPointId)
rayEndPoint=[0,0,0]
for dim in range(len(rayEndPoint)):
rayEndPoint[dim] = originalPoint[dim] + rayDirection[dim]* rayLength
intersectionIds=vtk.vtkIdList()
intersectionPoints=vtk.vtkPoints()
obbTree.IntersectWithLine(originalPoint,rayEndPoint,intersectionPoints,intersectionIds)
#if there are intersections, update the point to most external one.
if intersectionPoints.GetNumberOfPoints() > 0:
exteriorPoint = intersectionPoints.GetPoint(intersectionPoints.GetNumberOfPoints()-1)
projectedPoints.InsertNextPoint(exteriorPoint)
#if there are no intersections, reverse the normal vector
else:
for dim in range(len(rayEndPoint)):
rayEndPoint[dim] = originalPoint[dim] + rayDirection[dim]* -rayLength
obbTree.IntersectWithLine(originalPoint,rayEndPoint,intersectionPoints,intersectionIds)
if intersectionPoints.GetNumberOfPoints()>0:
exteriorPoint = intersectionPoints.GetPoint(0)
projectedPoints.InsertNextPoint(exteriorPoint)
#if none in reverse direction, use closest mesh point
else:
closestPointId = targetPointLocator.FindClosestPoint(originalPoint)
rayOrigin = targetPolydata.GetPoint(closestPointId)
projectedPoints.InsertNextPoint(rayOrigin)
return projectedPointData
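# Hedged usage sketch (comment only; assumes a Slicer Python environment so that
# PseudoLMGeneratorLogic can be instantiated):
#
#     logic = PseudoLMGeneratorLogic()
#     src = vtk.vtkSphereSource(); src.Update()
#     tgt = vtk.vtkSphereSource(); tgt.SetRadius(2.0); tgt.Update()
#     projected = logic.projectPointsPolydata(
#         src.GetOutput(), tgt.GetOutput(), src.GetOutput().GetPoints(), 10.0)
#     # `projected` is a vtkPolyData whose points lie on the target surface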
def getTemplateLandmarks(self, spherePolyData):
semiLMNode= slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsFiducialNode',"templatePoints")
for i in range(spherePolyData.GetNumberOfPoints()):
point = spherePolyData.GetPoint(i)
semiLMNode.AddFiducialFromArray(point)
return semiLMNode
def addTemplateToScene(self, spherePolyData):
sphereNode= slicer.mrmlScene.AddNewNodeByClass('vtkMRMLModelNode',"templateModel")
sphereNode.SetAndObservePolyData(spherePolyData)
sphereNode.CreateDefaultDisplayNodes()
sphereNode.GetDisplayNode().SetRepresentation(1)
sphereNode.GetDisplayNode().SetColor(0,0,1)
= hour
date_str['ppp_tttt'] = ppp_tttt
return date_str
#%%
def create_nc_grid_files_on_native_grid_from_mds(grid_input_dir,
grid_output_dir,
coordinate_metadata = dict(),
geometry_metadata = dict(),
global_metadata = dict(),
cell_bounds = None,
file_basename='ECCO-GRID',
title='llc grid geometry',
mds_datatype = '>f4',
write_to_disk = True,
less_output=True):
# grid input dir and grid output dir should be of type pathlib.PosixPath
mds_files = ''
if isinstance(grid_input_dir, str):
grid_input_dir = Path(grid_input_dir)
if isinstance(grid_output_dir, str):
grid_output_dir = Path(grid_output_dir)
if not less_output:
print(str(grid_input_dir))
grid = load_ecco_vars_from_mds(str(grid_input_dir),
str(grid_input_dir),
mds_files,
vars_to_load = 'all',
drop_unused_coords = False,
grid_vars_to_coords = False,
coordinate_metadata = coordinate_metadata,
variable_metadata = geometry_metadata,
global_metadata = global_metadata,
cell_bounds = cell_bounds,
mds_datatype = mds_datatype,
less_output = less_output)
if not less_output:
print(grid)
# for key in grid.attrs.keys():
# if 'geo' in key or 'time' in key or 'cell_method' in key:
# grid.attrs.pop(key)
if 'time_coverage_end' in list(grid.attrs.keys()):
grid.attrs.pop('time_coverage_end')
if 'time_coverage_start' in list(grid.attrs.keys()):
grid.attrs.pop('time_coverage_start')
grid.attrs['title'] = title
grid.attrs['product_name'] = file_basename + '.nc'
grid.attrs['uuid'] = str(uuid.uuid1())
# remove attributes that have TBD in their values
for attr in list(grid.attrs.keys()):
if type(grid.attrs[attr]) == str :
if 'TBD' in grid.attrs[attr]:
grid.attrs.pop(attr)
# save to disk
if write_to_disk:
if not grid_output_dir.exists():
try:
grid_output_dir.mkdir()
except:
print ('cannot make %s ' % grid_output_dir)
new_fname = grid_output_dir / (file_basename + '.nc')
if not less_output:
print('making single file grid netcdf')
print(str(new_fname))
if not less_output:
print('\n... creating variable encodings')
# PROVIDE SPECIFIC ENCODING DIRECTIVES FOR EACH DATA VAR
dv_encoding = dict()
for dv in grid.data_vars:
dv_encoding[dv] = {'zlib':True, \
'complevel':5,\
'shuffle':True,\
'_FillValue':None}
# PROVIDE SPECIFIC ENCODING DIRECTIVES FOR EACH COORDINATE
if not less_output:
print('\n... creating coordinate encodings')
coord_encoding = dict()
for coord in grid.coords:
# default encoding: no fill value, float32
coord_encoding[coord] = {'_FillValue':None, 'dtype':'float32'}
if grid[coord].values.dtype == np.int64:
grid[coord].values[:] = grid[coord].astype(np.int32)
coord_encoding[coord]['dtype'] ='int32'
# MERGE ENCODINGS for coordinates and variables
encoding = {**dv_encoding, **coord_encoding}
if not less_output:
print('\n... saving single file to netcdf')
grid.to_netcdf(str(new_fname), encoding=encoding)
# save as 13 tiles
if not less_output:
print('making 13 file grid netcdf')
for i in range(13):
tmp = grid.sel(tile=i)
tmp2 = file_basename + '_TILE_' + str(i).zfill(2) + '.nc'
tmp.attrs['product_name'] = tmp2
new_fname = grid_output_dir / tmp2
if not less_output:
print (new_fname)
tmp.to_netcdf(str(new_fname), encoding=encoding)
return grid
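# Hedged usage sketch (illustrative; the directory names below are hypothetical):
def _example_write_native_grid(grid_dir='grid_mds', out_dir='grid_nc'):
    """Write the llc grid geometry to one global netCDF file plus 13 tile files."""
    return create_nc_grid_files_on_native_grid_from_mds(grid_input_dir=grid_dir,
                                                        grid_output_dir=out_dir,
                                                        less_output=True)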
#%%
def get_time_steps_from_mds_files(mds_var_dir, mds_file):
if isinstance(mds_var_dir, str):
mds_var_dir = Path(mds_var_dir)
tmp_files = np.sort(list(mds_var_dir.glob(mds_file + '*meta')))
time_steps = []
print ('get time steps')
print (tmp_files)
for i in range(len(tmp_files)):
time_steps.append(int(tmp_files[i].stem[-10:]))
return time_steps
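# Hedged sketch (illustrative; directory and variable prefix are hypothetical):
def _example_list_time_steps(var_dir='diags', prefix='ETAN_day_mean'):
    """Return the model time steps for which .meta files of `prefix` exist."""
    return get_time_steps_from_mds_files(var_dir, prefix)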
#%%
# create the interpolated fields. Default is on 0.5 degrees by 0.5 degrees.
def create_nc_variable_files_on_regular_grid_from_mds(mds_var_dir,
mds_files_to_load,
mds_grid_dir,
output_dir,
output_freq_code,
vars_to_load = 'all',
tiles_to_load = [0,1,2,3,4,5,6,7,8,9,10,11,12],
time_steps_to_load = [],
meta_variable_specific = dict(),
meta_common = dict(),
mds_datatype = '>f4',
dlon=0.5, dlat=0.5,
radius_of_influence = 120000,
express=1,
kvarnmidx = 2, # coordinate idx for vertical axis
# method now is only a place holder.
# This can be expanded. For example,
# the global interpolated fields can
# split to tiles, similarly to
# the tiled native fields, to
# reduce the size of each file.
verbose=True,
method = ''):
#%%
# force mds_files_to_load to be a list (if str is passed)
if isinstance(mds_files_to_load, str):
mds_files_to_load = [mds_files_to_load]
# force time_steps_to_load to be a list (if int is passed)
if isinstance(time_steps_to_load, int):
time_steps_to_load = [time_steps_to_load]
# force tiles_to_load to be a list (if int is passed)
if isinstance(tiles_to_load, int):
tiles_to_load = [tiles_to_load]
# if no specific file data passed, read default metadata from json file
# -- variable specific meta data
script_dir = os.path.dirname(__file__) # <-- absolute dir the script is in
if not meta_variable_specific:
meta_variable_rel_path = '../meta_json/ecco_meta_variable.json'
abs_meta_variable_path = os.path.join(script_dir, meta_variable_rel_path)
with open(abs_meta_variable_path, 'r') as fp:
meta_variable_specific = json.load(fp)
# --- common meta data
if not meta_common:
meta_common_rel_path = '../meta_json/ecco_meta_common.json'
abs_meta_common_path = os.path.join(script_dir, meta_common_rel_path)
with open(abs_meta_common_path, 'r') as fp:
meta_common = json.load(fp)
# info for the regular grid
new_grid_min_lat = -90+dlat/2.
new_grid_max_lat = 90-dlat/2.
new_grid_min_lon = -180+dlon/2.
new_grid_max_lon = 180-dlon/2.
new_grid_ny = int((new_grid_max_lat-new_grid_min_lat)/dlat + 1 + 1e-4*dlat)
new_grid_nx = int((new_grid_max_lon-new_grid_min_lon)/dlon + 1 + 1e-4*dlon)
j_reg = new_grid_min_lat + np.asarray(range(new_grid_ny))*dlat
i_reg = new_grid_min_lon + np.asarray(range(new_grid_nx))*dlon
j_reg_idx = np.asarray(range(new_grid_ny))
i_reg_idx = np.asarray(range(new_grid_nx))
if (new_grid_ny < 1) or (new_grid_nx < 1):
raise ValueError('You need to have at least one grid point for the new grid.')
# loop through each mds file in mds_files_to_load
for mds_file in mds_files_to_load:
# if time steps to load is empty, load all time steps
if len(time_steps_to_load ) == 0:
# go through each file, pull out the time step, add the time step to a list,
# and determine the start and end time of each record.
time_steps_to_load = \
get_time_steps_from_mds_files(mds_var_dir, mds_file)
first_meta_fname = mds_file + '.' + \
str(time_steps_to_load[0]).zfill(10) + '.meta'
# get metadata for the first file and determine which variables
# are present
meta = xm.utils.parse_meta_file(mds_var_dir + '/' + first_meta_fname)
vars_here = meta['fldList']
if not isinstance(vars_to_load, list):
vars_to_load = [vars_to_load]
if 'all' not in vars_to_load:
num_vars_matching = len(np.intersect1d(vars_to_load, vars_here))
print ('num vars matching ', num_vars_matching)
# only proceed if we are sure that the variable we want is in this
# mds file
if num_vars_matching == 0:
print ('none of the variables you want are in ', mds_file)
print (vars_to_load)
print (vars_here)
break
#%%
# load the MDS fields
ecco_dataset_all = \
load_ecco_vars_from_mds(mds_var_dir, \
mds_grid_dir,
mds_file,
vars_to_load = vars_to_load,
tiles_to_load=tiles_to_load,
model_time_steps_to_load=time_steps_to_load,
output_freq_code = \
output_freq_code,
meta_variable_specific = \
meta_variable_specific,
meta_common=meta_common,
mds_datatype=mds_datatype,
llc_method = 'bigchunks')
# do the actual loading. Otherwise, the code may be slow.
ecco_dataset_all.load()
# print(ecco_dataset_all.keys())
# loop through each variable in this dataset,
for var in ecco_dataset_all.keys():
print (' ' + var)
# obtain the grid information (use fields from time=0)
# Note that nrtmp would always equal to one,
# since each outfile will include only one time-record (e.g. daily, monthly avgs.).
ecco_dataset = ecco_dataset_all.isel(time=[0])
var_ds = ecco_dataset[var]
shapetmp = var_ds.shape
lenshapetmp = len(shapetmp)
nttmp = 0
nrtmp = 0
if(lenshapetmp==4):
nttmp = shapetmp[0]
nrtmp = 0
elif(lenshapetmp==5):
nttmp = shapetmp[0]
nrtmp = shapetmp[1]
else:
print('Error! ', var_ds.shape)
sys.exit()
# Get X,Y of the original grid. They could be XC/YC, XG/YC, XC/YG, etc.
# Similar for mask.
# default is XC, YC
if 'i' in var_ds.coords.keys():
XX = ecco_dataset['XC']
XXname = 'XC'
if 'j' in var_ds.coords.keys():
YY = ecco_dataset['YC']
YYname = 'YC'
varmask = 'maskC'
iname = 'i'
jname = 'j'
if 'i_g' in var_ds.coords.keys():
XX = ecco_dataset['XG']
XXname = 'XG'
varmask = 'maskW'
iname = 'i_g'
if 'j_g' in var_ds.coords.keys():
YY = ecco_dataset['YG']
YYname = 'YG'
varmask = 'maskS'
jname = 'j_g'
# interpolation
# To do it fast, set express==1 (default)
if(express==1):
orig_lons_1d = XX.values.ravel()
orig_lats_1d = YY.values.ravel()
orig_grid = pr.geometry.SwathDefinition(lons=orig_lons_1d,
lats=orig_lats_1d)
if (new_grid_ny > 0) and (new_grid_nx > 0):
# build the 2D lon/lat arrays of the regular grid from the 1D vectors
new_grid_lon, new_grid_lat = np.meshgrid(i_reg, j_reg)
# define the lat lon points of the two parts.
new_grid = pr.geometry.GridDefinition(lons=new_grid_lon,
lats=new_grid_lat)
# Get the neighbor info once.
# It will be used repeatedly late to resample data
# fast for each of the datasets that is based on
# the same swath, e.g. for a model variable at different times.
valid_input_index, valid_output_index, index_array, distance_array = \
pr.kd_tree.get_neighbour_info(orig_grid,
new_grid, radius_of_influence,
neighbours=1)
# loop through time steps, one at a time.
for time_step in time_steps_to_load:
i, = np.where(ecco_dataset_all.timestep == time_step)
if(verbose):
print (ecco_dataset_all.timestep.values)
print ('time step ', time_step, i)
# load the dataset
ecco_dataset = ecco_dataset_all.isel(time=i)
# pull out the year, month day, hour, min, sec associated with
# this time step
if type(ecco_dataset.time.values) == np.ndarray:
cur_time = ecco_dataset.time.values[0]
else:
cur_time = ecco_dataset.time.values
#print (type(cur_time))
year, mon, day, hh, mm, ss = \
extract_yyyy_mm_dd_hh_mm_ss_from_datetime64(cur_time)
print(year, mon, day)
# if the field comes from an average,
# extract the time bounds -- we'll use it before we save
# the variable
if 'AVG' in output_freq_code:
tb = ecco_dataset.time_bnds
tb.name = 'tb'
var_ds = ecco_dataset[var]
# 3d fields (with Z-axis) for each time record
if(nttmp != 0 and nrtmp != 0):
tmpall = np.zeros((nttmp, nrtmp,new_grid_ny,new_grid_nx))
for ir in range(nrtmp): # Z-loop
# mask
maskloc = ecco_dataset[varmask].values[ir,:]
for it in range(nttmp): # time loop
# one 2d field at a time
#!/usr/bin/env python
"""
A component of a findNeighbour4 server which provides relatedness information for bacterial genomes.
It does so using PCA, and supports PCA based cluster generation.
The associated classes compute a variation model for samples in a findNeighbour4 server.
Computation uses data in MongoDb, and is not memory intensive, using configuration information in a
config file.
Functionality is provided in following classes:
* VariationModel - stores results of producing variant matrix and running PCA
* VariantMatrix - computes sample x variant matrix (requires: PERSIST object for mongodb access; server configuration file)
* PCARunner - runs PCA on VariantMatrix
Unit testing is facilitated by a
* PersistenceTest class. This exposes a small subset of the fn3persist object's methods, sufficient to test PCA. It can be used to store subsets of data for testing purposes
without the need to access a real fn3persistence data store.
A component of the findNeighbour4 system for bacterial relatedness monitoring
Copyright (C) 2021 <NAME> <EMAIL>
repo: https://github.com/davidhwyllie/findNeighbour4
This program is free software: you can redistribute it and/or modify
it under the terms of the MIT License as published
by the Free Software Foundation. See <https://opensource.org/licenses/MIT>, and the LICENSE file.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
"""
# import libraries
import os
import logging
import warnings
import datetime
import random
from typing import Tuple, Set
from collections import defaultdict
import hashlib
import json
import pathlib
import pandas as pd
import numpy as np
from scipy.stats import poisson
import progressbar
import sqlalchemy
from scipy.stats import binom_test, median_abs_deviation
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
class PersistenceTest:
"""a class which mimics some methods available in an fn3persistence object, sufficient to unit test PCA generation.
Only these methods are implemented:
__init__
refcompressedsequence_guids
refcompressedsequence_read
Additionally, a load_data method is provided which loads data into the object.
Data in the correct format can be generated by utils/temporal_subsets.py
"""
def __init__(self, **kwargs):
"""constructs the object. Any parameters are accepted, and none have any effect"""
self.seqs = {}
self.sample_ids = set([])
def load_data(self, sample_ids_file, sequences_file):
with open("testdata/pca/seqs_5000test.json", "rt") as f:
self.seqs = json.load(f)
for guid in self.seqs.keys():
for key in ["<KEY>"]:
self.seqs[guid][key] = set(self.seqs[guid][key])
with open("testdata/pca/sample_ids_5000test.json", "rt") as f:
self.sample_ids = set(json.load(f))
# sanity check
# check there are no samples in sample_ids which are not present in seqs
# sample_ids are allowed to be a subset of seqs, but
# no samples should exists in sample_ids which aren't in seqs
missing = self.sample_ids - set(self.seqs.keys())
if len(missing) > 0:
raise KeyError(
"Provided with sample_ids which are not in seqs. There are {0} such sequences. Examples are: {1}".format(
len(missing), missing
)
)
def refcompressedsequence_guids(self):
return self.sample_ids
def refcompressedsequence_read(self, guid):
"""read a single sequence"""
if guid not in self.sample_ids:
return None
return self.seqs[guid]
def refcompressedsequence_read_all(self):
"""reads all sequences, returning a generator"""
for guid in self.sample_ids:
yield guid, self.seqs[guid]
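# Hedged usage sketch (the fixture paths match those read by load_data above):
def _example_load_test_store():
    """Build the in-memory persistence layer from the bundled 5000-sample test data."""
    store = PersistenceTest()
    store.load_data("testdata/pca/sample_ids_5000test.json",
                    "testdata/pca/seqs_5000test.json")
    return store.refcompressedsequence_guids()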
class MNStats:
"""computes the number of M and N bases in a reference compressed object"""
def __init__(self, select_positions, analysed_reference_length):
"""input:
select_positions: the positions contributing to the pca model, as generated by ModelBuilder.
analysed_reference_length: the number of reference bases analysed."""
self.select_positions = select_positions
self.analysed_reference_length = analysed_reference_length
def examine(self, obj):
"""examines the reference compressed object obj,
reporting
* number of Ns and Ms in the sequence, subdivided by whether they are in
select_positions
* "Test 2" (binomial test, as per findNeighbour3 paper) testing whether the frequency of Ns/Ms in the selected_positions exceed those elsewhere,
indicative of a mixture."""
missing = {
"m_in_model": 0,
"n_in_model": 0,
"model_positions": len(self.select_positions),
"reference_positions": self.analysed_reference_length,
}
for base in ["M", "N"]: # compute missingness
base_l = base.lower()
# record total numbers of N and M per guid
try:
missing["{0}_total".format(base_l)] = len(obj[base])
except KeyError:
missing["{0}_total".format(base_l)] = 0
# examine all missing (N/M) sites, adding to a missingness model
try:
for pos in obj[base]:
if pos in self.select_positions:
try:
missing["{0}_in_model".format(base_l)] += 1
except KeyError:
pass
except KeyError:
pass # if there are no M,N then we can ignore these
## do binomial test
not_model = self.analysed_reference_length - len(self.select_positions)
p_expected = (
missing["{0}_total".format(base_l)]
- missing["{0}_in_model".format(base_l)]
) / not_model
missing["{0}_expected_proportion".format(base_l)] = p_expected
p_observed = missing["{0}_in_model".format(base_l)] / len(
self.select_positions
)
missing["{0}_observed_proportion".format(base_l)] = p_observed
p_val = binom_test(
missing["{0}_in_model".format(base_l)],
len(self.select_positions),
p_expected,
alternative="greater",
)
missing["{0}_p_value".format(base_l)] = p_val
return missing
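# Hedged usage sketch (illustrative; `rcs` is a reference-compressed sequence in the
# same dict-of-sets format as PersistenceTest.seqs values, e.g. {"N": {12, 99}, "M": set()}):
def _example_missingness_pvalue(rcs, model_positions, analysed_reference_length):
    """Return the Test-2 p value for excess Ns at the PCA model positions."""
    stats = MNStats(set(model_positions), analysed_reference_length)
    return stats.examine(rcs)["n_p_value"]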
class VariationModel:
"""Stores a VariantMatrix, the output of a PCA of the matrix, and (optionally) a clustering of the principal components.
You should not normally have to call this class directly to create a VariationModel - the VariantMatrix class would do this for you.
- You might wish to instantiate this class directly if you are restoring a previously serialised VariationModel - see constructor"""
def __init__(self):
"""
creates a new Variation model.
"""
self.model = {"built": False}
return
def __getitem__(self, key):
if key not in self.model:
raise KeyError(f"Key {key} not found")
return self.model[key]
def __setitem__(self, key, value):
"""adds a key-value pair to the model"""
if key in self.model.keys():
raise KeyError(f"Cannot replace key {key}")
else:
self.model[key] = value
def _coefficients_hash(self):
"""computes a hash on the coefficients in the variant model.
This is useful for version tracking & storing patterns of masking."""
h = hashlib.md5()
h.update(self.model["eigenvectors"].to_csv().encode("utf-8"))
md5_l = h.hexdigest()
return "{0}".format(md5_l)
def finish(self):
"""completes construction of the VariationModel"""
self.model["build_time"] = datetime.datetime.now().isoformat()
self.model["coefficients_hash"] = self._coefficients_hash()
self.model["built"] = True
def to_sqlite(
self,
outputdir="",
analysis_name="pca_output",
rebuild_databases_if_present=True,
):
"""write output to sqlite database
Inputs
=======
outputdir the directory the SQLite database goes into. will create if it does not exist
analysis_name name of the analysis. Will become the first part of the file name
rebuild_databases_if_present delete any existing SQLite database
Returns
=======
path to sqlite database
"""
# ensure the outputdir exists
pathlib.Path(outputdir).mkdir(parents=True, exist_ok=True)
# configure sqlite file for output.
sqlite_file = os.path.join(outputdir, "{0}.sqlite".format(analysis_name))
engine = sqlalchemy.create_engine(
"sqlite:///{0}".format(sqlite_file), echo=False
)
# run checks on sqlite file
if rebuild_databases_if_present:
try:
os.unlink(sqlite_file)
except FileNotFoundError:
pass
# open connection
conn = engine.connect()
metadata = []
for key in self.model:
if not (
key == "variant_matrix"
or isinstance(self.model[key], PCA)
or key == "transformed_coordinates"
): # we don't serialise these; one is massive and the other can't be serialised
logging.info("Writing {0}".format(key))
native_type = type(self.model[key])
if native_type in [bool, int, float, str]:
metadata.append(
{
"variable": key,
"value": str(self.model[key]),
"native_type": str(native_type),
}
)
elif type(self.model[key]) in [set, list]:
if type(self.model[key]) == set:
list_data = sorted(list(self.model[key]))
else:
list_data = self.model[key]
tmp = pd.DataFrame(list_data, columns=[key])
tmp.to_sql(
key, conn, if_exists="fail"
) # we don't serialise these at present
elif type(self.model[key]) in [dict, defaultdict]:
output_records = []
for this_key in self.model[key].keys():
item = self.model[key][this_key]
if type(item) in [float, bool, int, str]:
item = [item]
if not type(item) == list:
raise TypeError(
"Can only export dictionaries which are of key:list or key:scalar format; the list element is of type {0} : {1}".format(
type(item), item
)
)
for list_element in item:
output_records.append(
{key: this_key, "value": list_element}
)
tmp = pd.DataFrame.from_records(output_records)
elif type(self.model[key]) == np.int64:
metadata.append(
{"variable": key, "value": str(int(self.model[key]))}
)
elif type(self.model[key]) == pd.core.frame.DataFrame:
# these types of dataframe have indices which are sample_ids. We relabel the index sample_id
if key in ["mix_quality_info", "suspect_quality_seqs"]:
self.model[key].rename_axis("sample_id")
self.model[key].to_sql(key, conn, if_exists="fail")
else:
warnings.warn(
"Not handled {0} with class {1}".format(
key, type(self.model[key])
)
)
metadata_df = pd.DataFrame.from_records(metadata)
metadata_df.to_sql("Metadata", conn, if_exists="fail")
conn.close()
return sqlite_file
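# Hedged sketch (illustrative): read the Metadata table back from the file written
# by VariationModel.to_sqlite(); table names follow the model keys serialised above.
def _example_read_pca_metadata(sqlite_file):
    """Return the scalar model metadata as a pandas DataFrame."""
    engine = sqlalchemy.create_engine("sqlite:///{0}".format(sqlite_file), echo=False)
    with engine.connect() as conn:
        return pd.read_sql_table("Metadata", conn)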
class VariantMatrix:
"""In charge of producing a sample x SNP matrix"""
def __init__(self, CONFIG, PERSIST, show_bar=True):
"""Construct a variant matrix
Parameters:
CONFIG: a configuration dictionary, as produced by findn.common_utils.ConfigManager.read_config()
PERSIST: a persistence object providing access to stored sequence data.
Any of the following will work:
findn.mongoStore.fn3persistence object, or
findn.rdbmstore.fn3persistence, or
localstore.localstoreutils.LocalStore object (fast access from a local tar file - preferred for large datasets), or
PersistenceTest object, the latter being useful for unit testing.
show_bar: whether or not to show a progress bar
"""
# store the persistence object as part of the object
self.PERSIST = PERSIST
# set easy to read properties from the config
self.analysed_reference_length = len(CONFIG["reference"]) - len(
set(CONFIG["excludePositions"])
)
# store whether we're using bars for progress reporting
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libidl_client/topofw/peripheral/idl/PeripheralDeviceSlot.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.event
import raritan.rpc.idl
import raritan.rpc.peripheral
import raritan.rpc.sensors
# enumeration
class PortType(Enumeration):
idlType = "peripheral.PortType:1.0.0"
values = [
"ONEWIRE_ONBOARD",
"ONEWIRE_DEV_PORT",
"ONEWIRE_HUB_PORT",
"ONEWIRE_CHAIN_POS",
]
PortType.ONEWIRE_ONBOARD = PortType(0)
PortType.ONEWIRE_DEV_PORT = PortType(1)
PortType.ONEWIRE_HUB_PORT = PortType(2)
PortType.ONEWIRE_CHAIN_POS = PortType(3)
# structure
class PosElement(Structure):
idlType = "peripheral.PosElement:1.0.0"
elements = ["portType", "port"]
def __init__(self, portType, port):
typecheck.is_enum(portType, raritan.rpc.peripheral.PortType, AssertionError)
typecheck.is_string(port, AssertionError)
self.portType = portType
self.port = port
@classmethod
def decode(cls, json, agent):
obj = cls(
portType=raritan.rpc.peripheral.PortType.decode(json["portType"]),
port=json["port"],
)
return obj
def encode(self):
json = {}
json["portType"] = raritan.rpc.peripheral.PortType.encode(self.portType)
json["port"] = self.port
return json
# structure
class DeviceID(Structure):
idlType = "peripheral.DeviceID:2.0.0"
elements = ["serial", "type", "isActuator", "channel"]
def __init__(self, serial, type, isActuator, channel):
typecheck.is_string(serial, AssertionError)
typecheck.is_struct(type, raritan.rpc.sensors.Sensor.TypeSpec, AssertionError)
typecheck.is_bool(isActuator, AssertionError)
typecheck.is_int(channel, AssertionError)
self.serial = serial
self.type = type
self.isActuator = isActuator
self.channel = channel
@classmethod
def decode(cls, json, agent):
obj = cls(
serial=json["serial"],
type=raritan.rpc.sensors.Sensor.TypeSpec.decode(json["type"], agent),
isActuator=json["isActuator"],
channel=json["channel"],
)
return obj
def encode(self):
json = {}
json["serial"] = self.serial
json["type"] = raritan.rpc.sensors.Sensor.TypeSpec.encode(self.type)
json["isActuator"] = self.isActuator
json["channel"] = self.channel
return json
# structure
class Address(Structure):
idlType = "peripheral.Address:2.0.0"
elements = ["position", "type", "isActuator", "channel"]
def __init__(self, position, type, isActuator, channel):
for x0 in position:
typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
typecheck.is_struct(type, raritan.rpc.sensors.Sensor.TypeSpec, AssertionError)
typecheck.is_bool(isActuator, AssertionError)
typecheck.is_int(channel, AssertionError)
self.position = position
self.type = type
self.isActuator = isActuator
self.channel = channel
@classmethod
def decode(cls, json, agent):
obj = cls(
position=[
raritan.rpc.peripheral.PosElement.decode(x0, agent)
for x0 in json["position"]
],
type=raritan.rpc.sensors.Sensor.TypeSpec.decode(json["type"], agent),
isActuator=json["isActuator"],
channel=json["channel"],
)
return obj
def encode(self):
json = {}
json["position"] = [
raritan.rpc.peripheral.PosElement.encode(x0) for x0 in self.position
]
json["type"] = raritan.rpc.sensors.Sensor.TypeSpec.encode(self.type)
json["isActuator"] = self.isActuator
json["channel"] = self.channel
return json
# value object
class Device(ValueObject):
idlType = "peripheral.Device:2.0.0"
def __init__(self, deviceID, position, packageClass, device):
typecheck.is_struct(deviceID, raritan.rpc.peripheral.DeviceID, AssertionError)
for x0 in position:
typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
typecheck.is_string(packageClass, AssertionError)
typecheck.is_interface(device, raritan.rpc.sensors.Sensor, AssertionError)
self.deviceID = deviceID
self.position = position
self.packageClass = packageClass
self.device = device
def encode(self):
json = {}
json["deviceID"] = raritan.rpc.peripheral.DeviceID.encode(self.deviceID)
json["position"] = [
raritan.rpc.peripheral.PosElement.encode(x0) for x0 in self.position
]
json["packageClass"] = self.packageClass
json["device"] = Interface.encode(self.device)
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
deviceID=raritan.rpc.peripheral.DeviceID.decode(json["deviceID"], agent),
position=[
raritan.rpc.peripheral.PosElement.decode(x0, agent)
for x0 in json["position"]
],
packageClass=json["packageClass"],
device=Interface.decode(json["device"], agent),
)
return obj
def listElements(self):
elements = ["deviceID", "position", "packageClass", "device"]
return elements
# interface
class DeviceSlot(Interface):
idlType = "peripheral.DeviceSlot:2.0.0"
ERR_INVALID_PARAMS = 1
ERR_NOT_SUPPORTED = 2
CHANNEL_INVALID = -1
# structure
class Location(Structure):
idlType = "peripheral.DeviceSlot.Location:1.0.0"
elements = ["x", "y", "z"]
def __init__(self, x, y, z):
typecheck.is_string(x, AssertionError)
typecheck.is_string(y, AssertionError)
typecheck.is_string(z, AssertionError)
self.x = x
self.y = y
self.z = z
@classmethod
def decode(cls, json, agent):
obj = cls(
x=json["x"],
y=json["y"],
z=json["z"],
)
return obj
def encode(self):
json = {}
json["x"] = self.x
json["y"] = self.y
json["z"] = self.z
return json
# structure
class Settings(Structure):
idlType = "peripheral.DeviceSlot.Settings:1.0.0"
elements = [
"name",
"description",
"location",
"useDefaultThresholds",
"properties",
]
def __init__(
self, name, description, location, useDefaultThresholds, properties
):
typecheck.is_string(name, AssertionError)
typecheck.is_string(description, AssertionError)
typecheck.is_struct(
location, raritan.rpc.peripheral.DeviceSlot.Location, AssertionError
)
typecheck.is_bool(useDefaultThresholds, AssertionError)
self.name = name
self.description = description
self.location = location
self.useDefaultThresholds = useDefaultThresholds
self.properties = properties
@classmethod
def decode(cls, json, agent):
obj = cls(
name=json["name"],
description=json["description"],
location=raritan.rpc.peripheral.DeviceSlot.Location.decode(
json["location"], agent
),
useDefaultThresholds=json["useDefaultThresholds"],
properties=dict(
[(elem["key"], elem["value"]) for elem in json["properties"]]
),
)
return obj
def encode(self):
json = {}
json["name"] = self.name
json["description"] = self.description
json["location"] = raritan.rpc.peripheral.DeviceSlot.Location.encode(
self.location
)
json["useDefaultThresholds"] = self.useDefaultThresholds
json["properties"] = [
dict(key=k, value=v) for k, v in self.properties.items()
]
return json
# value object
class DeviceChangedEvent(raritan.rpc.idl.Event):
idlType = "peripheral.DeviceSlot.DeviceChangedEvent:1.0.0"
def __init__(self, oldDevice, newDevice, source):
super(raritan.rpc.peripheral.DeviceSlot.DeviceChangedEvent, self).__init__(
source
)
typecheck.is_valobj(
oldDevice, raritan.rpc.peripheral.Device, AssertionError
)
typecheck.is_valobj(
newDevice, raritan.rpc.peripheral.Device, AssertionError
)
self.oldDevice = oldDevice
self.newDevice = newDevice
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceSlot.DeviceChangedEvent, self
).encode()
json["oldDevice"] = ValueObject.encode(self.oldDevice)
json["newDevice"] = ValueObject.encode(self.newDevice)
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
oldDevice=ValueObject.decode(json["oldDevice"], agent),
newDevice=ValueObject.decode(json["newDevice"], agent),
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = ["oldDevice", "newDevice"]
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceSlot.DeviceChangedEvent, self
).listElements()
)
return elements
# value object
class SettingsChangedEvent(raritan.rpc.event.UserEvent):
idlType = "peripheral.DeviceSlot.SettingsChangedEvent:1.0.0"
def __init__(self, oldSettings, newSettings, actUserName, actIpAddr, source):
super(
raritan.rpc.peripheral.DeviceSlot.SettingsChangedEvent, self
).__init__(actUserName, actIpAddr, source)
typecheck.is_struct(
oldSettings, raritan.rpc.peripheral.DeviceSlot.Settings, AssertionError
)
typecheck.is_struct(
newSettings, raritan.rpc.peripheral.DeviceSlot.Settings, AssertionError
)
self.oldSettings = oldSettings
self.newSettings = newSettings
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceSlot.SettingsChangedEvent, self
).encode()
json["oldSettings"] = raritan.rpc.peripheral.DeviceSlot.Settings.encode(
self.oldSettings
)
json["newSettings"] = raritan.rpc.peripheral.DeviceSlot.Settings.encode(
self.newSettings
)
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
oldSettings=raritan.rpc.peripheral.DeviceSlot.Settings.decode(
json["oldSettings"], agent
),
newSettings=raritan.rpc.peripheral.DeviceSlot.Settings.decode(
json["newSettings"], agent
),
# for event.UserEvent
actUserName=json["actUserName"],
actIpAddr=json["actIpAddr"],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = ["oldSettings", "newSettings"]
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceSlot.SettingsChangedEvent, self
).listElements()
)
return elements
def getDevice(self):
agent = self.agent
args = {}
rsp = agent.json_rpc(self.target, "getDevice", args)
_ret_ = ValueObject.decode(rsp["_ret_"], agent)
typecheck.is_valobj(_ret_, raritan.rpc.peripheral.Device, DecodeException)
return _ret_
def assign(self, devid):
agent = self.agent
typecheck.is_struct(devid, raritan.rpc.peripheral.DeviceID, AssertionError)
args = {}
args["devid"] = raritan.rpc.peripheral.DeviceID.encode(devid)
rsp = agent.json_rpc(self.target, "assign", args)
_ret_ = rsp["_ret_"]
typecheck.is_int(_ret_, DecodeException)
return _ret_
def assignAddress(self, packageClass, address):
agent = self.agent
typecheck.is_string(packageClass, AssertionError)
typecheck.is_struct(address, raritan.rpc.peripheral.Address, AssertionError)
args = {}
args["packageClass"] = packageClass
args["address"] = raritan.rpc.peripheral.Address.encode(address)
rsp = agent.json_rpc(self.target, "assignAddress", args)
_ret_ = rsp["_ret_"]
typecheck.is_int(_ret_, DecodeException)
return _ret_
def unassign(self):
agent = self.agent
args = {}
rsp = agent.json_rpc(self.target, "unassign", args)
_ret_ = rsp["_ret_"]
typecheck.is_int(_ret_, DecodeException)
return _ret_
def getSettings(self):
agent = self.agent
args = {}
rsp = agent.json_rpc(self.target, "getSettings", args)
_ret_ = raritan.rpc.peripheral.DeviceSlot.Settings.decode(rsp["_ret_"], agent)
typecheck.is_struct(
_ret_, raritan.rpc.peripheral.DeviceSlot.Settings, DecodeException
)
return _ret_
def setSettings(self, settings):
agent = self.agent
typecheck.is_struct(
settings, raritan.rpc.peripheral.DeviceSlot.Settings, AssertionError
)
args = {}
args["settings"] = raritan.rpc.peripheral.DeviceSlot.Settings.encode(settings)
rsp = agent.json_rpc(self.target, "setSettings", args)
_ret_ = rsp["_ret_"]
typecheck.is_int(_ret_, DecodeException)
return _ret_
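# Hypothetical usage sketch (not part of the generated proxy code): driving the
# DeviceSlot interface above requires an authenticated raritan.rpc agent and the
# RID of a slot. Everything below (the Agent constructor arguments, the RID, and
# the way the proxy is obtained) is an assumption for illustration only.
#
#     agent = raritan.rpc.Agent("https", "my-pdu.example.com", "admin", "secret")
#     slot = DeviceSlot("/model/peripheraldeviceslot/0", agent)
#     settings = slot.getSettings()
#     settings.name = "Rack 12 temperature probe"
#     slot.setSettings(settings)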
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libidl_client/topofw/peripheral/idl/PeripheralDevicePackage.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.peripheral
# structure
class PackageInfo(Structure):
idlType = "peripheral.PackageInfo:2.0.0"
elements = ["state", "position", "hwInfo", "fwInfo"]
def __init__(self, state, position, hwInfo, fwInfo):
typecheck.is_enum(
state, raritan.rpc.peripheral.PackageInfo.State, AssertionError
)
for x0 in position:
typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
typecheck.is_struct(
hwInfo, raritan.rpc.peripheral.PackageInfo.HardwareInfo, AssertionError
)
typecheck.is_struct(
fwInfo, raritan.rpc.peripheral.PackageInfo.FirmwareInfo, AssertionError
)
self.state = state
self.position = position
self.hwInfo = hwInfo
self.fwInfo = fwInfo
@classmethod
def decode(cls, json, agent):
obj = cls(
state=raritan.rpc.peripheral.PackageInfo.State.decode(json["state"]),
position=[
raritan.rpc.peripheral.PosElement.decode(x0, agent)
for x0 in json["position"]
],
hwInfo=raritan.rpc.peripheral.PackageInfo.HardwareInfo.decode(
json["hwInfo"], agent
),
fwInfo=raritan.rpc.peripheral.PackageInfo.FirmwareInfo.decode(
json["fwInfo"], agent
),
)
return obj
def encode(self):
json = {}
json["state"] = raritan.rpc.peripheral.PackageInfo.State.encode(self.state)
json["position"] = [
raritan.rpc.peripheral.PosElement.encode(x0) for x0 in self.position
]
json["hwInfo"] = raritan.rpc.peripheral.PackageInfo.HardwareInfo.encode(
self.hwInfo
)
json["fwInfo"] = raritan.rpc.peripheral.PackageInfo.FirmwareInfo.encode(
self.fwInfo
)
return json
# enumeration
class State(Enumeration):
idlType = "peripheral.PackageInfo.State:1.0.0"
values = ["NORMAL", "FW_UPDATE", "INTERNAL_ERROR", "CONFIG_ERROR"]
State.NORMAL = State(0)
State.FW_UPDATE = State(1)
State.INTERNAL_ERROR = State(2)
State.CONFIG_ERROR = State(3)
# structure
class HardwareInfo(Structure):
idlType = "peripheral.PackageInfo.HardwareInfo:1.0.0"
elements = [
"serial",
"packageClass",
"model",
"minDowngradeVersion",
"revision",
]
def __init__(self, serial, packageClass, model, minDowngradeVersion, revision):
typecheck.is_string(serial, AssertionError)
typecheck.is_string(packageClass, AssertionError)
typecheck.is_string(model, AssertionError)
typecheck.is_int(minDowngradeVersion, AssertionError)
typecheck.is_string(revision, AssertionError)
self.serial = serial
self.packageClass = packageClass
self.model = model
self.minDowngradeVersion = minDowngradeVersion
self.revision = revision
@classmethod
def decode(cls, json, agent):
obj = cls(
serial=json["serial"],
packageClass=json["packageClass"],
model=json["model"],
minDowngradeVersion=json["minDowngradeVersion"],
revision=json["revision"],
)
return obj
def encode(self):
json = {}
json["serial"] = self.serial
json["packageClass"] = self.packageClass
json["model"] = self.model
json["minDowngradeVersion"] = self.minDowngradeVersion
json["revision"] = self.revision
return json
# structure
class FirmwareInfo(Structure):
idlType = "peripheral.PackageInfo.FirmwareInfo:1.0.0"
elements = ["compileDate", "version", "updateDate"]
def __init__(self, compileDate, version, updateDate):
typecheck.is_time(compileDate, AssertionError)
typecheck.is_struct(
version,
raritan.rpc.peripheral.PackageInfo.FirmwareInfo.Version,
AssertionError,
)
typecheck.is_time(updateDate, AssertionError)
self.compileDate = compileDate
self.version = version
self.updateDate = updateDate
@classmethod
def decode(cls, json, agent):
obj = cls(
compileDate=raritan.rpc.Time.decode(json["compileDate"]),
version=raritan.rpc.peripheral.PackageInfo.FirmwareInfo.Version.decode(
json["version"], agent
),
updateDate=raritan.rpc.Time.decode(json["updateDate"]),
)
return obj
def encode(self):
json = {}
json["compileDate"] = raritan.rpc.Time.encode(self.compileDate)
json[
"version"
] = raritan.rpc.peripheral.PackageInfo.FirmwareInfo.Version.encode(
self.version
)
json["updateDate"] = raritan.rpc.Time.encode(self.updateDate)
return json
# structure
class Version(Structure):
idlType = "peripheral.PackageInfo.FirmwareInfo.Version:1.0.0"
elements = ["majorNumber", "minorNumber"]
def __init__(self, majorNumber, minorNumber):
typecheck.is_int(majorNumber, AssertionError)
typecheck.is_int(minorNumber, AssertionError)
self.majorNumber = majorNumber
self.minorNumber = minorNumber
@classmethod
def decode(cls, json, agent):
obj = cls(
majorNumber=json["majorNumber"],
minorNumber=json["minorNumber"],
)
return obj
def encode(self):
json = {}
json["majorNumber"] = self.majorNumber
json["minorNumber"] = self.minorNumber
return json
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libidl_client/topofw/peripheral/idl/PeripheralDeviceManager.idl"
#
import | |
# turingmachine.py - implementation of the Turing machine model
#
# Copyright 2014 <NAME>.
#
# This file is part of turingmachine.
#
# turingmachine is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# turingmachine is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# turingmachine. If not, see <http://www.gnu.org/licenses/>.
"""Provides an implementation of the Turing machine model."""
import logging
import os.path
# Create and configure the logger which logs debugging information by default.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(level=logging.DEBUG)
#: Represents a movement of the read/write head of the Turing machine to the
#: left.
L = -1
#: Represents a movement of the read/write head of the Turing machine to the
#: right.
R = +1
#Stores the loaded TM, output and logger state
tm=[None]
output=[None]
logs=[True]
class UnknownSymbol(Exception):
"""This exception is raised when the Turing machine encounters a symbol
that does not appear in the transition dictionary.
"""
pass
class UnknownState(Exception):
"""This exception is raised when the Turing machine enters a state that
does not appear in the transition dictionary.
"""
pass
class BadSymbol(Exception):
"""This exception is raised when the user attempts to specify a tape
alphabet that includes strings of length not equal to one.
"""
pass
class BadFile(Exception):
"""This exception is raised when the file given by the user is
inconsistent with the rules for it.
"""
pass
class NoMachine(Exception):
"""This exception is raised when the user tries to run without having
loaded any machine.
"""
pass
def load(file, max_steps=10000):
'''Import the machine description from the given file, which must have the
following structure:
1st line: "ATM"
2nd line: Any comment you might want
3rd line: Input alphabet
4th line: Tape alphabet
5th line: Number of tapes (currently must be 1)
6th line: Number of tracks on each tape (currently must be 1)
7th line: Number of directions in which the tapes are infinite (currently must be 2)
8th line: Initial state
9th line: Final state
10th line and beyond: one transition per line, given as five space-separated tokens:
current state
symbol read
next state
symbol to be written
direction to move (R or L)
last line: "end"
Comments are introduced with "//"
(An illustrative sample file is sketched in a comment after this function.)
'''
#Check if file exists, open it and separate lines
if not os.path.exists(file): raise BadFile("File does not exist.")
file=open(file, "r")
file=file.read()
file=file.split("\n")
#Create new list removing every comment
raw=[]
for line in file:
i=0
while i<len(line):
if line[i]=="/" and i+1<len(line) and line[i+1]=="/":
raw.append(line[:i])
break
i+=1
if i==len(line): raw.append(line)
#Create descriptor separating words and removing null strings/lists
desc=[]
for line in raw:
#Replace tabs with spaces before splitting (strings are immutable, so build a new one)
line=line.replace("\t", " ")
desc.append(line.split(" "))
i=0
while i<len(desc[-1]):
if desc[-1][i]=="":
desc[-1].pop(i)
i-=1
i+=1
line=0
while line<len(desc):
if len(desc[line])==0:
desc.pop(line)
line-=1
line+=1
#Check static values in code and raise exceptions
if desc[0][0]!="ATM": raise BadFile("First line must be 'ATM'.")
if desc[4][0]!="1": raise BadFile("Number of tapes must be 1.")
if desc[5][0]!="1": raise BadFile("Number of tracks must be 1.")
if desc[6][0]!="2": raise BadFile("Number of infinite directions must be 2.")
if desc[-1][0]!="end": raise BadFile("Last line must be 'end'.")
#Get the input and tape alphabets
in_alph=desc[2]
tp_alph=desc[3]
#Get initial and final states
if len(desc[7])>1: raise BadFile("Initial state must be a single token.")
ini=desc[7][0]
if len(desc[8])>1: raise BadFile("Final state must be a single token.")
fin=desc[8][0]
#Remove the preamble and the last line from the descriptor
desc=desc[9:-1]
#generate transitions dictionary with the remaining lines
transitions={}
states=[]
i=0
while i<len(desc):
#Check that the line syntax is correct and raise exceptions otherwise
if len(desc[i])!=5: raise BadFile("Transition "+str(i+1)+" badly formulated.")
if desc[i][1] not in tp_alph: raise BadFile("All symbols must be declared in the tape alphabet.")
if desc[i][3] not in tp_alph: raise BadFile("All symbols must be declared in the tape alphabet.")
if desc[i][4]!="R" and desc[i][4]!="L": raise BadFile("Directions must be either R or L.")
#Add transition
#If the state already exists in the states list, just add the transition depending on direction
if desc[i][0] in states:
if desc[i][4]=="R":transitions[desc[i][0]][desc[i][1]]=(desc[i][2],desc[i][3],R)
else:transitions[desc[i][0]][desc[i][1]]=(desc[i][2],desc[i][3],L)
#Otherwise add state and dictionary for it in transitions and add the transition
else:
states.append(desc[i][0])
transitions[desc[i][0]]={}
if desc[i][4]=="R":transitions[desc[i][0]][desc[i][1]]=(desc[i][2],desc[i][3],R)
else:transitions[desc[i][0]][desc[i][1]]=(desc[i][2],desc[i][3],L)
#If the destination state does not exist, create it and its dictionary in transitions
if not desc[i][2] in states:
states.append(desc[i][2])
transitions[desc[i][2]]={}
i+=1
#Check if initial and final states are in the transitions and raise errors
if not ini in states: raise BadFile("Initial state must be in transitions.")
if not fin in states: raise BadFile("Final state must be in transitions")
#Return the TM class
tm[0]=TuringMachine(states, in_alph, ini, fin, transitions, max_steps)
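# Illustrative sketch (not in the original module): a minimal machine description
# in the format documented in load(), saved e.g. as "flip.tm" (hypothetical file
# name), which rewrites 0s as 1s and vice versa and accepts on the blank symbol B:
#
#     ATM
#     example machine that flips bits
#     0 1
#     0 1 B
#     1
#     1
#     2
#     q0
#     qf
#     q0 0 q0 1 R
#     q0 1 q0 0 R
#     q0 B qf B R
#     end
#
# After load("flip.tm"), run("0110") executes the machine on the given string.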
def run(string):
'''Runs the Turing Machine with given string
'''
if tm[0]==None: raise NoMachine("You must first load a machine.")
print(tm[0](string))
def test(io_file):
'''Run every test case in the given file, print per-test results and the final score (out of 10)
'''
if not os.path.exists(io_file): raise BadFile(io_file+" does not exist.")
file=open(io_file)
file=file.read()
file=file.split("\n")
line=0
while line<len(file):
file[line]=file[line].split(" ")
item=0
while item<len(file[line]):
if file[line][item]=="":
file[line].pop(item)
item-=1
item+=1
if len(file[line])==0 or file[line][0][0]=="#":
file.pop(line)
line-=1
line+=1
tests=[]
for line in file:
tests.append([line[0], line[2]])
logs[0]=False
cont=0
n=1
for test in tests:
res=tm[0](test[0])
if res:
string=output[0]
while string[0]=="B": string=string[1:]
while string[-1]=="B": string=string[:-1]
if string==test[1]:
cont+=1
print("test "+str(n)+": "+"\033[92m {}\033[00m" .format("Correct"))
else:
print("test "+str(n)+": "+"\033[91m {}\033[00m" .format("Wrong"))
elif test[1]=="STOP_FAIL":
cont+=1
print("test "+str(n)+": "+"\033[92m {}\033[00m" .format("Correct"))
else:
print("test "+str(n)+": "+"\033[91m {}\033[00m" .format("Wrong"))
n+=1
logs[0]=True
print(f"Final score: {round(10*cont/len(tests), 2)}")
class TuringMachine(object):
"""An implementation of the Turing machine model.
Once instantiated, the Turing machine can be executed by calling it, and it
can be reset to its initial state by calling :meth:`reset`.
`states` is a set of states. A "state" can be anything, but usually simple
integers suffice.
`initial_state` is the state of the machine before it starts reading
symbols from an input string. This state must be a member of `states`. When
:meth:`reset` is called, the state of the machine will be set to this
state.
`accept_state` is the state that will cause the machine to halt and accept
(that is, return ``True``). This state must be a member of `states`.
`transition` is a two-dimensional dictionary specifying how the
"configuration" of the machine (that is, the head location, state, and
string) changes each time it reads from its input string. The dictionary is
indexed first by state, then by symbol. Each entry in this two-dimensional
dictionary must be a three-tuple, *(new_state, new_symbol, direction)*,
where *new_state* is the next state in which the Turing machine will be,
*new_symbol* is the symbol that will be written in the current location on
the string, and *direction* is either :data:`L` or :data:`R`, representing
movement of the head left or right, respectively.
`max_steps` is the maximum number of steps the machine may take before
it is assumed to be in an infinite loop and the call returns ``False``.
The transition dictionary need not have an entry for the accept state,
because the implementation of :meth:`__call__` checks whether this
Turing machine has entered that state and immediately halts execution.
Although they would otherwise be necessary in the formal mathematical
definition of a Turing machine, this class requires the user to specify
neither the input alphabet nor the tape alphabet.
"""
def __init__(self, states, in_alph, initial_state, accept_state,
transition, max_steps, *args, **kw):
self.states = states
self.in_alph=in_alph
self.accept_state = accept_state
self.initial_state = initial_state
self.transition = transition
self.max_steps=max_steps
def _log_state(self, string, head_location, current_state):
"""Logs a visual representation of the current head location, state,
and contents of the tape of this Turing machine.
For example, if the Turing machine has ``'_010_'`` on its input tape
(that is, if `string` is ``'_010_'``), is in state ``4``, and has
read/write head at the location of the ``1`` symbol, this method would
log the following messages, one line at a time.
_010_
^
4
The caret | |
# Repository: maurov/xraysloth
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analytical expression of $\Delta \theta (x, z)$ from Wittry
"""
from __future__ import print_function
import sys, os
import math
import numpy as np
import numpy.ma as ma
from sloth.io.specfile_writer import SpecfileDataWriter
# ----------------
# Global variables
# ----------------
DEG2RAD = 0.017453292519943295 # np.pi / 180
RAD2DEG = 57.29577951308232 # 180 / np.pi
# from Saint-Gobain (SG) crystals
SI_ALAT = 5.431 # cubic
SI_2D111 = 6.271 # from Matjaz
GE_ALAT = 5.658 # cubic
INSB_ALAT = 6.48 # cubic
SIO2_A = 4.913 # beta-quartz, hexagonal
SIO2_C = 5.405
SIO2_2D101 = 6.684 # from SG
#SIO2_2D100 = 8.514 # from SG
SIO2_2D100 = 8.510 # from Matjaz
INSB_2D111 = 7.480 # from SG
#
DEBUG = False
def mapCase2Num(case):
dc2n = {'Johann' : 1,
'Jn' : 1,
'Johansson' : 2,
'Js' : 2,
'Spherical plate' : 3,
'Spherical Jn' : 3,
'SphJn' : 3,
'Spherical Johansson' : 4,
'Spherical Js' : 4,
'SphJs' : 4,
'Wittry' : 5,
'Toroidal Js' : 5,
'TorJs' : 5,
'Js 45 deg focusing' : 6,
'Js45focus' : 6,
'Js focusing' : 7,
'JsFocus' : 7,
'Berreman' : 8,
'Jn focusing' : 9,
'JnFocus' : 9}
try:
return dc2n[case]
except:
return 0
def mapNum2Case(case, mode='label'):
if (mode == 'label'):
dn2c = {1 : 'Jn',
2 : 'Js',
3 : 'SphJn',
4 : 'SphJs',
5 : 'TorJs',
6 : 'Js45focus',
7 : 'JsFocus',
8 : 'Berreman',
9 : 'JnFocus'}
else:
dn2c = {1 : 'Johann',
2 : 'Johansson',
3 : 'Spherical Jn',
4 : 'Spherical Johansson',
5 : 'Toroidal Js',
6 : 'Js 45 deg focusing',
7 : 'Js focusing',
8 : 'Berreman',
9 : 'Jn focusing'}
try:
return dn2c[case]
except:
return 'Unknown'
def dThetaXZ(x, z, thetab, case=None):
    """Analytical expression of the angular deviation from Bragg
reflection over a diffractor in conventional point-to-point
focusing geometries
Parameters
==========
x, z : masked array of floats
2D meshgrid where to evaluate dThetaXZ
thetab : float
Bragg angle [deg]
case : int or str
diffractor geometries
1 or 'Johann' or 'Jn' [cylindrical]
2 or 'Johansson' or 'Js' [cylindrical]
3 or 'Spherical plate' or 'Spherical Jn' or 'SphJn'
4 or 'Spherical Johansson' or 'Spherical Js' or 'SphJs'
5 or 'Wittry' or 'Toroidal Js' or 'TorJs'
6 or 'Js 45 deg focusing' or 'Js45focus'
7 or 'Js focusing' or 'JsFocus'
8 or 'Berreman'
9 or 'Jn focusing' or 'JnFocus'
Returns
=======
dThetaXZ : 2D masked array
Notes
=====
The following expression is taken from table I in
[Wittry:1992_JAP]_. This is the same of Eq.12-13 in [Pestehe2]_
and holds within the following approximations:
- the source is an ideal point source located on the Rowland circle
- the diffractor size is small compared with the radius of the focal circle
.. math::
\begin{eqnarray}\label{eq:dthetaxz}
\Delta \theta (x,z) = A_1 x^2 + A_2 x^3 + A_3 z^2 + A_4 xz^2 \nonumber \\
where: \nonumber \\
A_1 = \cot \theta_B \left( 1-\frac{1}{2R_{1}} \right) \nonumber \\
A_2 = \cot^2\ \theta_B \left( 1-\frac{1}{2R_{1}} \right) \nonumber \\
A_3 = \frac{\tan \theta_B}{2} \left[ \frac{1}{R_{2}} - \frac{1}{R_{2}^{\prime}} + \frac{1}{\sin \theta_{B}^{2}} \left( \frac{2}{R_{2}^{\prime}} - \frac{1}{R_{2}} -1 \right) - A_{4}^{\prime} \right] \nonumber \\
A_4 = \frac{1}{2} \left[ \frac{1}{R_{2}} + \frac{1}{\sin \theta_{B}^{2}} \left( \frac{2}{R_{2}^{2}} - \frac{1}{R_{2}} -2 \right) \right] - \frac{A_{4}^{\prime}}{2} \nonumber \\
A_4^{\prime} = \frac{1-R_{2}^{\prime}}{{R_{2}^{\prime}}^{2}} \nonumber\\
\end{eqnarray}
.. [Wittry:1992_JAP] <NAME> and <NAME>, J. Appl. Phys. **71** (1992) 564
.. [Pestehe2] <NAME>, J. Appl. Cryst. **45** (2012) 890-901
The parametrization is expressed with 4 radii: 1) in the
meridional (dispersion) direction (x), R$_{1}$ and R$_{1}^\prime$
and 2) in the sagittal (focusing) direction (z), R$_{2}$ and
R$_{2}^\prime$. In each direction the two radii, R and R$^\prime$,
are for the crystal surface and planes, respectively. The
implemented cases are the following ones:
|------+----------------------+----------+----------------+------------------+------------------|
| Case | Name | R$_{1}$ | R$_{1}^\prime$ | R$_{2}$ | R$_{2}^\prime$ |
| | | surface | planes | surface | planes |
|------+----------------------+----------+----------------+------------------+------------------|
| 1 | Johann (Jn) | 1. | 1. | $\infty$ | $\infty$ |
| 2 | Johansson (Js) | 0.5 | 1. | $\infty$ | $\infty$ |
| 3 | Spherical Jn (SphJn) | 1. | 1. | 1. | 1. |
| 4 | Spherical Js (SphJs) | 0.5 | 1. | 0.5 | 1. |
| 5 | Wittry (TorJs) | 0.5 | 1. | 1. | 1. |
| 6 | Js 45 deg focusing | 0.5 | 1. | 0.5 | 0.5 |
| 7 | Js focusing | 0.5 | 1. | $\sin^2(\theta)$ | $\sin^2(\theta)$ |
| 8 | Berreman | $\infty$ | 1. | $\sin^2(\theta)$ | $\sin^2(\theta)$ |
| 9 | Jn focusing | 1. | 1. | $\sin^2(\theta)$ | $\sin^2(\theta)$ |
|------+----------------------+----------+----------------+------------------+------------------|
It is important to note that all the coordinates are given in
units of R$_{1}^\prime$, the bending radius of the crystal
planes. This is crucial for converting to real dimensions (mm).
"""
rthetab = np.deg2rad(thetab)
# CASES
if (case == 1 or case == 'Johann' or case == 'Jn'):
R1 = 1.
R1p = 1.
R2 = np.inf
R2p = np.inf
elif (case == 2 or case == 'Johansson' or case == 'Js'):
R1 = 0.5
R1p = 1.
R2 = np.inf
R2p = np.inf
elif (case == 3 or case == 'Spherical plate' or case == 'Spherical Jn' or case == 'SphJn'):
R1 = 1.
R1p = 1.
R2 = 1.
R2p = 1.
elif (case == 4 or case == 'Spherical Johansson' or case == 'Spherical Js' or case == 'SphJs'):
R1 = 0.5
R1p = 1.
R2 = 0.5
R2p = 1.
elif (case == 5 or case == 'Wittry' or case == 'Toroidal Js' or case == 'TorJs'):
R1 = 0.5
R1p = 1.
R2 = 1.
R2p = 1.
elif (case == 6 or case == 'Js 45 deg focusing' or case == 'Js45focus'):
R1 = 0.5
R1p = 1.
R2 = 0.5
R2p = 0.5
elif (case == 7 or case == 'Js focusing' or case == 'JsFocus'):
R1 = 0.5
R1p = 1.
R2 = math.sin(rthetab)**2
R2p = math.sin(rthetab)**2
elif (case == 8 or case == 'Berreman'):
R1 = np.inf
R1p = 1.
R2 = math.sin(rthetab)**2
R2p = math.sin(rthetab)**2
elif (case == 9 or case == 'Jn focusing' or case == 'JnFocus'):
R1 = 1.
R1p = 1.
R2 = math.sin(rthetab)**2
R2p = math.sin(rthetab)**2
elif (case == 10 or case == '<NAME>' or case == 'VH'):
R1 = np.inf
R1p = np.inf
R2 = 1.
R2p = 1.
else:
raise NameError("case '{0}' unknown".format(case))
# COEFFICIENTS
if (R2p == 1. or R2p == np.inf):
A4p = 0.
else:
A4p = (1. - R2p) / (R2p**2)
A1 = (1./math.tan(rthetab)) * (1. - 1./(2.*R1))
A2 = (1./math.tan(rthetab))**2 * (1. - 1./(2.*R1))
A3 = (math.tan(rthetab)/2.) * ( (1./R2) - (1./(R2p**2)) ) + ( 1./(2.*math.sin(rthetab)*math.cos(rthetab)) ) * ( (2./R2p) - (1./R2) - 1.)
#A4 = (1./(2.*R2)) - (1./(4.*R2p)) + (1/(4.*R2p**2)) + (1./(math.sin(rthetab)**2)) * ((1./R2p) - (1./(2*R2)) - 1.)
A4 = (1./(2.*R2)) + (1./(2.*R2p)) - (1/(2.*R2p**2)) + (1./(math.sin(rthetab)**2)) * ((1./R2p) - (1./(2*R2)) - 1.)
if DEBUG:
print('Analytical DeltaTheta(x,z) for {0}'.format(case))
print('Radii: R1={0}, R1p={1}, R2={2}, R2p={3}'.format(R1, R1p, R2, R2p))
print('Coefficients:')
print('A1 = {0}'.format(A1))
print('A2 = {0}'.format(A2))
print('A3 = {0}'.format(A3))
print('A4p = {0}'.format(A4p))
print('A4 = {0}'.format(A4))
return A1 * x**2 + A2 * x**3 + A3 * z**2 + A4 * x * z**2
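# Illustrative usage sketch (not part of the original module): evaluate the Johann
# case on a small flat grid in units of R1'. The grid extents below are assumptions
# chosen only for illustration.
def _example_dtheta_johann(thetab=75.):
    """Sketch: dThetaXZ over a +/-0.05 x +/-0.01 grid (normalized units) for 'Johann'."""
    x, z = np.mgrid[-0.05:0.05:51j, -0.01:0.01:51j]
    return dThetaXZ(ma.masked_invalid(x), ma.masked_invalid(z), thetab, case='Johann')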
def getMeshMasked(mask='circular', r1p=1000., cryst_x=50., cryst_z=10., csteps=1000j):
"""returns two 2D masked arrays representing a (flat) grid of the
crystal surface
Parameters
----------
mask : str, 'circular'
shape of the mask: 'circular' or 'rectangular'
r1p : float, [1000.]
bending radius of the crystal planes in mm (used only to get
the mesh in normlized units)
cryst_x : float, [50.]
radius of circular analyzer in mm (for rectangular mask,
this is half side in meridional/dispersive x-direction)
cryst_z : float [10.]
half side in sagittal/focusing z-direction of the
rectangular analyzer in mm
csteps : grid steps (given as imaginary number!) [1000j]
"""
zmin, zmax = -1*cryst_x/r1p, cryst_x/r1p
| |
"""
*Student Name: <NAME>
*Student ID: 209399757
*Course Exercise Group: 02 Math
*Exercise name: ex7
"""
import argparse # mandatory
import random
import turtle
import string
import sys
def oldMysteryFunc(a, n):
while a != 0:
temp = a % n
print temp
a = a/n
def mysteryFunc(a, n):
"""
Displays a in base n using recursion
:param a: a must be a positive integer.
It is the base-10 number whose base will be changed.
:param n: n must be a positive integer.
It is the base that a will be converted into.
:return: Prints the digits of a in base n (least-significant digit first)
"""
if(a>0):
print a % n
mysteryFunc(a/n, n)
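# Illustrative example (not part of the original exercise): mysteryFunc prints the
# digits of a in base n, least-significant digit first. For instance,
# mysteryFunc(13, 2) prints 1, 0, 1, 1 (13 is 1101 in binary, read from the right).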
class Sierpinski(object):
def __init__(self):
"""
Initialize Sierpinski with a new turtle and screen
:param: No parameters
:return: None (alters self)
"""
self.window = turtle.Screen()
self.sierpinski_turtle = turtle.Turtle()
def draw_sierpinski(self, length, depth):
"""
Draws a Sierpinski triangle
:param length: the length of the base of the whole figure
:param depth: the depth of the recursion
:return: None
"""
# Draw the big triangle
bob=self.sierpinski_turtle
bob.left(60)
bob.forward(length)
bob.right(120)
bob.forward(length)
bob.right(120)
bob.forward(length)
bob.left(180)
# Start to draw in recursion
self.draw_recursion(length, depth)
def draw_recursion(self, length, depth):
if(depth>0):
# Give a nice name for the turtle
bob=self.sierpinski_turtle
# Set the new length as length/2
newLength=length/2
# Draw a little triangle inside this triangle
bob.left(60)
bob.forward(newLength)
bob.right(60)
bob.forward(newLength)
bob.right(120)
bob.forward(newLength)
bob.right(120)
bob.forward(newLength)
# Return to his base
bob.left(120)
bob.forward(newLength)
bob.left(120)
# Now do the same with the 3 mini triangles
self.draw_recursion(newLength, depth-1)
bob.left(60)
bob.forward(newLength)
bob.right(60)
self.draw_recursion(newLength, depth-1)
bob.right(60)
bob.forward(newLength)
bob.left(60)
self.draw_recursion(newLength, depth-1)
bob.left(180)
bob.forward(newLength)
bob.left(180)
def finish_draw(self):
"""
Closes the window in order to stop drawing
:param: No parameters
:return: None (alters self)
"""
self.window.bye()
def save_draw(self, length, depth):
"""
Saves the drawing in an svg file
:param length: the length of the base of the whole figure
:param depth: the depth of the recursion
:return: None(alters self)
"""
self.sierpinski_turtle.hideturtle()
nameSav = ("sierpinski_%d_%d" % (length, depth)) + ".svg"
ts = turtle.getscreen().getcanvas()
ts.postscript(file=nameSav).encode('utf-8')
class GameStatus(object):
"""Enum of possible Game statuses."""
__init__ = None
NotStarted, InProgress, Win, Lose = range(4)
class BoardCell(object):
"""
Represents a cell in the minesweeper board game and is current status in the game
"""
def __init__(self):
"""
Initializes a board cell with no neighboring mines and status is hidden
Args:
None
Returns:
None (alters self)
"""
self.hidden=True
self.mine=False
self.neighbor_mines=0
def is_mine(self):
"""
returns true if this cell contains a mine false otherwise
Args:
None
Returns:
true if this cell contains a mine false otherwise
"""
return self.mine
def is_hidden(self):
"""
returns true if this cell is hidden false otherwise
Args:
None
Returns:
true if this cell is hidden false otherwise
"""
return self.hidden
def get_cell_value(self):
"""
returns the number of adjacent mines
Args:
None
Returns:
the number of adjacent mines in int or the charcter '*' if this cell is a mine
"""
return self.neighbor_mines
def uncover_cell(self):
"""
uncovers this cell. When a cell is uncovered, its displayed value is the number of neighboring mines, or '*' if the
cell is a mine
Args:
None
Returns:
None (alters self)
"""
self.hidden=False
def update_cell_value(self, cellValue):
"""
updates the number of neighboring mines this cell has
Args:
cellValue - the new number of neighboring mines this cell has
Returns:
None (alters self)
"""
self.neighbor_mines=cellValue
def add_one_to_cell_value(self):
"""
adds one to the count of neighboring mines
Args:
None
Returns:
None (alters self)
"""
if(not self.mine):
self.neighbor_mines+=1
def set_has_mine(self):
"""
changes this cell to a cell with a mine in it
Args:
None
Returns:
None (alters self)
"""
self.mine=True
self.neighbor_mines='*'
class Board(object):
"""Represents a board of minesweeper game and its current progress."""
def __init__(self, rows, columns):
"""Initializes an empty hidden board.
The board will be in the specified dimensions, without mines in it,
and all of its cells are in hidden state.
Args:
rows: the number of rows in the board
columns: the number of columns in the board
Returns:
None (alters self)
"""
self.numRows = rows
self.numColumns = columns
# starts a board with row*col board cells
self.board = [[BoardCell() for _ in range(columns)] for _ in range(rows)]
def put_mines(self, mines, seed=None):
"""Randomly scatter the requested number of mines on the board.
At the beginning, all cells on the board are hidden and none of them
contains a mine. This method scatters the requested number of mines
throughout the board randomly, only if the board is in the beginning
state (as described here). A cell can host only one mine.
This method not only scatters the mines on the board, but also updates
the cells around it (so they will hold the right digit).
Args:
mines: the number of mines to scatter
seed: the seed to give the random function. Default value None
Returns:
None (alters self)
"""
listOfCellsIndex = [(numRow, numCol) for numRow in range(self.numRows) for numCol in range(self.numColumns)]
# randomly choosing cells in the board to place mines in
random.seed(seed)
listOfMineCells = random.sample(listOfCellsIndex, mines)
for (row, col) in listOfMineCells:
# Update the cell
self.board[row][col].set_has_mine()
# Update the nearby cells
self.update_nearby_cells(row, col)
def update_nearby_cells(self, row, col):
"""
Updates the adjacent cells of the cell board[row][col] by
increasing their mine counter by one.
Args:
row: the row of the new mine
col: the column of the new mine
Returns:
None (alters self)
"""
for (i, j) in [(x,y) for x in [-1,0,1] for y in[-1,0,1]]:
# Check if the indicies are still in range
if ((row+i<self.numRows) and (col+j<self.numColumns)
and (row+i>=0) and (col+j>=0)):
if(not self.board[row+i][col+j].is_mine()):
# This nearby cell is not a mine, add 1 to his value
self.board[row+i][col+j].add_one_to_cell_value()
def print_board(self):
"""prints the board according to the game format
DO NOT CHANGE ANYTHING IN THIS FUNCTION!!!!!!!
Args:
None
Returns:
None
"""
# creates the printing format
printFormatString = "%-2s " * self.numColumns
printFormatString += "%-2s"
# prints the first line of the board which is the line containing the indexes of the columns
argList = [" "]
argList.extend([str(i) for i in range(self.numColumns)])
print printFormatString % tuple(argList)
# goes over the board rows and prints each one
for i in range(self.numRows):
argList = [str(i)]
for j in range(self.numColumns):
if self.board[i][j].is_hidden():
argList.append("H")
else:
argList.append(str(self.board[i][j].get_cell_value()))
print printFormatString % tuple(argList)
def load_board(self, lines):
"""Loads a board from a sequence of lines.
This method is used to load a saved board from a sequence of strings
(that usually represent lines). Each line represents a row in the table
in the following format:
XY XY XY ... XY
Where X is one of the characters: 0-8, * and Y is one of letters: H, S.
0-8 = number of adjacent mines (0 is an empty, mine-free cell)
* = represents a mine in this cell
H = this cell is hidden
The lines can have multiple whitespace of any kind before and after the
lines of cells, but between each XY pair there is exactly one space.
Empty or whitespace-only lines are possible between valid lines, or after/before them.
It is safe to assume that the values are correct (the number represents the number of mines around
a given cell) and the number of mines is also legal.
Note that this method doesn't get the first two rows of the file (the
dimensions) on purpose - they are handled in __init__.
Args:
lines: a sequence (list or tuple) of lines with the above restrictions
Returns:
None (alters self)
"""
numMines=0
# Get amount of rows and columns
(row, column)=(int(lines[0]), int(lines[1]))
for i in range(0, len(lines)-2):
# Get the current line splitted
line=lines[i+2].split()
for j in range(0, len(line)):
# Get the current cell
cell=line[j]
if(cell[1]!='H'):
# Uncover the cell
self.board[i][j].uncover_cell()
if(cell[0]=='*'):
# Increase the number of mines
numMines+=1
self.board[i][j].set_has_mine()
else:
# Otherwise set the cell value to the digit given in the text
self.board[i][j].update_cell_value(int(cell[0]))
if(numMines>row*column-1):
# Bad amount of mines
print "Illegal board"
exit(0)
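# Illustrative example (not part of the original exercise): as consumed by the code
# above, a `lines` sequence for a 2-row by 3-column board with a single mine at
# row 0, column 2, all cells still hidden, would look like:
#
#     2
#     3
#     0H 1H *H
#     0H 1H 1H
#
# The digit in each pair is the adjacent-mine count and 'H' marks a hidden cell.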
def get_value(self, row, column):
"""Returns the | |
"""
Manager and Serializer for Library Folders.
"""
import logging
from typing import (
Optional,
Union,
)
from sqlalchemy.orm.exc import (
MultipleResultsFound,
NoResultFound
)
from galaxy import util
from galaxy.exceptions import (
AuthenticationRequired,
InconsistentDatabase,
InsufficientPermissionsException,
InternalServerError,
ItemAccessibilityException,
MalformedId,
RequestParameterInvalidException,
RequestParameterMissingException,
)
from galaxy.managers.roles import RoleManager
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.schema.schema import (
CreateLibraryFolderPayload,
LibraryAvailablePermissions,
LibraryFolderCurrentPermissions,
LibraryFolderDetails,
LibraryPermissionScope,
UpdateLibraryFolderPayload,
)
from galaxy.structured_app import StructuredApp
log = logging.getLogger(__name__)
# =============================================================================
class FolderManager:
"""
Interface/service object for interacting with folders.
"""
def get(self, trans, decoded_folder_id, check_manageable=False, check_accessible=True):
"""
Get the folder from the DB.
:param decoded_folder_id: decoded folder id
:type decoded_folder_id: int
:param check_manageable: flag whether to check that user can manage item
:type check_manageable: bool
:param check_accessible: flag whether to check that user can access item
:type check_accessible: bool
:returns: the requested folder
:rtype: LibraryFolder
:raises: InconsistentDatabase, RequestParameterInvalidException, InternalServerError
"""
try:
folder = trans.sa_session.query(trans.app.model.LibraryFolder).filter(trans.app.model.LibraryFolder.table.c.id == decoded_folder_id).one()
except MultipleResultsFound:
raise InconsistentDatabase('Multiple folders found with the same id.')
except NoResultFound:
raise RequestParameterInvalidException('No folder found with the id provided.')
except Exception as e:
raise InternalServerError(f"Error loading from the database: {util.unicodify(e)}")
folder = self.secure(trans, folder, check_manageable, check_accessible)
return folder
def secure(self, trans, folder, check_manageable=True, check_accessible=True):
"""
Check if (a) user can manage folder or (b) folder is accessible to user.
:param folder: folder item
:type folder: LibraryFolder
:param check_manageable: flag whether to check that user can manage item
:type check_manageable: bool
:param check_accessible: flag whether to check that user can access item
:type check_accessible: bool
:returns: the original folder
:rtype: LibraryFolder
"""
# all folders are accessible to an admin
if trans.user_is_admin:
return folder
if check_manageable:
folder = self.check_manageable(trans, folder)
if check_accessible:
folder = self.check_accessible(trans, folder)
return folder
def check_modifyable(self, trans, folder):
"""
Check whether the user can modify the folder (name and description).
:returns: the original folder
:rtype: LibraryFolder
:raises: AuthenticationRequired, InsufficientPermissionsException
"""
if not trans.user:
raise AuthenticationRequired("Must be logged in to manage Galaxy items.", type='error')
current_user_roles = trans.get_current_user_roles()
if not trans.app.security_agent.can_modify_library_item(current_user_roles, folder):
raise InsufficientPermissionsException("You don't have permissions to modify this folder.", type='error')
else:
return folder
def check_manageable(self, trans, folder):
"""
Check whether the user can manage the folder.
:returns: the original folder
:rtype: LibraryFolder
:raises: AuthenticationRequired, InsufficientPermissionsException
"""
if not trans.user:
raise AuthenticationRequired("Must be logged in to manage Galaxy items.", type='error')
current_user_roles = trans.get_current_user_roles()
if not trans.app.security_agent.can_manage_library_item(current_user_roles, folder):
raise InsufficientPermissionsException("You don't have permissions to manage this folder.", type='error')
else:
return folder
def check_accessible(self, trans, folder):
"""
Check whether the folder is accessible to current user.
By default every folder is accessible (contents have their own permissions).
"""
return folder
def get_folder_dict(self, trans, folder):
"""
Return folder data in the form of a dictionary.
:param folder: folder item
:type folder: LibraryFolder
:returns: dict with data about the folder
:rtype: dictionary
"""
folder_dict = folder.to_dict(view='element')
folder_dict = trans.security.encode_all_ids(folder_dict, True)
folder_dict['id'] = f"F{folder_dict['id']}"
if folder_dict['parent_id'] is not None:
folder_dict['parent_id'] = f"F{folder_dict['parent_id']}"
folder_dict['update_time'] = folder.update_time
return folder_dict
def create(self, trans, parent_folder_id, new_folder_name, new_folder_description=''):
"""
Create a new folder under the given folder.
:param parent_folder_id: decoded id
:type parent_folder_id: int
:param new_folder_name: name of the new folder
:type new_folder_name: str
:param new_folder_description: description of the folder (optional, defaults to empty string)
:type new_folder_description: str
:returns: the new folder
:rtype: LibraryFolder
:raises: InsufficientPermissionsException
"""
parent_folder = self.get(trans, parent_folder_id)
current_user_roles = trans.get_current_user_roles()
if not (trans.user_is_admin or trans.app.security_agent.can_add_library_item(current_user_roles, parent_folder)):
raise InsufficientPermissionsException('You do not have proper permission to create folders under given folder.')
new_folder = trans.app.model.LibraryFolder(name=new_folder_name, description=new_folder_description)
# We are associating the last used genome build with folders, so we will always
# initialize a new folder with the first dbkey in genome builds list which is currently
# ? unspecified (?)
new_folder.genome_build = trans.app.genome_builds.default_value
parent_folder.add_folder(new_folder)
trans.sa_session.add(new_folder)
trans.sa_session.flush()
# New folders default to having the same permissions as their parent folder
trans.app.security_agent.copy_library_permissions(trans, parent_folder, new_folder)
return new_folder
def update(self, trans, folder, name=None, description=None):
"""
Update the given folder's name or description.
:param folder: the model object
:type folder: LibraryFolder
:param name: new name for the library folder
:type name: str
:param description: new description for the library folder
:type description: str
:returns: the folder
:rtype: LibraryFolder
:raises: ItemAccessibilityException, InsufficientPermissionsException
"""
changed = False
if not trans.user_is_admin:
folder = self.check_modifyable(trans, folder)
if folder.deleted is True:
raise ItemAccessibilityException("You cannot update a deleted library folder. Undelete it first.")
if name is not None and name != folder.name:
folder.name = name
changed = True
if description is not None and description != folder.description:
folder.description = description
changed = True
if changed:
trans.sa_session.add(folder)
trans.sa_session.flush()
return folder
def delete(self, trans, folder, undelete=False):
"""
Mark given folder deleted/undeleted based on the flag.
:param folder: the model object
:type folder: LibraryFolder
:param undelete: flag whether to delete (when False) or undelete
:type undelete: Bool
:returns: the folder
:rtype: LibraryFolder
:raises: ItemAccessibilityException
"""
if not trans.user_is_admin:
folder = self.check_manageable(trans, folder)
if undelete:
folder.deleted = False
else:
folder.deleted = True
trans.sa_session.add(folder)
trans.sa_session.flush()
return folder
def get_current_roles(self, trans, folder):
"""
Find all roles currently connected to relevant permissions
on the folder.
:param folder: the model object
:type folder: LibraryFolder
:returns: dict of current roles for all available permission types
:rtype: dictionary
"""
# Omit duplicated roles by converting to set
modify_roles = set(trans.app.security_agent.get_roles_for_action(folder, trans.app.security_agent.permitted_actions.LIBRARY_MODIFY))
manage_roles = set(trans.app.security_agent.get_roles_for_action(folder, trans.app.security_agent.permitted_actions.LIBRARY_MANAGE))
add_roles = set(trans.app.security_agent.get_roles_for_action(folder, trans.app.security_agent.permitted_actions.LIBRARY_ADD))
modify_folder_role_list = [(modify_role.name, trans.security.encode_id(modify_role.id)) for modify_role in modify_roles]
manage_folder_role_list = [(manage_role.name, trans.security.encode_id(manage_role.id)) for manage_role in manage_roles]
add_library_item_role_list = [(add_role.name, trans.security.encode_id(add_role.id)) for add_role in add_roles]
return dict(modify_folder_role_list=modify_folder_role_list,
manage_folder_role_list=manage_folder_role_list,
add_library_item_role_list=add_library_item_role_list)
def can_add_item(self, trans, folder):
"""
Return true if the user has permissions to add item to the given folder.
"""
if trans.user_is_admin:
return True
current_user_roles = trans.get_current_user_roles()
add_roles = set(trans.app.security_agent.get_roles_for_action(folder, trans.app.security_agent.permitted_actions.LIBRARY_ADD))
for role in current_user_roles:
if role in add_roles:
return True
return False
def cut_the_prefix(self, encoded_folder_id):
"""
Remove the prefix from the encoded folder id.
:param encoded_folder_id: encoded id of the Folder object with 'F' prepended
:type encoded_folder_id: string
:returns: encoded Folder id without the 'F' prefix
:rtype: string
:raises: MalformedId
"""
if ((len(encoded_folder_id) % 16 == 1) and encoded_folder_id.startswith('F')):
cut_id = encoded_folder_id[1:]
else:
raise MalformedId(f'Malformed folder id ( {str(encoded_folder_id)} ) specified, unable to decode.')
return cut_id
def decode_folder_id(self, trans, encoded_folder_id):
"""
Decode the folder id given that it has already lost the prefixed 'F'.
:param encoded_folder_id: encoded id of the Folder object
:type encoded_folder_id: string
:returns: decoded Folder id
:rtype: int
:raises: MalformedId
"""
return trans.security.decode_id(encoded_folder_id, object_name="folder")
def cut_and_decode(self, trans, encoded_folder_id):
"""
Cuts the folder prefix (the prepended 'F') and returns the decoded id.
:param encoded_folder_id: encoded id of the Folder object
:type encoded_folder_id: string
:returns: decoded Folder id
:rtype: int
"""
return self.decode_folder_id(trans, self.cut_the_prefix(encoded_folder_id))
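# Illustrative sketch (not part of Galaxy): API callers pass folder ids as an 'F'
# prefix followed by the encoded hash, so handlers typically chain the helpers
# above. The id value and variable names below are assumptions for illustration.
#
#     decoded_id = folder_manager.cut_and_decode(trans, "F0123456789abcdef")
#     folder = folder_manager.get(trans, decoded_id, check_accessible=True)
#     folder_dict = folder_manager.get_folder_dict(trans, folder)  # id comes back 'F'-prefixed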
class FoldersService:
"""Common interface/service logic for interactions with library folders in the context of the API.
Provides the logic of the actions invoked by API controllers and uses type definitions
and pydantic models to declare its parameters and return types.
"""
def __init__(self, app: StructuredApp, folder_manager: FolderManager, role_manager: RoleManager) -> None:
self._app = app
self.folder_manager = folder_manager
self.role_manager = role_manager
def show(self, trans, id: EncodedDatabaseIdField) -> LibraryFolderDetails:
"""
Displays information about a folder.
:param id: the folder's encoded id (required)
:type id: an encoded id string (has to be prefixed by 'F')
:returns: dictionary including details of the folder
:rtype: dict
"""
folder_id = self.folder_manager.cut_and_decode(trans, id)
folder = self.folder_manager.get(trans, folder_id, check_manageable=False, check_accessible=True)
return_dict = self.folder_manager.get_folder_dict(trans, folder)
return LibraryFolderDetails.parse_obj(return_dict)
def create(
self,
trans,
encoded_parent_folder_id: EncodedDatabaseIdField,
payload: CreateLibraryFolderPayload
) -> LibraryFolderDetails:
"""
Create a new folder object underneath the one specified in the parameters.
:param encoded_parent_folder_id: (required) the parent folder's id
:type encoded_parent_folder_id: an encoded id string (should be prefixed by 'F')
:param payload: dictionary structure containing:
:param name: (required) the name of the new folder
:type name: str
:param description: the description of the new folder
:type description: str
:type dictionary
:returns: information about newly created folder, notably including ID
:rtype: dictionary
:raises: RequestParameterMissingException
"""
decoded_parent_folder_id = self.folder_manager.cut_and_decode(trans, encoded_parent_folder_id)
parent_folder = self.folder_manager.get(trans, decoded_parent_folder_id)
new_folder = self.folder_manager.create(trans, parent_folder.id, payload.name, payload.description)
return_dict = self.folder_manager.get_folder_dict(trans, new_folder)
return LibraryFolderDetails.parse_obj(return_dict)
def get_permissions(
self,
trans,
encoded_folder_id: EncodedDatabaseIdField,
scope: Optional[LibraryPermissionScope] = LibraryPermissionScope.current,
page: Optional[int] = 1,
| |
track.term_id, track.vote, track.star
from SI.Terms as term,
SI.Tracking as track
where track.user_id=%s
and track.term_id=term.id
and term.owner_id!=%s
and track.star=true
order by term_string;
""", (user_id, user_id))
for row in cur.fetchall():
yield row
def getTrackingByTerm(self, term_id):
""" Return an iterator over users tracking a term.
:param term_id: ID of term.
:type user_id: int
:returns: User rows.
:rtype: dict iterator
"""
cur = self.con.cursor()
cur.execute("""SELECT user_id FROM SI.Tracking
WHERE term_id=%s """, (term_id,))
for row in cur.fetchall():
yield row[0]
def search(self, string):
""" Search table by term_string, definition and examples. Rank
results by relevance to query, consensus, and classification.
:param string: Search query.
:type string: str
:rtype: dict list
"""
string = string.replace("'", "''")
string = ' & '.join(string.split(' '))  # TODO: |'s and parentheses are also allowed by to_tsquery
cur = self.con.cursor(cursor_factory = psycopg2.extras.RealDictCursor)
cur.execute("""
SELECT id, owner_id, term_string, definition, examples,
up, down, created, modified, consensus, class, concept_id,
ts_rank_cd(tsv, query, 32 /* rank(rank+1) */ ) AS rank
FROM SI.Terms, to_tsquery('english', %s) query
WHERE query @@ tsv
ORDER BY rank DESC
""", (string,))
rows = sorted(cur.fetchall(), key=lambda row: orderOfClass[row['class']])
rows = sorted(rows, key=lambda row: row['consensus'], reverse=True)
return list(rows)
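# Illustrative note (not part of the original class): a call such as
# search("ice core") rewrites the string to "ice & core" before handing it to
# to_tsquery, so the statement above effectively evaluates
# to_tsquery('english', 'ice & core') @@ tsv, ranks matches with ts_rank_cd,
# and the Python side then re-orders the hits by classification and consensus.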
def updateTerm(self, id, term):
    """ Modify a term's term string, definition and examples.
Note: term ownership authenticated upstream!
:param id: Term ID.
:type id: int
:param term: Dictionary containing at least the keys 'term_string', 'definition',
and 'examples' with string values.
:type term: dict
"""
cur = self.con.cursor()
cur.execute("UPDATE SI.Terms SET term_string=%s, definition=%s, examples=%s WHERE id=%s",
(term['term_string'], term['definition'], term['examples'], id))
## User queries ##
def insertUser(self, user):
""" Insert a new user into the table and return the new ID.
:param user: Default values are used for any omitted columns.
:type user: dict
:rtype: int or None
"""
defUser = {
"id" : None,
"email" : "nil",
"last_name" : "nil",
"first_name" : "nil",
"reputation" : None,
"authority" : "nil",
"auth_id" : "nil"
}
# Merge caller-supplied values over the defaults; psycopg2 parameter binding
# handles quoting, so no manual escaping is needed here.
for (key, value) in user.iteritems():
defUser[key] = value
try:
cur = self.con.cursor()
cur.execute("""INSERT INTO SI.Users(id, email, last_name, first_name, reputation, authority, auth_id)
VALUES (%s, %s, %s, %s, %s, %s, %s)
RETURNING id""", (defUser['id'],
defUser['email'],
defUser['last_name'],
defUser['first_name'],
defUser['reputation'],
defUser['authority'],
defUser['auth_id']))
res = cur.fetchone()
if res:
return res[0]
else:
return None
except pgdb.DatabaseError, e:
if e.pgcode == '23505': # Duplicate primary key
print >>sys.stderr, "warning: skipping duplicate primary key Id=%s" % defUser['id']
cur.execute("ROLLBACK;")
return None
raise e
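# Hedged usage sketch: inserting a user with only a few columns set; the other
# columns fall back to the defaults in `defUser`. `db` is an assumed instance
# of this class, and the field values are illustrative.
#
#   new_id = db.insertUser({"email": "jane@example.com",
#                           "first_name": "Jane",
#                           "last_name": "Doe",
#                           "authority": "google",
#                           "auth_id": "1234567890"})
#   # new_id is the inserted row's id, or None on a duplicate primary key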
def getUser(self, id):
""" Get User by ID.
:param id: User ID.
:type id: int
:rtype: dict or None
"""
cur = self.con.cursor(cursor_factory = psycopg2.extras.RealDictCursor)
cur.execute("""
select id, authority, auth_id, email, last_name, first_name,
reputation, enotify
from SI.Users where id=%s;
""" % (id,))
return cur.fetchone()
def getAllUsers(self):
""" Return an iterator over ``SI.Users``.
:rtype: dict iterator
"""
cur = self.con.cursor(cursor_factory = psycopg2.extras.RealDictCursor)
cur.execute("""
select id, authority, auth_id, email, last_name, first_name,
reputation, enotify
from SI.Users;
""")
for row in cur.fetchall():
yield row
def getUserByAuth(self, authority, auth_id):
""" Get user identified by an authentication ID. It's assumed that this ID
is unique in the context of a particular authority, such as Google.
**TODO:** originally I planned to use (authority, auth_id) as the unique
constraint on the SI.Users table. My guess is that most, if not all
services have an associated email address. The unique constraint is
actually the user's email. This method should be replaced.
:param authority: Organization providing authentication.
:type authority: str
:param auth_id: Authentication ID.
:type auth_id: str
:returns: Internal surrogate ID of user.
:rtype: int or None
"""
cur = self.con.cursor(cursor_factory = psycopg2.extras.RealDictCursor)
cur.execute("""
select id, authority, auth_id, email, last_name, first_name,
reputation
from SI.Users where auth_id=%s and authority=%s;
""", (auth_id, authority))
res = cur.fetchone()
return res
def getUserNameById(self, id, full=False):
""" Get username by ID.
:param id: User ID.
:type id: int
:param full: Get full name
:type full: bool
:returns: If *full* is set, then return the first and last name of the user.
Otherwise, just return the first name.
:rtype: str or None
"""
cur = self.con.cursor()
try:
cur.execute("SELECT first_name, last_name FROM SI.Users WHERE id=%s", (id,))
res = cur.fetchone()
if res and full:
return res[0] + " " + res[1]
elif res and not full:
return res[0]
else:
return None
finally:
cur.close()
def updateUser(self, id, first, last, enotify):
""" Update user's name.
:param id: User ID.
:type id: int
:param first: First name.
:type first: str
:param last: Last name.
:type last: str
"""
cur = self.con.cursor()
cur.execute("UPDATE SI.Users SET first_name=%s, last_name=%s, enotify='%s' WHERE id=%s",
(first, last, enotify, id))
def updateUserReputation(self, id, rep):
""" Set reputation of user. This triggers an update of the consensus score
and term stability. Commit updates immediately.
:param id: User ID.
:type id: int
:param rep: New reputation score.
:type rep: int
"""
cur = self.con.cursor()
cur.execute("SELECT now(), count(*) FROM SI.Users")
(T_now, t) = cur.fetchone()
cur.execute("SELECT reputation FROM SI.Users WHERE id=%s", (id,))
p_rep = cur.fetchone()
if not p_rep:
return None
p_rep = p_rep[0]
cur.execute("""SELECT v.vote, t.id, t.up, t.down, t.U_sum, t.D_sum,
t.T_last, t.T_stable, t.consensus
FROM SI.Tracking as v,
SI.Terms as t
WHERE v.user_id = %s
AND v.term_id = t.id
AND v.vote != 0""", (id,))
for (vote, term_id, u, d, U_sum, D_sum, T_last, T_stable, p_S) in cur.fetchall():
#: Compute new consensus score
if vote == 1: U_sum += rep - p_rep
elif vote == -1: D_sum += rep - p_rep
S = calculateConsensus(u, d, t, float(U_sum), float(D_sum))
#: See if stability has changed
T_stable = calculateStability(S, p_S, T_now, T_last, T_stable)
cur.execute("""UPDATE SI.Terms SET consensus={1}, T_last='{2}', T_stable={3},
U_sum={4}, D_sum={5} WHERE id={0}; COMMIT""".format(
term_id, S, str(T_now), repr(str(T_stable)) if T_stable else "NULL", U_sum, D_sum))
cur.execute("UPDATE SI.Users SET reputation=%d WHERE id=%d RETURNING id; COMMIT" % (rep, id))
return id
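# Hedged usage sketch: a caller only supplies the user's new reputation; the
# reputation delta (rep - p_rep) is then folded into U_sum or D_sum of every
# term the user voted on, and consensus/stability are recomputed as above.
# `db`, `user_id` and `new_reputation` are illustrative assumptions.
#
#   db.updateUserReputation(user_id, new_reputation)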
## Comment queries ##
def insertComment(self, comment):
""" Insert a new comment into the database and return ID.
:param comment: New comment as dictionary. Default values will be used for omitted
columns.
:type comment: dict
:rtype: int
"""
defComment = {
"id" : None,
"owner_id" : None,
"term_id" : None,
"comment_string" : "nil"
}
#: Format entries for db query
for (key, value) in comment.iteritems():
defComment[key] = value
try:
cur = self.con.cursor()
cur.execute("""INSERT INTO SI.Comments (id, owner_id, term_id, comment_string)
VALUES (%s, %s, %s, %s)
RETURNING id""", (defComment['id'],
defComment['owner_id'],
defComment['term_id'],
defComment['comment_string']))
res = cur.fetchone()
if res: return res[0]
else: return None
except pgdb.DatabaseError, e:
if e.pgcode == '23505': #: Duplicate primary key
print >>sys.stderr, "warning: skipping duplicate primary key Id=%s" % defComment['id']
cur.execute("ROLLBACK;")
return None
raise e
def removeComment(self, id):
""" Remove comment and return ID.
:param id: Comment ID.
:type id: int
:rtype: int or None
"""
cur = self.con.cursor()
cur.execute("DELETE FROM SI.Comments WHERE id=%s RETURNING id", (id,))
res = cur.fetchone()
if res: return res[0]
else: return None
def updateComment(self, id, comment):
""" Update term comment. Note that user authentication is handled up stream!
:param id: Comment ID.
:type id: int
:param comment: Expects at least the key 'comment_string' with string value.
:type comment: dict
"""
cur = self.con.cursor()
cur.execute("UPDATE SI.Comments SET comment_string=%s WHERE id=%s",
(comment['comment_string'], id))
def getComment(self, id):
""" Get comment by ID.
:param id: Comment ID.
:type id: int
:rtype: dict or None
"""
cur = self.con.cursor(cursor_factory = psycopg2.extras.RealDictCursor)
cur.execute("""
select id, owner_id, term_id, created, modified, comment_string
from SI.Comments where id=%s;
""", (id,))
return cur.fetchone()
def getCommentHistory(self, term_id):
# tutorials/rstor/TutorialDebugging.py
"""Build DEBUGGING.rst
"""
from helpers import *
from et_rstor import *
def TutorialDebugging():
doc = RstDocument('TutorialDebugging', headings_numbered_from_level=2, is_default_document=True)
with pickled.open(mode='r') as f:
doc.heading_numbers = json.load(f)
Include('../HYPERLINKS.rst')
Heading('Debugging binary extensions', level=2)
Paragraph(
"Debugging is the process of executing a program step by step, in order to discover "
"where and why it goes wrong. It is an indispensable step in software development. "
"Although tests may tell you what part of your code fails, but the origin of the "
"failure is not always clear. As explained in the tutorials (see :ref:`testing-your-code`) "
"unit tests are useful for two reasons:"
)
List(
[ 'they assure that your code does not get broken while you add features or modify it, and'
, 'they constrain the part of the code that has an issue. If a test fails, the origin of the flaw '
'must be somewhere in the part of the code that is tested. By keeping the tested parts small, '
'you will find the flaw sooner, and proceed faster. '
]
, numbered=True
)
Paragraph(
'For small projects, inserting print statements in flawed code can be a good approach to discover '
'the flaw, but it is cumbersome and, in the case of binary extensions, requires rebuilding the code '
'often. Debugging is a more scalable approach. '
)
Paragraph(
"Graphical Debuggers as provided in IDEes, e.g. PyCharm, Eclipse_ + pydev_, Visual Studio, "
"present a great user experience, but not all are capable of debugging mixed Python/C++/Fortran. "
"See `here <https://wiki.python.org/moin/IntegratedDevelopmentEnvironments>`_ for more information."
)
List(
[ "Pycharm_: only Python, but great user experience."
, "eclipse: debugging binaries should be possible but not really mixed mode."
, "Visual Studio code: provides mixed language debugging Python/C++/Fortran."
]
)
Note('June 8, 2021: On MACOS, Visual Studio Code, as it uses lldb under the hood, also does not show the variables '
'in a Fortran binary extension. It is unclear whether that is due to a quirk in f2py or lldb.')
Paragraph(
"For HPC environments there is also:"
)
List(
[ "`Arm DDT <https://www.arm.com/products/development-tools/server-and-hpc/forge/ddt>`_"
, "`TotalView HPC Debugging Software <https://totalview.io/products/totalview>`_"
]
)
Paragraph(
"These are also capable debugging OpenMP (multi-threaded) and MPI applications "
"(multi-process)."
)
Paragraph(
"For Linux environments there is also a lightweight approach possible using gdb_ and "
"pdb_. On MACOS gdb_ can be replaced by lldb_, which has very similar features, but "
"different commands. (At the time of writing gdb_ for MACOS was broken). Here are two "
"links describing the approach:"
)
List(
[ "https://www.researchgate.net/figure/Debugging-both-C-extensions-and-Python-code-with-gdb-and-pdb_fig2_220307949"
, "https://www.boost.org/doc/libs/1_76_0/libs/python/doc/html/faq/how_do_i_debug_my_python_extensi.html"
]
, numbered=True
)
Paragraph(
"The first link describes a fully mixed Python C++ approach, and works for Fortran as well. The"
"second link, is semi-mixed. It expects you to enter the Python commands yourself, which may "
"be tedious at times, but can be practical to explore the situation."
)
Paragraph(
"We illustrate both strategies using a project foo with a C++ binary extension ``cxx``, and a "
"Fortran binary extension ``fortran``. The code we are using is just the example code created "
"by micc2_, which defines a function for adding to arrays. "
)
CodeBlock(
[ '> micc2 create foo --package'
, '...'
, '> micc2 add cxx --cpp'
, '...'
, '> micc2 add fortran --f90'
, '...'
, '> micc2 info'
, 'Project foo located at /Users/etijskens/software/dev/workspace/foo'
, ' package: foo'
, ' version: 0.0.0'
, ' structure: foo/__init__.py (Python package)'
, ' contents:'
, ' C++ module cpp_cxx/cxx.cpp'
, ' f90 module f90_fortran/fortran.f90'
, '> micc2 build --build-type Debug'
, '...'
]
)
Paragraph(
'Make sure that you pass the ``--build-type Debug`` flag, so that the binary extensions are built '
'with debug information.'
)
Paragraph(
'It is recommended to debug small scripts rather than complete applications. This is, however, not always '
'possible.'
)
Heading("Mixed Python/C++ debugging with lldb and pdb", level=3)
Paragraph(
'This section illustrates mixed language debugging of a Python script calling a method from a C++ '
'binary extension. Here we are using ``lldb`` on a MACOS system. In the next section we will do the '
'same for a Fortran binary extension on Linux (Ubuntu), using ``gdb``.'
)
Note('For an overview of ``lldb`` check out https://lldb.llvm.org.')
Note('For an overview of ``pdb`` check out https://docs.python.org/3/library/pdb.html, and '
'`Python Debugging With Pdb <https://realpython.com/python-debugging-pdb/>`_.')
Paragraph(
"Suppose we are concerned about the C++ correctness of the ``add`` function and that we want to execute "
"it step by step to see if it runs as expected. "
"We first demonstrate the approach of the first link above, on MACOS, using lldb_ instead of gdb_. "
"The commands are different for ``gdb`` and ``lldb``, but the strategy is exactly the same. First, "
"start lldb_ with the Python executable you want to use. As I am using pyenv_ to manage differen python "
"versions on my machine, the ``python`` on the PATH is only a wrapper for the the real Python executable, "
"so I must specify the full path, because ``lldb`` expects a true executable."
)
CodeBlock(
[ '> lldb ~/.pyenv/versions/3.8.5/bin/python'
, '(lldb) target create "/Users/etijskens/.pyenv/versions/3.8.5/bin/python"'
, 'Current executable set to \'/Users/etijskens/.pyenv/versions/3.8.5/bin/python\' (x86_64).'
, '(lldb) target create "/Users/etijskens/.pyenv/versions/3.8.5/bin/python"'
, '(lldb)'
]
)
Paragraph(
'Next, you set a breakpoint in the C++ file, e.g. on the first line of the ``add`` function. As the '
'binary extension, which is in fact nothing but a dynamic library, has not been loaded yet, ``lldb`` '
'replies that there is no location for the breakpoint, and that the breakpoint is \'pending\', i.e. '
'waiting to become active as soon as the dynamic library is loaded.'
)
CodeBlock(
[ '(lldb) breakpoint set --file cxx.cpp -l 19'
, 'Breakpoint 1: no locations (pending).'
, 'WARNING: Unable to resolve breakpoint to any actual locations.'
, '(lldb)'
]
)
Paragraph(
'Next, start the Python test script for the C++ add function, :file:`tests/test_cpp_cxx.py`, with '
'``pdb``:'
)
CodeBlock(
[ '(lldb) run -m pdb tests/test_cpp_cxx.py'
, 'Process 26917 launched: \'/Users/etijskens/.pyenv/versions/3.8.5/bin/python\' (x86_64)'
, '> /Users/etijskens/software/dev/workspace/foo/tests/test_cpp_cxx.py(4)<module>()'
, '-> """'
, '(Pdb)'
]
)
Paragraph(
'and set a ``pdb`` breakpoint on the test method for the ``add`` function (which is called in '
'the ``if __name__ == "__main__":`` body): '
)
CodeBlock(
[ '(Pdb) b test_cpp_add'
, 'Breakpoint 1 at /Users/etijskens/software/dev/workspace/foo/tests/test_cpp_cxx.py:19'
, '(Pdb)'
]
)
Paragraph(
'This time the breakpoint is found right away, because the file that contains it, ``tests/test_cpp_cxx.py``, '
'is already loaded. '
)
Paragraph(
'Now we are ready to start the script with the ``r(un)`` command, after which ``pdb`` stops at the first '
'line in the test_cpp_add method, the ``pdb`` breakpoint:'
)
CodeBlock(
[ '(Pdb) r'
, '1 location added to breakpoint 1'
, '__main__ running <function test_cpp_add at 0x104890310> ...'
, '> /Users/etijskens/software/dev/workspace/foo/tests/test_cpp_cxx.py(20)test_cpp_add()'
, '-> x = np.array([0,1,2,3,4],dtype=float)'
, '(Pdb)'
]
)
Paragraph(
'Now, we can execute this line and inspect the variable ``x`` with the ``p(rint)`` command:'
)
CodeBlock(
[ '(Pdb) n'
, '> /Users/etijskens/software/dev/workspace/foo/tests/test_cpp_cxx.py(21)test_cpp_add()'
, '-> shape = x.shape'
, '(Pdb) p x'
, 'array([0., 1., 2., 3., 4.])'
, '(Pdb)'
]
)
Paragraph(
'Continue stepping until you arrive at the call to ``cpp.add``. You can examine the contents of ``y`` and '
'``z`` as well, just like any other variable that is in scope:'
)
CodeBlock(
[ '(Pdb) n'
, '> /Users/etijskens/software/dev/workspace/foo/tests/test_cpp_cxx.py(22)test_cpp_add()'
, '-> y = np.ones (shape,dtype=float)'
, '(Pdb) n'
, '> /Users/etijskens/software/dev/workspace/foo/tests/test_cpp_cxx.py(23)test_cpp_add()'
, '-> z = np.zeros(shape,dtype=float)'
, '(Pdb) n'
, '> /Users/etijskens/software/dev/workspace/foo/tests/test_cpp_cxx.py(24)test_cpp_add()'
, '-> expected_z = x + y'
, '(Pdb) n'
, '> /Users/etijskens/software/dev/workspace/foo/tests/test_cpp_cxx.py(25)test_cpp_add()'
, '-> result = cpp.add(x,y,z)'
, '(Pdb) p y'
, 'array([1., 1., 1., 1., 1.])'
, '(Pdb) p z'
, 'array([0., 0., 0., 0., 0.])'
, '(Pdb)'
]
)
Paragraph(
'Stepping once more will hit the breakpoint on line 19 of file ``cxx.cpp``