input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
allobjects[magcol]['binned_stetsonj_stdev'] = binned_stetsonj_stdev
allobjects[magcol]['binned_iqr'] = binned_iqr
allobjects[magcol]['binned_iqr_median'] = binned_iqr_median
allobjects[magcol]['binned_iqr_stdev'] = binned_iqr_stdev
allobjects[magcol]['binned_inveta'] = binned_inveta
allobjects[magcol]['binned_inveta_median'] = binned_inveta_median
allobjects[magcol]['binned_inveta_stdev'] = binned_inveta_stdev
allobjects[magcol]['binned_objectids_thresh_stetsonj'] = (
binned_objectids_thresh_stetsonj
)
allobjects[magcol]['binned_objectids_thresh_iqr'] = (
binned_objectids_thresh_iqr
)
allobjects[magcol]['binned_objectids_thresh_inveta'] = (
binned_objectids_thresh_inveta
)
allobjects[magcol]['binned_objectids_thresh_all'] = (
binned_objectids_thresh_all
)
# get the common selected objects thru all measures
allobjects[magcol]['objectids_all_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_all'])
)
allobjects[magcol]['objectids_stetsonj_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_stetsonj'])
)
allobjects[magcol]['objectids_inveta_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_inveta'])
)
allobjects[magcol]['objectids_iqr_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_iqr'])
)
# turn these into np.arrays for easier plotting if they're lists
if isinstance(min_stetj_stdev, list):
allobjects[magcol]['min_stetj_stdev'] = np.array(
magcol_min_stetj_stdev
)
else:
allobjects[magcol]['min_stetj_stdev'] = magcol_min_stetj_stdev
if isinstance(min_iqr_stdev, list):
allobjects[magcol]['min_iqr_stdev'] = np.array(
magcol_min_iqr_stdev
)
else:
allobjects[magcol]['min_iqr_stdev'] = magcol_min_iqr_stdev
if isinstance(min_inveta_stdev, list):
allobjects[magcol]['min_inveta_stdev'] = np.array(
magcol_min_inveta_stdev
)
else:
allobjects[magcol]['min_inveta_stdev'] = magcol_min_inveta_stdev
# this one doesn't get touched (for now)
allobjects[magcol]['min_lcmad_stdev'] = min_lcmad_stdev
#
# done with all magcols
#
allobjects['magbins'] = magbins
with open(outfile,'wb') as outfd:
pickle.dump(allobjects, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return allobjects
def plot_variability_thresholds(varthreshpkl,
                                xmin_lcmad_stdev=5.0,
                                xmin_stetj_stdev=2.0,
                                xmin_iqr_stdev=2.0,
                                xmin_inveta_stdev=2.0,
                                lcformat='hat-sql',
                                magcols=None):
    '''
    This makes plots for the variability threshold distributions.

    Reads the pickle written by the variability-threshold step and, for each
    magcol, draws a 2x2 figure of SDSS r magnitude vs. each variability index
    (light curve RMS via MAD, Stetson J, IQR, 1/eta), overplotting the binned
    medians and the dashed median + N*stdev threshold curves. One PNG per
    magcol is saved to the current working directory.

    varthreshpkl: path to the pickle produced by the thresholding function.
    xmin_*_stdev: threshold multipliers for the dashed curves.
        NOTE(review): these are combined with the pickled per-magcol values
        using `or`, so passing 0 (or any falsy value) silently falls back to
        the pickled value — confirm this is intended.
    lcformat: key into the LCFORM light curve format registry.
    magcols: magnitude columns to plot; defaults to those registered for
        lcformat.
    '''

    if lcformat not in LCFORM or lcformat is None:
        LOGERROR('unknown light curve format specified: %s' % lcformat)
        return None

    (fileglob, readerfunc, dtimecols, dmagcols,
     derrcols, magsarefluxes, normfunc) = LCFORM[lcformat]

    if magcols is None:
        magcols = dmagcols

    with open(varthreshpkl,'rb') as infd:
        allobjects = pickle.load(infd)

    magbins = allobjects['magbins']

    for magcol in magcols:

        # explicit keyword thresholds take precedence over the pickled ones
        # (via `or`, see NOTE above)
        min_lcmad_stdev = (
            xmin_lcmad_stdev or allobjects[magcol]['min_lcmad_stdev']
        )
        min_stetj_stdev = (
            xmin_stetj_stdev or allobjects[magcol]['min_stetj_stdev']
        )
        min_iqr_stdev = (
            xmin_iqr_stdev or allobjects[magcol]['min_iqr_stdev']
        )
        min_inveta_stdev = (
            xmin_inveta_stdev or allobjects[magcol]['min_inveta_stdev']
        )

        fig = plt.figure(figsize=(20,16))

        # the mag vs lcmad
        # the 1.483 factor matches the axis label: RMS approximated as
        # MAD x 1.483
        plt.subplot(221)
        plt.plot(allobjects[magcol]['sdssr'],
                 allobjects[magcol]['lcmad']*1.483,
                 marker='.',ms=1.0, linestyle='none',
                 rasterized=True)
        plt.plot(allobjects[magcol]['binned_sdssr_median'],
                 np.array(allobjects[magcol]['binned_lcmad_median'])*1.483,
                 linewidth=3.0)
        # dashed curve: binned median + N*stdev variability threshold
        plt.plot(
            allobjects[magcol]['binned_sdssr_median'],
            np.array(allobjects[magcol]['binned_lcmad_median'])*1.483 +
            min_lcmad_stdev*np.array(
                allobjects[magcol]['binned_lcmad_stdev']
            ),
            linewidth=3.0, linestyle='dashed'
        )
        plt.xlim((magbins.min()-0.25, magbins.max()))
        plt.xlabel('SDSS r')
        plt.ylabel(r'lightcurve RMS (MAD $\times$ 1.483)')
        plt.title('%s - SDSS r vs. light curve RMS' % magcol)
        plt.yscale('log')
        plt.tight_layout()

        # the mag vs stetsonj
        plt.subplot(222)
        plt.plot(allobjects[magcol]['sdssr'],
                 allobjects[magcol]['stetsonj'],
                 marker='.',ms=1.0, linestyle='none',
                 rasterized=True)
        plt.plot(allobjects[magcol]['binned_sdssr_median'],
                 allobjects[magcol]['binned_stetsonj_median'],
                 linewidth=3.0)
        # dashed curve: binned median + N*stdev variability threshold
        plt.plot(
            allobjects[magcol]['binned_sdssr_median'],
            np.array(allobjects[magcol]['binned_stetsonj_median']) +
            min_stetj_stdev*np.array(
                allobjects[magcol]['binned_stetsonj_stdev']
            ),
            linewidth=3.0, linestyle='dashed'
        )
        plt.xlim((magbins.min()-0.25, magbins.max()))
        plt.xlabel('SDSS r')
        plt.ylabel('Stetson J index')
        plt.title('%s - SDSS r vs. Stetson J index' % magcol)
        plt.yscale('log')
        plt.tight_layout()

        # the mag vs IQR
        plt.subplot(223)
        plt.plot(allobjects[magcol]['sdssr'],
                 allobjects[magcol]['iqr'],
                 marker='.',ms=1.0, linestyle='none',
                 rasterized=True)
        plt.plot(allobjects[magcol]['binned_sdssr_median'],
                 allobjects[magcol]['binned_iqr_median'],
                 linewidth=3.0)
        # dashed curve: binned median + N*stdev variability threshold
        plt.plot(
            allobjects[magcol]['binned_sdssr_median'],
            np.array(allobjects[magcol]['binned_iqr_median']) +
            min_iqr_stdev*np.array(
                allobjects[magcol]['binned_iqr_stdev']
            ),
            linewidth=3.0, linestyle='dashed'
        )
        plt.xlabel('SDSS r')
        plt.ylabel('IQR')
        plt.title('%s - SDSS r vs. IQR' % magcol)
        plt.xlim((magbins.min()-0.25, magbins.max()))
        plt.yscale('log')
        plt.tight_layout()

        # the mag vs inveta (1/eta)
        plt.subplot(224)
        plt.plot(allobjects[magcol]['sdssr'],
                 allobjects[magcol]['inveta'],
                 marker='.',ms=1.0, linestyle='none',
                 rasterized=True)
        plt.plot(allobjects[magcol]['binned_sdssr_median'],
                 allobjects[magcol]['binned_inveta_median'],
                 linewidth=3.0)
        # dashed curve: binned median + N*stdev variability threshold
        plt.plot(
            allobjects[magcol]['binned_sdssr_median'],
            np.array(allobjects[magcol]['binned_inveta_median']) +
            min_inveta_stdev*np.array(
                allobjects[magcol]['binned_inveta_stdev']
            ),
            linewidth=3.0, linestyle='dashed'
        )
        plt.xlabel('SDSS r')
        plt.ylabel(r'$1/\eta$')
        plt.title(r'%s - SDSS r vs. $1/\eta$' % magcol)
        plt.xlim((magbins.min()-0.25, magbins.max()))
        plt.yscale('log')
        plt.tight_layout()

        # write one figure per magcol to the current working directory
        plt.savefig('varfeatures-%s-%s-distributions.png' % (varthreshpkl,
                                                             magcol),
                    bbox_inches='tight')
        plt.close('all')
#############################
## RUNNING PERIOD SEARCHES ##
#############################
def runpf(lcfile,
          outdir,
          timecols=None,
          magcols=None,
          errcols=None,
          lcformat='hat-sql',
          pfmethods=None,
          pfkwargs=None,
          sigclip=10.0,
          getblssnr=False,
          nworkers=10,
          excludeprocessed=False):
    '''This runs the period-finding for a single LC.

    lcfile: the light curve file to process.
    outdir: directory where the result pickle is written.
    timecols, magcols, errcols: column lists; if None, the defaults
        registered for lcformat in LCFORM are used.
    lcformat: key into the LCFORM light curve format registry.
    pfmethods: a list of period finding methods to run. Each element is a
        string matching the keys of the PFMETHODS dict above. If None, this
        runs GLS, PDM, AoVMH, and the spectral window Lomb-Scargle
        periodogram.
    pfkwargs: any special kwargs (one dict per entry in pfmethods) to pass
        along to each period-finding method function. If None, empty dicts
        are used.
    sigclip: sigma-clip level handed to each period-finder.
    getblssnr: if True and BLS was run, also calculate the BLS SNR.
    nworkers: worker count handed to each period-finder.
    excludeprocessed: if True, light curves that have existing periodfinding
        result pickles in outdir will not be processed.

    Returns the path of the output pickle, or None on failure or unknown
    light curve format.

    FIXME: currently, this uses a dumb method of excluding already-processed
    files. A smarter way to do this is to (i) generate a SHA512 cachekey based
    on a repr of {'lcfile', 'timecols', 'magcols', 'errcols', 'lcformat',
    'pfmethods', 'sigclip', 'getblssnr', 'pfkwargs'}, (ii) make sure all list
    kwargs in the dict are sorted, (iii) check if the output file has the same
    cachekey in its filename (last 8 chars of cachekey should work), so the
    result was processed in exactly the same way as specifed in the input to
    this function, and can therefore be ignored. Will implement this later.

    '''

    if lcformat not in LCFORM or lcformat is None:
        LOGERROR('unknown light curve format specified: %s' % lcformat)
        return None

    # None defaults instead of mutable default arguments: shared default
    # lists/dicts would leak state across calls (the per-method kwarg dicts
    # used to be updated in place below)
    if pfmethods is None:
        pfmethods = ['gls', 'pdm', 'mav', 'win']
    if pfkwargs is None:
        pfkwargs = [{} for _ in pfmethods]

    (fileglob, readerfunc, dtimecols, dmagcols,
     derrcols, magsarefluxes, normfunc) = LCFORM[lcformat]

    # override the default timecols, magcols, and errcols
    # using the ones provided to the function
    if timecols is None:
        timecols = dtimecols
    if magcols is None:
        magcols = dmagcols
    if errcols is None:
        errcols = derrcols

    try:

        # get the LC into a dict
        lcdict = readerfunc(lcfile)
        if isinstance(lcdict, tuple) and isinstance(lcdict[0], dict):
            lcdict = lcdict[0]

        outfile = os.path.join(outdir, 'periodfinding-%s.pkl' %
                               lcdict['objectid'])

        # if excludeprocessed is True, return the output file if it exists and
        # has a size that is at least 100 kilobytes (this should be enough to
        # contain the minimal results of this function).
        if excludeprocessed:

            test_outfile = os.path.exists(outfile)
            test_outfile_gz = os.path.exists(outfile + '.gz')

            if (test_outfile and os.stat(outfile).st_size > 102400):

                LOGWARNING('periodfinding result for %s already exists at %s, '
                           'skipping because excludeprocessed=True'
                           % (lcfile, outfile))
                return outfile

            elif (test_outfile_gz and os.stat(outfile + '.gz').st_size > 102400):

                LOGWARNING(
                    'gzipped periodfinding result for %s already '
                    'exists at %s, skipping because excludeprocessed=True'
                    % (lcfile, outfile + '.gz')
                )
                return outfile + '.gz'

        # this is the final returndict
        resultdict = {
            'objectid': lcdict['objectid'],
            'lcfbasename': os.path.basename(lcfile),
            'kwargs': {'timecols': timecols,
                       'magcols': magcols,
                       'errcols': errcols,
                       'lcformat': lcformat,
                       'pfmethods': pfmethods,
                       'pfkwargs': pfkwargs,
                       'sigclip': sigclip,
                       'getblssnr': getblssnr}
        }

        # normalize using the special function if specified
        if normfunc is not None:
            lcdict = normfunc(lcdict)

        for tcol, mcol, ecol in zip(timecols, magcols, errcols):

            # dereference the columns and get them from the lcdict
            tcolget = tcol.split('.') if '.' in tcol else [tcol]
            times = dict_get(lcdict, tcolget)

            mcolget = mcol.split('.') if '.' in mcol else [mcol]
            mags = dict_get(lcdict, mcolget)

            ecolget = ecol.split('.') if '.' in ecol else [ecol]
            errs = dict_get(lcdict, ecolget)

            # normalize here if not using special normalization
            if normfunc is None:
                ntimes, nmags = normalize_magseries(
                    times, mags,
                    magsarefluxes=magsarefluxes
                )
                times, mags = ntimes, nmags

            # run each of the requested period-finder functions
            resultdict[mcolget[-1]] = {}

            pfmkeys = []

            for pfmind, (pfm, pfkw) in enumerate(zip(pfmethods, pfkwargs)):

                pf_func = PFMETHODS[pfm]

                # copy the per-method kwargs so the caller's dicts (or the
                # shared defaults) are never mutated in place
                pf_kwargs = dict(pfkw)
                pf_kwargs.update({'verbose': False,
                                  'nworkers': nworkers,
                                  'magsarefluxes': magsarefluxes,
                                  'sigclip': sigclip})

                # we'll always prefix things with their index to allow multiple
                # invocations and results from the same period-finder (for
                # different period ranges, for example).
                pfmkey = '%s-%s' % (pfmind, pfm)
                pfmkeys.append(pfmkey)

                # run this period-finder and save its results to the output
                # dict
                resultdict[mcolget[-1]][pfmkey] = pf_func(
                    times, mags, errs,
                    **pf_kwargs
                )

            #
            # done with running the period finders
            #

            # append the pfmkeys list to the magcol dict
            resultdict[mcolget[-1]]['pfmethods'] = pfmkeys

            # check if we need to get the SNR from any BLS pfresults
            if 'bls' in pfmethods and getblssnr:

                # we need to scan thru the pfmethods to get to any BLS
                # pfresults
                for pfmk in resultdict[mcolget[-1]]['pfmethods']:

                    if 'bls' in pfmk:

                        try:

                            # BUGFIX: the BLS results live under the prefixed
                            # key (e.g. '2-bls'); the old lookup of a bare
                            # 'bls' key always raised KeyError, so the SNR was
                            # never calculated
                            bls = resultdict[mcolget[-1]][pfmk]

                            # calculate the SNR for the BLS as well
                            blssnr = bls_snr(bls, times, mags, errs,
                                             magsarefluxes=magsarefluxes,
                                             verbose=False)

                            # add the SNR results to the BLS result dict
                            resultdict[mcolget[-1]][pfmk].update({
                                'snr': blssnr['snr'],
                                'altsnr': blssnr['altsnr'],
                                'transitdepth': blssnr['transitdepth'],
                                'transitduration': blssnr['transitduration'],
                            })

                        except Exception as e:

                            LOGEXCEPTION('could not calculate BLS SNR for %s' %
                                         lcfile)
                            # add the SNR null results to the BLS result dict
                            resultdict[mcolget[-1]][pfmk].update({
                                'snr': [np.nan, np.nan, np.nan,
                                        np.nan, np.nan],
                                'altsnr': [np.nan, np.nan, np.nan,
                                           np.nan, np.nan],
                                'transitdepth': [np.nan, np.nan, np.nan,
                                                 np.nan, np.nan],
                                'transitduration': [np.nan, np.nan, np.nan,
                                                    np.nan, np.nan],
                            })

            elif 'bls' in pfmethods:

                # we need to scan thru the pfmethods to get to any BLS
                # pfresults
                for pfmk in resultdict[mcolget[-1]]['pfmethods']:

                    if 'bls' in pfmk:

                        # add the SNR null results to the BLS result dict
                        resultdict[mcolget[-1]][pfmk].update({
                            'snr': [np.nan, np.nan, np.nan, np.nan, np.nan],
                            'altsnr': [np.nan, np.nan, np.nan, np.nan, np.nan],
                            'transitdepth': [np.nan, np.nan, np.nan,
                                             np.nan, np.nan],
                            'transitduration': [np.nan, np.nan, np.nan,
                                                np.nan, np.nan],
                        })

        # once all mag cols have been processed, write out the pickle
        with open(outfile, 'wb') as outfd:
            pickle.dump(resultdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)

        return outfile

    except Exception as e:

        LOGEXCEPTION('failed to run for %s, because: %s' % (lcfile, e))
        return None
def runpf_worker(task):
    '''
    This unpacks a task tuple and calls runpf for use in a worker pool.

    task is (lcfile, outdir, timecols, magcols, errcols, lcformat,
    pfmethods, pfkwargs, getblssnr, sigclip, nworkers, excludeprocessed).

    Returns whatever runpf returns, or None if the LC file is missing.
    '''

    (lcfile, outdir, timecols, magcols, errcols, lcformat,
     pfmethods, pfkwargs, getblssnr, sigclip, nworkers, excludeprocessed) = task

    # bail out early if the light curve isn't on disk
    if not os.path.exists(lcfile):
        LOGERROR('LC does not exist for requested file %s' % lcfile)
        return None

    return runpf(lcfile,
                 outdir,
                 timecols=timecols,
                 magcols=magcols,
                 errcols=errcols,
                 lcformat=lcformat,
                 pfmethods=pfmethods,
                 pfkwargs=pfkwargs,
                 getblssnr=getblssnr,
                 sigclip=sigclip,
                 nworkers=nworkers,
                 excludeprocessed=excludeprocessed)
def parallel_pf(lclist,
outdir,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
pfmethods=['gls','pdm','mav','win'],
| |
indices may have specific locations so prune the tree
# This does not work when different indices can be provided by
# different components since the tress are only stored according to their
# index context. When different indices are be provided by
# different components the index context are the same but the trees are
# different
if prune:
tree.prune_from_path(path)
return tree
    def reshape(self, items: List, inplace: bool = True) -> Any:
        """Reshape items according to the shape defined by the tree

        Args:
            items (List): Flat list of items to be reshaped
            inplace (bool): If True the specified list will be reshaped.
                If False a copy will be created and reshaped.

        Returns:
            The items arranged (nested) like the tree.
        """
        _items = items if inplace else copy.deepcopy(items)
        # Always one node at the top of the tree
        top_node = self.nodes_for_level[0][0]
        # Used to check if items are ever split.
        # Use top node since it is not part of the loop
        as_list = not top_node.is_constrained
        # Walk the levels bottom-up, grouping the flat list once per
        # unconstrained level so the nesting depth mirrors the tree depth
        for nodes in reversed(self.nodes_for_level[1:]):
            # If 1 node and contrained by pruning do not split
            # Assures that "segments[0].layers[0].thickness[:]" would result
            # in a 1D list while "segments[0].layers[:].thickness[:]" would
            # result in a 2D list even if there were only 1 layer
            # print("is_constrained", nodes[0].is_constrained)
            # if len(nodes) == 1 and nodes[0].is_constrained:
            #     continue
            if all([node.is_constrained for node in nodes]):
                continue
            as_list = True
            # One group per node at this level, sized by its child count
            nchildren = [node.nchildren() for node in nodes]
            _items = split_items(_items, nchildren)
        if as_list:
            # Reduce dimensionality by 1 when the top node was constrained
            if top_node.is_constrained:
                _items = _items[0]
            return _items
        else:
            # Nothing was ever split: the query addresses a single item
            # TODO: could check len(items) = 1 at the top
            assert (
                len(items) == 1
            ), f"Expected list of length 1, but found {len(items)}."
            return _items
def _node_for_idxs(self, idxs: List[int]):
"""
Get the node corresponding to a list of indices
"""
if len(idxs) == 0:
node = self.nodes_for_level[0][0]
else:
node = self.nodes_for_level[0][0]._child_for_idx[idxs[0]]
for node_idx in idxs[1:]:
node = node._child_for_idx[node_idx]
return node
    def normalize_path(self, qpath: HubitQueryPath) -> HubitQueryPath:
        """Handle negative indices

        Each negative (digit) index specifier in the query path is replaced
        by the equivalent non-negative index, counted from the number of
        children of the tree node at that position.

        As stated in "test_normalize_path2" the normalization in general depends
        on the context
        """
        idxids = qpath.get_index_specifiers()
        _path = copy.copy(qpath)
        for idx_level, idxid in enumerate(idxids):
            if is_digit(idxid) and int(idxid) < 0:
                # Get index context i.e. indices prior to current level
                _idx_context = [
                    int(idx) for idx in _path.get_index_specifiers()[:idx_level]
                ]
                # The node at this context knows its child count, which
                # converts the negative index into a positive one
                node = self._node_for_idxs(_idx_context)
                # NOTE(review): replace(..., 1) rewrites the first occurrence
                # of idxid in the path string — assumes the same digit does
                # not legitimately appear at an earlier level; verify.
                _path = HubitQueryPath(
                    _path.replace(idxid, str(node.nchildren() + int(idxid)), 1)
                )
        return _path
def expand_path(
self,
path: Path,
flat: bool = False,
) -> List[HubitQueryPath]:
"""Expand model path with wildcard based on tree
Example for a query path:
list[:].some_attr.numbers ->
[ list[0].some_attr.numbers, list[1].some_attr.numbers, list[2].some_attr.numbers ]
Args:
path (Path): Any path with wildcards and index IDs
flat (bool): Return expansion result as a flat list.
Returns:
List: Paths from expansion. Arranged in the shape
defined by the tree if flat = False. Otherwise a
flat list.
"""
# Get the content of the braces
idxspecs = path.get_index_specifiers()
ranges = path.ranges()
# Manipulate as strings
paths = [str(path)]
for idx_level, (idxspec, range) in enumerate(zip(idxspecs, ranges)):
nodes = self.nodes_for_level[idx_level]
paths_current_level = []
for _path, node in zip(paths, nodes):
if range.is_digit:
# slice is digit so replace index specifier with that digit
paths_current_level.append(_path.replace(idxspec, range))
elif range.is_full_range or range.is_empty:
# slice is wildcard or not specified so expand from node children
paths_current_level.extend(
[
_path.replace(idxspec, str(child.index), 1)
for child in node.children
]
)
elif range.is_limited_range:
# Loop over children to see who are included in the range
paths_current_level.extend(
[
_path.replace(idxspec, str(child.index), 1)
for child in node.children
if range.contains_index(child.index)
]
)
else:
raise HubitError(
f"Unknown index range '{range}' for path '{path}' of type '{type(path)}'."
)
paths = copy.deepcopy(paths_current_level)
# Cast strings as paths
_paths = [HubitQueryPath(_path) for _path in paths]
if flat:
return _paths
else:
return self.reshape(_paths)
def _all_nodes_constrained(self):
return all(
[node.is_constrained for nodes in self.nodes_for_level for node in nodes]
)
def number_of_leaves(self):
"""Number of leaves in the tree"""
return sum(self.number_of_children(-1))
def number_of_children(self, idx_level: int) -> List[int]:
"""Number of children for each node at the specified level"""
return [node.nchildren() for node in self.nodes_for_level[idx_level]]
def none_like(self) -> Any:
"""Create data structure in the shape of the tree
filled with None
Returns:
Data structure with None
"""
# If all node are constrained the result is None. No list to reshape
if self._all_nodes_constrained():
return None
# Reshape a flat list with all elements set to None. The list has length
# like all leaves in the tree
return self.reshape([None for _ in range(self.number_of_leaves())])
    def __str__(self):
        """Human-readable tree summary: one line per level with node,
        parent and child counts, followed by the per-level lengths."""
        lines = ["--------------------", "Tree"]
        for idx, (name, nodes) in enumerate(
            zip(
                self.level_names,
                self.nodes_for_level,
            )
        ):
            # Distinct parents at this level (the root has no parent)
            nparents = len({node.parent for node in nodes if node.parent is not None})
            nchildren = sum([node.nchildren() for node in nodes])
            # For each node: are all of its children leaf nodes?
            children_are_leaves = [
                all([isinstance(child, LeafNode) for child in node.children])
                for node in nodes
            ]
            # Child indices per node at this level
            idx_node = [[child.index for child in node.children] for node in nodes]
            is_constrained = [node.is_constrained for node in nodes]
            lines.append(
                f"level={idx} ({name}), "
                f"nodes={len(nodes)} (constrained: {is_constrained}), "
                f"parents={nparents}, "
                f"children={nchildren}, "
                f"children are leaves={children_are_leaves}, "
                f"child idxs={idx_node}"
            )
        lines.append("--------------------")
        lines.append("Lengths")
        size_for_level = self.to_list()
        for idx, (name, size) in enumerate(zip(self.level_names, size_for_level)):
            lines.append(f"level={idx} ({name}), {size}")
        return "\n".join(lines)
class _QueryExpansion:
"""A Hubit query expansion. A query can be split into multiple queries
Args:
path: A [`HubitQueryPath`][hubit.config.HubitQueryPath] representing the original query.
decomposed_paths: If a single component can provide results for `path`, `decomposed_paths`
has one element of type [`HubitQueryPath`][hubit.config.HubitQueryPath]. If multiple
components match the query individual path contributions are the items in the list.
expanded_paths_for_decomposed_path: For each element in `decomposed_paths`
these are the expanded paths i.e. dotted paths with real indices not
wildcards.
"""
    def __init__(self, path: HubitQueryPath, mpaths: List[HubitModelPath]):
        """
        path: the query path
        mpaths: the model paths that match the query

        Raises HubitModelQueryError if the matching model paths are
        inconsistent with the query: multiple providers for a specific
        (range-free) path, more than one index context, or no provider.
        """
        self.path = path

        if len(mpaths) > 1 and not path.has_slice_range():
            # Should not be possible to have multiple providers if the query
            # points to a specific path i.e. has no ranges.
            # TODO: This check could be more strict e.g. the wildcard is located where
            # the mpaths vary
            raise HubitModelQueryError(
                f"More than one component match the query '{path}'. Matching components provide: {mpaths}."
            )

        # Get the index contexts for doing some tests
        _idx_contexts = {mpath.get_idx_context() for mpath in mpaths}
        if len(_idx_contexts) > 1:
            msg = f"Fatal error. Inconsistent providers for query '{path}': {', '.join(mpaths)}"
            raise HubitModelQueryError(msg)

        if len(_idx_contexts) == 0:
            msg = f"Fatal error. No provider for query path '{path}'."
            raise HubitModelQueryError(msg)

        # Split the query so that each decomposed path has a unique provider
        self.decomposed_paths, index_identifiers = _QueryExpansion.decompose_query(
            path, mpaths
        )

        # Filled in later via update_expanded_paths()
        self.expanded_paths_for_decomposed_path: Dict[
            HubitQueryPath, List[HubitQueryPath]
        ] = {}

        if index_identifiers is None:
            self.decomposed_idx_identifier = None
        else:
            # Cannot occur since len(_idx_contexts) is 1
            # if len(set(index_identifiers)) > 1:
            #     msg = f"Fatal error. Inconsistent decomposition for query '{path}': {', '.join(mpaths)}"
            #     raise HubitModelQueryError(msg)
            self.decomposed_idx_identifier = index_identifiers[0]

        self._idx_context = list(_idx_contexts)[0]
    @property
    def idx_context(self):
        """The (one) index context corresponding to the model paths.

        Set in __init__ after verifying that all matching model paths
        share exactly one index context.
        """
        return self._idx_context
    def update_expanded_paths(
        self, decomposed_path: HubitQueryPath, expanded_paths: List[HubitQueryPath]
    ):
        """Store the expanded (real-index) paths for one decomposed path."""
        self.expanded_paths_for_decomposed_path[decomposed_path] = expanded_paths
def flat_expanded_paths(self):
"""Returns flat list of expanded paths"""
return [
path
for paths in self.expanded_paths_for_decomposed_path.values()
for path in paths
]
    def is_decomposed(self):
        """True when the query was split into multiple decomposed paths,
        i.e. multiple components are needed to provide it."""
        return len(self.decomposed_paths) > 1
def is_expanded(self):
if (
not self.is_decomposed()
and self.path == self.decomposed_paths[0]
and len(self.expanded_paths_for_decomposed_path[self.decomposed_paths[0]])
== 1
and self.path
== self.expanded_paths_for_decomposed_path[self.decomposed_paths[0]][0]
):
return False
else:
return True
@staticmethod
def decompose_query(
qpath: HubitQueryPath, mpaths: List[HubitModelPath]
) -> Tuple[List[HubitQueryPath], Union[List[str], None]]:
"""
If a single component can provide results for `path`, `decomposed_paths`
has one element of type [`HubitQueryPath`][hubit.config.HubitQueryPath].
If multiple components are required to provide the query their individual
path contributions are the items in the list. index_identifiers are the
index identifiers corresponding to the decomposed index
"""
index_identifiers: Union[List, None]
if len(mpaths) > 1:
# More than one provider required to match query. Split query into queries
# each having a unique provider
decomposed_qpaths = []
# Index identifiers corresponding to decomposed field
index_identifiers = []
for mpath in mpaths:
q_idx_specs = qpath.get_index_specifiers()
idxs, ranges = zip(
*[
(idx, range)
for idx, range in enumerate(mpath.ranges())
if not range.is_empty
]
| |
<gh_stars>10-100
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for normalizing flows.
For a review of normalizing flows see: https://arxiv.org/abs/1912.02762
The abstract base class ConfigurableFlow demonstrates our minimal interface.
Although the standard change of variables formula requires that
normalizing flows are invertible, none of the algorithms in train.py
require evaluating that inverse explicitly so inverses are not implemented.
"""
import abc
from typing import Callable, List, Tuple
import annealed_flow_transport.aft_types as tp
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
Array = tp.Array
ConfigDict = tp.ConfigDict
class ConfigurableFlow(hk.Module, abc.ABC):
  """Abstract base class for configurable normalizing flows.

  This is the interface expected by all flow based algorithms called in
  train.py.
  """

  def __init__(self, config: ConfigDict):
    super().__init__()
    self._check_configuration(config)
    self._config = config

  def _check_input(self, x: Array) -> None:
    """Assert that the flow input is a rank-1 array.

    Fixed: the annotation previously claimed an Array return, but this
    method only asserts and returns None.
    """
    chex.assert_rank(x, 1)

  def _check_outputs(self, x: Array, transformed_x: Array,
                     log_abs_det_jac: Array) -> None:
    """Assert output shapes: same shape as input, scalar log abs det."""
    chex.assert_rank(x, 1)
    chex.assert_equal_shape([x, transformed_x])
    chex.assert_shape(log_abs_det_jac, ())

  def _check_members_types(self, config: ConfigDict, expected_members_types):
    """Verify config contains each (name, type) pair.

    Args:
      config: flow configuration to validate.
      expected_members_types: iterable of (element_name, expected_type).

    Raises:
      ValueError: if a required element is missing from config.
      TypeError: if an element is present but of the wrong type.
    """
    for elem, elem_type in expected_members_types:
      if elem not in config:
        raise ValueError('Flow config element not found: ', elem)
      if not isinstance(config[elem], elem_type):
        msg = 'Flow config element '+elem+' is not of type '+str(elem_type)
        raise TypeError(msg)

  def __call__(self, x: Array) -> Tuple[Array, Array]:
    """Call transform_and_log_abs_det_jac with automatic shape checking.

    This calls transform_and_log_abs_det_jac which needs to be implemented
    in derived classes.

    Args:
      x: Array size (num_dim,) containing input to flow.
    Returns:
      Array size (num_dim,) containing output and Scalar log abs det Jacobian.
    """
    self._check_input(x)
    output, log_abs_det_jac = self.transform_and_log_abs_det_jac(x)
    self._check_outputs(x, output, log_abs_det_jac)
    return output, log_abs_det_jac

  @abc.abstractmethod
  def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
    """Transform x through the flow and compute log abs determinant of Jacobian.

    Args:
      x: (num_dim,) input to the flow.
    Returns:
      Array size (num_dim,) containing output and Scalar log abs det Jacobian.
    """

  @abc.abstractmethod
  def _check_configuration(self, config: ConfigDict):
    """Check the configuration includes the necessary fields.

    Will typically raise Assertion like errors.

    Args:
      config: A ConfigDict include the fields required by the flow.
    """
class DiagonalAffine(ConfigurableFlow):
  """An affine transformation with a positive diagonal matrix.

  Computes exp(d)*x + b where both the unconstrained diagonal d and the
  bias b are learnt parameters initialized to zero, so the flow starts at
  the identity map. The log abs det Jacobian is sum(d).
  """

  def _check_configuration(self, unused_config: ConfigDict):
    # This flow requires no configuration fields.
    pass

  def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
    dim = x.shape[0]
    zero_init = hk.initializers.Constant(jnp.zeros((dim,)))
    # Unconstrained log-scale: exponentiation keeps the diagonal positive.
    # Parameter names are kept for checkpoint compatibility.
    log_scale = hk.get_parameter('unconst_diag',
                                 shape=[dim],
                                 dtype=x.dtype,
                                 init=zero_init)
    shift = hk.get_parameter('bias',
                             shape=[dim],
                             dtype=x.dtype,
                             init=zero_init)
    transformed = jnp.exp(log_scale)*x + shift
    return transformed, jnp.sum(log_scale)
def rational_quadratic_spline(x: Array,
                              bin_positions: Array,
                              bin_heights: Array,
                              derivatives: Array) -> Tuple[Array, Array]:
  """Compute a rational quadratic spline.

  See https://arxiv.org/abs/1906.04032

  Args:
    x: A single real number.
    bin_positions: A sorted array of bin positions of length num_bins+1.
    bin_heights: An array of bin heights of length num_bins+1.
    derivatives: An array of derivatives at bin positions of length num_bins+1.
  Returns:
    Value of the rational quadratic spline at x.
    Derivative with respect to x of rational quadratic spline at x.
  """
  # Locate the bin containing x; the modulo wraps the out-of-range index.
  idx = jnp.searchsorted(bin_positions, x) % len(bin_positions)
  x_lo = bin_positions[idx - 1]
  x_hi = bin_positions[idx]
  y_lo = bin_heights[idx - 1]
  y_hi = bin_heights[idx]
  d_lo = derivatives[idx - 1]
  d_hi = derivatives[idx]
  width = x_hi - x_lo
  height = y_hi - y_lo
  avg_slope = height / width
  # Normalized position within the bin and its helper products.
  t = (x - x_lo) / width
  t_sq = jnp.square(t)
  t_prod = t * (1. - t)
  one_minus_t_sq = jnp.square(1. - t)
  curvature = d_hi + d_lo - 2. * avg_slope
  shared_denom = avg_slope + curvature * t_prod
  # Spline value.
  value = y_lo + height * (avg_slope * t_sq + d_lo * t_prod) / shared_denom
  # Derivative with respect to x.
  deriv_numerator = jnp.square(avg_slope) * (
      d_hi * t_sq + 2. * avg_slope * t_prod + d_lo * one_minus_t_sq)
  deriv = deriv_numerator / jnp.square(shared_denom)
  return value, deriv
def identity_padded_rational_quadratic_spline(
    x: Array, bin_positions: Array, bin_heights: Array,
    derivatives: Array) -> Tuple[Array, Array]:
  """An identity padded rational quadratic spline.

  Inside [bin_positions[0], bin_positions[-1]] the rational quadratic
  spline is evaluated; outside that interval the map is the identity with
  unit derivative.

  Args:
    x: the value to evaluate the spline at.
    bin_positions: sorted values of bin x positions of length num_bins+1.
    bin_heights: absolute height of bin of length num_bins-1.
    derivatives: derivatives at internal bin edge of length num_bins-1.
  Returns:
    The value of the spline at x.
    The derivative with respect to x of the spline at x.
  """
  low = bin_positions[0]
  high = bin_positions[-1]
  # Pad heights so the spline endpoints coincide with the identity map,
  # and pad the internal derivatives with unit slopes at both limits.
  padded_heights = jnp.concatenate(
      (jnp.atleast_1d(jnp.array(low)),
       bin_heights,
       jnp.atleast_1d(jnp.array(high))))
  padded_derivatives = jnp.concatenate(
      (jnp.ones((1,)), derivatives, jnp.ones((1,))))
  # Smooth blend masks: 1. inside the active interval, 0. outside.
  inside = jnp.logical_and(jnp.greater(x, low), jnp.less(x, high))
  inside_weight = inside * 1.
  outside_weight = jnp.logical_not(inside) * 1.
  spline_val, spline_deriv = rational_quadratic_spline(
      x, bin_positions, padded_heights, padded_derivatives)
  val = spline_val * inside_weight + outside_weight * x
  deriv = spline_deriv * inside_weight + outside_weight * 1.
  return val, deriv
class AutoregressiveMLP(hk.Module):
  """An MLP which is constrained to have autoregressive dependency."""

  def __init__(self,
               num_hiddens_per_input_dim: List[int],
               include_self_links: bool,
               non_linearity,
               zero_final: bool,
               bias_last: bool,
               name=None):
    """
    Args:
      num_hiddens_per_input_dim: hidden features per input dimension for
        each layer; its length sets the number of layers.
      include_self_links: if False, the last layer's mask excludes the
        diagonal so output block i cannot depend on input i.
      non_linearity: elementwise activation applied between layers.
      zero_final: if True, initialize the last layer's weights to zero.
      bias_last: if True, the last layer has learnable biases; otherwise
        fixed zeros are used.
      name: optional haiku module name.
    """
    super().__init__(name=name)
    self._num_hiddens_per_input_dim = num_hiddens_per_input_dim
    self._include_self_links = include_self_links
    self._non_linearity = non_linearity
    self._zero_final = zero_final
    self._bias_last = bias_last

  def __call__(self, x: Array) -> Array:
    input_dim = x.shape[0]
    # Start with one feature per input dimension: shape (input_dim, 1)
    hidden_representation = jnp.atleast_2d(x).T
    prev_hid_per_dim = 1
    num_hidden_layers = len(self._num_hiddens_per_input_dim)
    final_index = num_hidden_layers-1
    for layer_index in range(num_hidden_layers):
      is_last_layer = (final_index == layer_index)
      hid_per_dim = self._num_hiddens_per_input_dim[layer_index]
      name_stub = '_'+str(layer_index)
      # Weights connect (input block i, feature j) -> (input block k,
      # feature l); shape (input_dim, prev_hid, input_dim, hid)
      layer_shape = (input_dim,
                     prev_hid_per_dim,
                     input_dim,
                     hid_per_dim)
      in_degree = prev_hid_per_dim * input_dim
      if is_last_layer and self._zero_final:
        w_init = jnp.zeros
      else:
        # Truncated normal scaled by the layer fan-in
        w_init = hk.initializers.TruncatedNormal(1. / np.sqrt(in_degree))
      bias_init = hk.initializers.Constant(jnp.zeros((input_dim, hid_per_dim,)))
      weights = hk.get_parameter(name='weights'+name_stub,
                                 shape=layer_shape,
                                 dtype=x.dtype,
                                 init=w_init)
      if is_last_layer and not self._bias_last:
        # Fixed zero biases on the last layer when bias_last is False
        biases = jnp.zeros((input_dim, hid_per_dim,))
      else:
        biases = hk.get_parameter(name='biases'+name_stub,
                                  shape=(input_dim, hid_per_dim),
                                  dtype=x.dtype,
                                  init=bias_init)
      # Lower-triangular mask over input blocks enforces the autoregressive
      # ordering; k=-1 additionally removes self links on the last layer
      if not(self._include_self_links) and is_last_layer:
        k = -1
      else:
        k = 0
      mask = jnp.tril(jnp.ones((input_dim, input_dim)),
                      k=k)
      masked_weights = mask[:, None, :, None] * weights
      new_hidden_representation = jnp.einsum('ijkl,ij->kl',
                                             masked_weights,
                                             hidden_representation) + biases
      prev_hid_per_dim = hid_per_dim
      # Activation between layers; the last layer output stays linear
      if not is_last_layer:
        hidden_representation = self._non_linearity(new_hidden_representation)
      else:
        hidden_representation = new_hidden_representation
    return hidden_representation
class InverseAutogressiveFlow(object):
    """A generic inverse autoregressive flow.

    See https://arxiv.org/abs/1606.04934

    Composed of two callables supplied at construction time:

    1) autoregressive_func maps an array of shape (num_dim,) to an array of
       shape (num_dim, num_features). It is assumed (not checked) to be
       autoregressive: output[i, :] depends only on input[:i].
    2) transform_func takes the (num_dim, num_features) parameters and the
       original (num_dim,) input, applies the parameterized transformation,
       and returns the transformed (num_dim,) output together with a single
       log-abs-det-Jacobian value.
    """

    def __init__(self,
                 autoregressive_func: Callable[[Array], Array],
                 transform_func: Callable[[Array, Array], Tuple[Array, Array]]):
        self._autoregressive_func = autoregressive_func
        self._transform_func = transform_func

    def __call__(self, x: Array) -> Tuple[Array, Array]:
        """x is of shape (num_dim,)."""
        features = self._autoregressive_func(x)
        # transform_func returns (output, log_abs_det) directly.
        return self._transform_func(features, x)
class SplineInverseAutoregressiveFlow(ConfigurableFlow):
"""An inverse autoregressive flow with spline transformer.
config must contain the following fields:
num_spline_bins: Number of bins for rational quadratic spline.
intermediate_hids_per_dim: See AutoregresiveMLP.
num_layers: Number of layers for AutoregressiveMLP.
identity_init: Whether to initalize the flow to the identity.
bias_last: Whether to include biases on the last later of AutoregressiveMLP
lower_lim: Lower limit of active region for rational quadratic spline.
upper_lim: Upper limit of active region for rational quadratic spline.
min_bin_size: Minimum bin size for rational quadratic spline.
min_derivative: Minimum derivative for rational quadratic spline.
"""
def __init__(self,
             config: ConfigDict):
    """Build the spline IAF from `config`; see class docstring for fields."""
    super().__init__(config)
    self._num_spline_bins = config.num_spline_bins
    self._lower_lim = config.lower_lim
    self._upper_lim = config.upper_lim
    self._min_bin_size = config.min_bin_size
    self._min_derivative = config.min_derivative
    # Per input dimension the head emits: num_spline_bins x-bin sizes,
    # num_spline_bins y-bin sizes and num_spline_bins - 1 derivatives.
    parameters_per_dim = 3 * config.num_spline_bins - 1
    hidden_widths = [config.intermediate_hids_per_dim] * config.num_layers
    self._autoregressive_mlp = AutoregressiveMLP(
        hidden_widths + [parameters_per_dim],
        include_self_links=False,
        non_linearity=jax.nn.leaky_relu,
        zero_final=config.identity_init,
        bias_last=config.bias_last)
def _check_configuration(self, config: ConfigDict):
    """Validate that `config` carries every required field with the right type."""
    self._check_members_types(config, [
        ('num_spline_bins', int),
        ('intermediate_hids_per_dim', int),
        ('num_layers', int),
        ('identity_init', bool),
        ('bias_last', bool),
        ('lower_lim', float),
        ('upper_lim', float),
        ('min_bin_size', float),
        ('min_derivative', float),
    ])
def _unpack_spline_params(self, raw_param_vec) -> Tuple[Array, Array, Array]:
    """Slice one raw parameter vector into (x bin sizes, y bin sizes, derivatives).

    The layout is: num_spline_bins x-sizes, then num_spline_bins y-sizes,
    then num_spline_bins - 1 unconstrained derivatives.
    """
    num_bins = self._num_spline_bins
    unconst_bin_size_x = raw_param_vec[:num_bins]
    unconst_bin_size_y = raw_param_vec[num_bins:2 * num_bins]
    unconst_derivs = raw_param_vec[2 * num_bins:3 * num_bins - 1]
    return unconst_bin_size_x, unconst_bin_size_y, unconst_derivs
def _transform_raw_to_spline_params(
self, raw_param_vec: Array) -> Tuple[Array, Array, Array]:
unconst_bin_size_x, unconst_bin_size_y, unconst_derivs = self._unpack_spline_params(
raw_param_vec)
def normalize_bin_sizes(unconst_bin_sizes: Array) -> Array:
bin_range = self._upper_lim - self._lower_lim
reduced_bin_range = (
bin_range - self._num_spline_bins * self._min_bin_size)
return jax.nn.softmax(
unconst_bin_sizes) * reduced_bin_range + self._min_bin_size
bin_size_x = normalize_bin_sizes(unconst_bin_size_x)
bin_size_y = normalize_bin_sizes(unconst_bin_size_y)
# | |
"show running-config interface ethernet 1/g10").and_return([
"switchport mode trunk",
])
with self.configuring_and_committing():
self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan add 1000").once().ordered().and_return([
"Warning: The use of large numbers of VLANs or interfaces may cause significant",
"delays in applying the configuration."
])
self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
self.switch.add_trunk_vlan("ethernet 1/g10", 1000)
def test_add_trunk_vlan_unknown_interface(self):
    """add_trunk_vlan on a nonexistent interface raises UnknownInterface without entering configure mode."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface ethernet 1/g99").and_return([
        "ERROR: Invalid input!",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure").never()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.add_trunk_vlan("ethernet 1/g99", 1000)
    assert_that(str(expect.exception), equal_to("Unknown interface ethernet 1/g99"))
def test_add_trunk_vlan_unknown_vlan(self):
    """The switch's per-VLAN failure report is parsed and surfaced as UnknownVlan."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface ethernet 1/g10").and_return([
        "switchport mode trunk",
    ])
    with self.configuring():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        # Dell-style failure table reported after the "add" command.
        self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan add 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration.",
            "          Failure Information",
            "---------------------------------------",
            "   VLANs failed to be configured : 1",
            "---------------------------------------",
            "   VLAN             Error",
            "---------------------------------------",
            "VLAN      1000 ERROR: This VLAN does not exist.",
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.add_trunk_vlan("ethernet 1/g10", 1000)
    assert_that(str(expect.exception), equal_to("Vlan 1000 not found"))
def test_add_trunk_vlan_to_general_mode(self):
    """On a general-mode port, the 'general' (not 'trunk') allowed-vlan command is used."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface ethernet 1/g10").and_return([
        "switchport mode general",
    ])
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport general allowed vlan add 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.add_trunk_vlan("ethernet 1/g10", 1000)
def test_add_trunk_vlan_without_mode_and_access_vlan_assume_no_mode_set_trunk_mode(self):
    """With no switchport config at all, trunk mode is set first, then the vlan added."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface ethernet 1/g10").and_return([
    ])
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport mode trunk").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan add 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.add_trunk_vlan("ethernet 1/g10", 1000)
def test_add_trunk_vlan_without_mode_with_access_vlan_assume_access_mode_and_fails(self):
    """An access vlan with no explicit mode implies access mode, so the trunk operation is rejected."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface ethernet 1/g10").and_return([
        "switchport access vlan 2000",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure").never()
    with self.assertRaises(InterfaceInWrongPortMode) as expect:
        self.switch.add_trunk_vlan("ethernet 1/g10", 1000)
    assert_that(str(expect.exception), equal_to("Operation cannot be performed on a access mode interface"))
def test_remove_trunk_vlan(self):
    """remove_trunk_vlan issues the trunk-mode 'remove' command for a configured vlan."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface ethernet 1/g10").and_return([
        "switchport mode trunk",
        "switchport trunk allowed vlan add 1000",
    ])
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan remove 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.remove_trunk_vlan("ethernet 1/g10", 1000)
def test_remove_trunk_vlan_unknown_interface(self):
    """remove_trunk_vlan on a nonexistent interface raises UnknownInterface without configuring."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface ethernet 1/g99").and_return([
        "ERROR: Invalid input!",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure").never()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.remove_trunk_vlan("ethernet 1/g99", 1000)
    assert_that(str(expect.exception), equal_to("Unknown interface ethernet 1/g99"))
def test_remove_trunk_vlan_not_set_at_all(self):
    """Removing a vlan that was never allowed on the trunk raises TrunkVlanNotSet."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface ethernet 1/g10").and_return([
        "switchport mode trunk",
    ])
    with self.assertRaises(TrunkVlanNotSet) as expect:
        self.switch.remove_trunk_vlan("ethernet 1/g10", 1000)
    assert_that(str(expect.exception), equal_to("Trunk Vlan is not set on interface ethernet 1/g10"))
def test_remove_trunk_vlan_not_set_in_ranges(self):
    """A vlan absent from the configured vlan list (999,1001) also raises TrunkVlanNotSet."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface ethernet 1/g10").and_return([
        "switchport mode trunk",
        "switchport trunk allowed vlan add 999,1001",
    ])
    with self.assertRaises(TrunkVlanNotSet) as expect:
        self.switch.remove_trunk_vlan("ethernet 1/g10", 1000)
    assert_that(str(expect.exception), equal_to("Trunk Vlan is not set on interface ethernet 1/g10"))
def test_remove_trunk_vlan_general_mode_and_in_range(self):
    """A vlan inside a configured range (999-1001) on a general-mode port is removable."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface ethernet 1/g10").and_return([
        "switchport mode general",
        "switchport general allowed vlan add 999-1001",
    ])
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport general allowed vlan remove 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.remove_trunk_vlan("ethernet 1/g10", 1000)
def test_add_bond_trunk_vlan(self):
    """add_bond_trunk_vlan targets the port-channel interface with the trunk 'add' command."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 10").and_return([
        "switchport mode trunk",
    ])
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan add 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.add_bond_trunk_vlan(10, 1000)
def test_add_bond_trunk_vlan_unknown_interface(self):
    """A nonexistent port-channel raises UnknownBond (not UnknownInterface) without configuring."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 99").and_return([
        "ERROR: Invalid input!",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure").never()
    with self.assertRaises(UnknownBond) as expect:
        self.switch.add_bond_trunk_vlan(99, 1000)
    assert_that(str(expect.exception), equal_to("Bond 99 not found"))
def test_add_bond_trunk_vlan_unknown_vlan(self):
    """The per-VLAN failure report on a port-channel is parsed and raised as UnknownVlan."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 10").and_return([
        "switchport mode trunk",
    ])
    with self.configuring():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        # Dell-style failure table reported after the "add" command.
        self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan add 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration.",
            "          Failure Information",
            "---------------------------------------",
            "   VLANs failed to be configured : 1",
            "---------------------------------------",
            "   VLAN             Error",
            "---------------------------------------",
            "VLAN      1000 ERROR: This VLAN does not exist.",
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.add_bond_trunk_vlan(10, 1000)
    assert_that(str(expect.exception), equal_to("Vlan 1000 not found"))
def test_add_bond_trunk_vlan_to_general_mode(self):
    """On a general-mode port-channel, the 'general' allowed-vlan variant is used."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 10").and_return([
        "switchport mode general",
    ])
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport general allowed vlan add 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.add_bond_trunk_vlan(10, 1000)
def test_add_bond_trunk_vlan_without_mode_and_access_vlan_assume_no_mode_set_trunk_mode(self):
    """With no switchport config on the bond, trunk mode is set first, then the vlan added."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 10").and_return([
    ])
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport mode trunk").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan add 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.add_bond_trunk_vlan(10, 1000)
def test_add_bond_trunk_vlan_without_mode_with_access_vlan_assume_access_mode_and_fails(self):
    """An access vlan on the bond implies access mode, so the trunk operation is rejected."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 10").and_return([
        "switchport access vlan 2000",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure").never()
    with self.assertRaises(InterfaceInWrongPortMode) as expect:
        self.switch.add_bond_trunk_vlan(10, 1000)
    assert_that(str(expect.exception), equal_to("Operation cannot be performed on a access mode interface"))
def test_remove_bond_trunk_vlan(self):
    """remove_bond_trunk_vlan issues the trunk 'remove' command on the port-channel."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 10").and_return([
        "switchport mode trunk",
        "switchport trunk allowed vlan add 1000",
    ])
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan remove 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.remove_bond_trunk_vlan(10, 1000)
def test_remove_bond_trunk_vlan_unknown_interface(self):
    """A nonexistent port-channel raises UnknownBond without entering configure mode."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 99").and_return([
        "ERROR: Invalid input!",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure").never()
    with self.assertRaises(UnknownBond) as expect:
        self.switch.remove_bond_trunk_vlan(99, 1000)
    assert_that(str(expect.exception), equal_to("Bond 99 not found"))
def test_remove_bond_trunk_vlan_not_set_at_all(self):
    """Removing a vlan never allowed on the bond trunk raises TrunkVlanNotSet."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 10").and_return([
        "switchport mode trunk",
    ])
    with self.assertRaises(TrunkVlanNotSet) as expect:
        self.switch.remove_bond_trunk_vlan(10, 1000)
    assert_that(str(expect.exception), equal_to("Trunk Vlan is not set on interface port-channel 10"))
def test_remove_bond_trunk_vlan_not_set_in_ranges(self):
    """A vlan absent from the bond's vlan list (999,1001) raises TrunkVlanNotSet."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 10").and_return([
        "switchport mode trunk",
        "switchport trunk allowed vlan add 999,1001",
    ])
    with self.assertRaises(TrunkVlanNotSet) as expect:
        self.switch.remove_bond_trunk_vlan(10, 1000)
    assert_that(str(expect.exception), equal_to("Trunk Vlan is not set on interface port-channel 10"))
def test_remove_bond_trunk_vlan_general_mode_and_in_range(self):
    """A vlan inside a configured range (999-1001) on a general-mode bond is removable."""
    flexmock(self.switch.page_reader).should_receive("do").with_args(self.mocked_ssh_client, "show running-config interface port-channel 10").and_return([
        "switchport mode general",
        "switchport general allowed vlan add 999-1001",
    ])
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("switchport general allowed vlan remove 1000").once().ordered().and_return([
            "Warning: The use of large numbers of VLANs or interfaces may cause significant",
            "delays in applying the configuration."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.remove_bond_trunk_vlan(10, 1000)
def test_edit_interface_spanning_tree_enable_edge(self):
    """edge=True maps to the 'spanning-tree portfast' command."""
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("spanning-tree portfast").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.edit_interface_spanning_tree('ethernet 1/g10', edge=True)
def test_edit_interface_spanning_tree_disable_edge(self):
    """edge=False maps to the negated 'no spanning-tree portfast' command."""
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("no spanning-tree portfast").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.edit_interface_spanning_tree('ethernet 1/g10', edge=False)
def test_edit_interface_spanning_tree_optional_params(self):
    """With no options given, nothing is sent to the switch at all."""
    self.mocked_ssh_client.should_receive("do").with_args("configure").never()
    self.switch.edit_interface_spanning_tree("ethernet 1/g10")
def test_set_interface_lldp_state(self):
    """Enabling LLDP sends transmit, receive and both med transmit-tlv commands, in order."""
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("lldp transmit").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("lldp receive").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("lldp med transmit-tlv capabilities").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("lldp med transmit-tlv network-policy").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.set_interface_lldp_state("ethernet 1/g10", True)
def test_disable_lldp(self):
    """Disabling LLDP sends the negated ('no ...') form of every LLDP command, in order."""
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("no lldp transmit").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("no lldp receive").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("no lldp med transmit-tlv capabilities").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("no lldp med transmit-tlv network-policy").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.set_interface_lldp_state("ethernet 1/g10", False)
def test_set_interface_description(self):
    """The description is quoted before being sent to the switch."""
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("description \"Hey\"").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.set_interface_description("ethernet 1/g10", "Hey")
def test_set_interface_description_invalid_interface(self):
    """The 'invalid interface' error from the switch is raised as UnknownInterface."""
    with self.configuring():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g99").once().ordered().and_return([
            "An invalid interface has been used for this function."
        ])
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.set_interface_description("ethernet 1/g99", "Hey")
    assert_that(str(expect.exception), equal_to("Unknown interface ethernet 1/g99"))
def test_set_interface_description_invalid_description(self):
    """Embedded quotes break the CLI quoting; the '^ marker' error becomes BadInterfaceDescription."""
    with self.configuring():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("description \"Hey \"you\"\"").once().ordered().and_return([
            "                                     ^",
            "% Invalid input detected at '^' marker."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    with self.assertRaises(BadInterfaceDescription) as expect:
        self.switch.set_interface_description("ethernet 1/g10", 'Hey "you"')
    assert_that(str(expect.exception), equal_to("Invalid description : Hey \"you\""))
def test_set_bond_description(self):
    """Bond descriptions go through the port-channel interface with the same quoting."""
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("description \"Hey\"").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.set_bond_description(10, "Hey")
def test_set_bond_description_invalid_bond(self):
    """The 'invalid interface' error for a bogus port-channel is raised as UnknownBond."""
    with self.configuring():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 99999").once().ordered().and_return([
            "An invalid interface has been used for this function."
        ])
    with self.assertRaises(UnknownBond) as expect:
        self.switch.set_bond_description(99999, "Hey")
    assert_that(str(expect.exception), equal_to("Bond 99999 not found"))
def test_set_bond_description_invalid_description(self):
    """Embedded quotes in a bond description raise BadInterfaceDescription."""
    with self.configuring():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("description \"Hey \"you\"\"").once().ordered().and_return([
            "                                     ^",
            "% Invalid input detected at '^' marker."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    with self.assertRaises(BadInterfaceDescription) as expect:
        self.switch.set_bond_description(10, 'Hey "you"')
    assert_that(str(expect.exception), equal_to("Invalid description : Hey \"you\""))
def test_set_interface_mtu(self):
    """set_interface_mtu sends 'mtu <size>' on the interface."""
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("mtu 1520").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.set_interface_mtu("ethernet 1/g10", 1520)
def test_unset_interface_mtu(self):
    """unset_interface_mtu sends 'no mtu' to restore the default."""
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("no mtu").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.unset_interface_mtu("ethernet 1/g10")
def test_set_interface_mtu_with_out_of_range_value_raises(self):
    """The switch's out-of-range complaint is surfaced as InvalidMtuSize."""
    with self.configuring():
        self.mocked_ssh_client.should_receive("do").with_args("interface ethernet 1/g10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("mtu 9999").once().ordered().and_return([
            "                                     ^",
            "Value is out of range. The valid range is 1518 to 9216."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    with self.assertRaises(InvalidMtuSize) as expect:
        self.switch.set_interface_mtu("ethernet 1/g10", 9999)
    assert_that(str(expect.exception), equal_to("MTU value is invalid : 9999"))
def test_set_bond_mtu(self):
    """set_bond_mtu sends 'mtu <size>' on the port-channel."""
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("mtu 1520").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.set_bond_mtu(10, 1520)
def test_set_bond_mtu_with_out_of_range_value_raises(self):
    """The out-of-range complaint on a port-channel is surfaced as InvalidMtuSize."""
    with self.configuring():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("mtu 9999").once().ordered().and_return([
            "                                     ^",
            "Value is out of range. The valid range is 1518 to 9216."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    with self.assertRaises(InvalidMtuSize) as expect:
        self.switch.set_bond_mtu(10, 9999)
    assert_that(str(expect.exception), equal_to("MTU value is invalid : 9999"))
def test_unset_bond_mtu(self):
    """unset_bond_mtu sends 'no mtu' on the port-channel."""
    with self.configuring_and_committing():
        self.mocked_ssh_client.should_receive("do").with_args("interface port-channel 10").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("no mtu").once().ordered().and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.switch.unset_bond_mtu(10)
def test_get_mac_addresses(self):
# internal/build.py
import os
import time
import subprocess
import functools
import pipes
import shutil
import socket
import sys
from http import HTTPStatus as hs
from conducto import api
from conducto.shared import client_utils, constants, log, types as t
import conducto.internal.host_detection as hostdet
@functools.lru_cache(None)
def docker_desktop_23():
    """Return True when the /host_mnt probe fails, False when it succeeds.

    Docker Desktop 2.2.x exposes host drives at /host_mnt inside containers;
    listing it from a throwaway alpine container succeeds there. On 2.3+
    (presumably — TODO confirm, based on the function name) the listing fails
    with a non-zero exit, so CalledProcessError means "2.3-style" layout.
    Cached: the Docker version will not change within one process run.
    """
    lsdrives = "docker run --rm -v /:/mnt/external alpine ls /mnt/external/host_mnt"
    kwargs = dict(check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Only the subprocess call belongs in the try; the result is unused, all
    # we care about is whether `check=True` raises.
    try:
        subprocess.run(lsdrives, shell=True, **kwargs)
        return False
    except subprocess.CalledProcessError:
        return True
@functools.lru_cache(None)
def docker_available_drives():
    """Return the lowercase letters of host drives reachable from Docker.

    On WSL, a drive counts as available if `wslpath` can translate it; on
    native Windows, the fixed drives reported by the Win32 API are used.
    """
    import string

    if hostdet.is_wsl():
        kwargs = dict(check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        drives = []
        for drive in string.ascii_lowercase:
            drivedir = f"{drive}:\\"
            try:
                subprocess.run(f"wslpath -u {drivedir}", shell=True, **kwargs)
                drives.append(drive)
            except subprocess.CalledProcessError:
                pass
    else:
        from ctypes import windll  # Windows only

        # GetLogicalDrives returns a bitmask where bit i set means drive
        # letters[i] exists (bit 0 = A:).
        drive_bitmask = windll.kernel32.GetLogicalDrives()
        letters = string.ascii_lowercase
        # BUG FIX: the old code did `enumerate(bin(drive_bitmask))`, which
        # walks the "0b..." *string* (prefix included, most-significant bit
        # first) and therefore mapped bits to the wrong letters. Test the
        # bits of the integer directly instead.
        drives = [letters[i] for i in range(len(letters)) if drive_bitmask & (1 << i)]
        # filter to fixed drives (GetDriveTypeW == 3 means DRIVE_FIXED)
        is_fixed = lambda x: windll.kernel32.GetDriveTypeW(f"{x}:\\") == 3
        drives = [d for d in drives if is_fixed(d.upper())]
    return drives
@functools.lru_cache(None)
def _split_windocker(path):
    """Translate the drive part of a WSL "path//context" spec for Docker.

    Only the text before the first "//" is a filesystem path; the remainder
    (if any) is reattached unchanged. BUG FIX: the old `path.split("//")`
    kept only the first post-separator chunk, silently dropping everything
    after a second "//"; `partition` preserves the full remainder.
    """
    base, sep, ctx = path.partition("//")
    mangled = hostdet.wsl_host_docker_path(base)
    return f"{mangled}//{ctx}" if sep else mangled
def _wsl_translate_locations(node):
    """Rewrite each image's path attributes from WSL form to Docker form.

    Mutates the images in place and returns the set of drive letters the
    translated paths live on (character [1] of the translated path —
    presumably the "/c/..." docker path form; verify against
    hostdet.wsl_host_docker_path).
    """
    drives = set()
    # Deduplicate images by identity: many nodes may share one image object.
    seen_ids = set()
    images = []
    for child in node.stream():
        if id(child.image) not in seen_ids:
            seen_ids.add(id(child.image))
            images.append(child.image)
    # The same translate-and-record step applies to all three path attributes.
    for img in images:
        for attr in ("copy_dir", "context", "dockerfile"):
            path = getattr(img, attr)
            if path:
                newpath = _split_windocker(path)
                setattr(img, attr, newpath)
                drives.add(newpath[1])
    return drives
def _windows_translate_locations(node):
    """Rewrite each image's path attributes from Windows form to Docker form.

    Mutates the images in place and returns the set of drive letters the
    translated paths live on (character [1] of the translated path —
    presumably the "/c/..." docker path form; verify against
    hostdet.windows_docker_path).
    """
    drives = set()
    # Deduplicate images by identity: many nodes may share one image object.
    seen_ids = set()
    images = []
    for child in node.stream():
        if id(child.image) not in seen_ids:
            seen_ids.add(id(child.image))
            images.append(child.image)
    # The same translate-and-record step applies to all three path attributes.
    for img in images:
        for attr in ("copy_dir", "context", "dockerfile"):
            path = getattr(img, attr)
            if path:
                newpath = hostdet.windows_docker_path(path)
                setattr(img, attr, newpath)
                drives.add(newpath[1])
    return drives
def build(
    node,
    build_mode=constants.BuildMode.DEPLOY_TO_CLOUD,
    use_shell=False,
    use_app=True,
    retention=7,
    is_public=False,
):
    """Register the root *node* as a new pipeline and launch it.

    Must be called on the root of the tree (no parent, named "/").
    """
    assert node.parent is None
    assert node.name == "/"

    # On Windows/WSL, image paths must first be translated into the form
    # Docker understands, and every referenced drive must be shared with
    # Docker Desktop.
    if hostdet.is_wsl() or hostdet.is_windows():
        if hostdet.is_wsl():
            required_drives = _wsl_translate_locations(node)
        else:
            required_drives = _windows_translate_locations(node)
        unavailable = set(required_drives).difference(docker_available_drives())
        if unavailable:
            msg = f"The drive {unavailable.pop()} is used in an image context, but is not available in Docker. Review your Docker Desktop file sharing settings."
            raise hostdet.WindowsMapError(msg)

    from .. import api

    # refresh the token for every pipeline launch
    # Force in case of cognito change
    node.token = token = api.Auth().get_token_from_shell(force=True)
    serialization = node.serialize()
    command = " ".join(pipes.quote(arg) for arg in sys.argv)

    # Register pipeline, get <pipeline_id>
    is_cloud = build_mode == constants.BuildMode.DEPLOY_TO_CLOUD
    pipeline_id = api.Pipeline().create(
        token,
        command,
        cloud=is_cloud,
        retention=retention,
        tags=node.tags or [],
        title=node.title,
        is_public=is_public,
    )
    launch_from_serialization(
        serialization, pipeline_id, build_mode, use_shell, use_app, token
    )
def launch_from_serialization(
    serialization,
    pipeline_id,
    build_mode=constants.BuildMode.DEPLOY_TO_CLOUD,
    use_shell=False,
    use_app=True,
    token=None,
    inject_env=None,
    is_migration=False,
):
    """Deploy an already-serialized pipeline, either to the cloud or into a
    local Docker container, then hand off to :func:`run` to connect the UI.

    :return: the (unchanged) ``pipeline_id``.
    """
    if not token:
        token = api.Auth().get_token_from_shell(force=True)

    def deploy_to_cloud():
        # Upload the serialization, then ask the cloud manager to launch it.
        # Once that returns, connect to it using the shell_ui.
        api.Pipeline().save_serialization(token, pipeline_id, serialization)
        api.Manager().launch(
            token, pipeline_id, env=inject_env, is_migration=is_migration
        )
        log.debug(f"Connecting to pipeline_id={pipeline_id}")

    def deploy_locally():
        clean_log_dirs(token)
        # Persist the serialization under ~/.conducto so the local manager
        # container can read it.
        progdir = constants.ConductoPaths.get_local_path(pipeline_id)
        os.makedirs(progdir, exist_ok=True)
        ser_path = os.path.join(progdir, constants.ConductoPaths.SERIALIZATION)
        with open(ser_path, "w") as fh:
            fh.write(serialization)
        api.Pipeline().update(token, pipeline_id, {"program_path": ser_path})
        run_in_local_container(
            token, pipeline_id, inject_env=inject_env, is_migration=is_migration
        )

    if build_mode == constants.BuildMode.DEPLOY_TO_CLOUD:
        func, starting = deploy_to_cloud, False
    else:
        func, starting = deploy_locally, True
    run(token, pipeline_id, func, use_app, use_shell, "Starting", starting)
    return pipeline_id
def run(token, pipeline_id, func, use_app, use_shell, msg, starting):
    """Execute the deploy callable *func* and connect the user to the pipeline.

    :param token: auth token used for subsequent API calls.
    :param pipeline_id: id of the pipeline being started.
    :param func: zero-argument callable that performs the actual deploy.
    :param use_app: open the web app in a browser on success.
    :param use_shell: attach the interactive shell UI on success.
    :param msg: verb for the progress message (e.g. "Starting").
    :param starting: True for local launches; pre-pulls the manager image.
    """
    from .. import api, shell_ui

    url = api.Config().get_connect_url(pipeline_id)
    u_url = log.format(url, underline=True)
    if starting:
        tag = api.Config().get_image_tag()
        manager_image = constants.ImageUtil.get_manager_image(tag)
        try:
            # `docker image inspect` succeeds only if the image is already
            # present locally; pull it on failure.
            client_utils.subprocess_run(["docker", "image", "inspect", manager_image])
        except client_utils.CalledProcessError:
            docker_parts = ["docker", "pull", manager_image]
            print("Downloading the Conducto docker image that runs your pipeline.")
            log.debug(" ".join(pipes.quote(s) for s in docker_parts))
            client_utils.subprocess_run(
                docker_parts, msg="Error pulling manager container",
            )
    print(f"{msg} pipeline {pipeline_id}.")
    func()
    if _manager_debug():
        return
    if use_app:
        print(
            f"Viewing at {u_url}. To disable, specify '--no-app' on the command line."
        )
        hostdet.system_open(url)
    else:
        print(f"View at {u_url}")
    data = api.Pipeline().get(token, pipeline_id)
    if data.get("is_public"):
        # BUG FIX: the key here was the literal redaction placeholder
        # "<PASSWORD>", which can never exist in an API response.  The key
        # "unauth_password" matches the local variable and the public-URL
        # usage below -- TODO confirm against the API response schema.
        unauth_password = data["unauth_password"]
        url = api.Config().get_url()
        public_url = f"{url}/app/s/{pipeline_id}/{unauth_password}"
        u_public_url = log.format(public_url, underline=True)
        print(f"\nPublic view at:\n{u_public_url}")
    if use_shell:
        shell_ui.connect(token, pipeline_id, "Deploying")
def run_in_local_container(
token, pipeline_id, update_token=False, inject_env=None, is_migration=False
):
# Remote base dir will be verified by container.
local_basedir = constants.ConductoPaths.get_local_base_dir()
if inject_env is None:
inject_env = {}
if hostdet.is_wsl():
local_basedir = os.path.realpath(local_basedir)
local_basedir = hostdet.wsl_host_docker_path(local_basedir)
elif hostdet.is_windows():
local_basedir = hostdet.windows_docker_path(local_basedir)
else:
subp = subprocess.Popen(
"head -1 /proc/self/cgroup|cut -d/ -f3",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
)
container_id, err = subp.communicate()
container_id = container_id.decode("utf-8").strip()
if container_id:
# Mount to the ~/.conducto of the host machine and not of the container
import json
subp = subprocess.Popen(
f"docker inspect -f '{{{{ json .Mounts }}}}' {container_id}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
)
mount_data, err = subp.communicate()
if subp.returncode == 0:
mounts = json.loads(mount_data)
for mount in mounts:
if mount["Destination"] == local_basedir:
local_basedir = mount["Source"]
break
# The homedir inside the manager is /root
remote_basedir = "/root/.conducto"
tag = api.Config().get_image_tag()
manager_image = constants.ImageUtil.get_manager_image(tag)
ccp = constants.ConductoPaths
pipelinebase = ccp.get_local_path(pipeline_id, expand=False, base=remote_basedir)
# Note: This path is in the docker which is always unix
pipelinebase = pipelinebase.replace(os.path.sep, "/")
serialization = f"{pipelinebase}/{ccp.SERIALIZATION}"
container_name = f"conducto_manager_{pipeline_id}"
network_name = os.getenv("CONDUCTO_NETWORK", f"conducto_network_{pipeline_id}")
if not is_migration:
try:
client_utils.subprocess_run(
["docker", "network", "create", network_name, "--label=conducto"]
)
except client_utils.CalledProcessError as e:
if f"network with name {network_name} already exists" in e.stderr.decode():
pass
else:
raise
flags = [
# Detached mode.
"-d",
# Remove container when done.
"--rm",
# --name is the name of the container, as in when you do `docker ps`
# --hostname is the name of the host inside the container.
# Set them equal so that the manager can use socket.gethostname() to
# spin up workers that connect to its network.
"--name",
container_name,
"--network",
network_name,
"--hostname",
container_name,
"--label",
"conducto",
# Mount local conducto basedir on container. Allow TaskServer
# to access config and serialization and write logs.
"-v",
f"{local_basedir}:{remote_basedir}",
# Mount docker sock so we can spin out task workers.
"-v",
"/var/run/docker.sock:/var/run/docker.sock",
# Specify expected base dir for container to verify.
"-e",
f"CONDUCTO_BASE_DIR_VERIFY={remote_basedir}",
"-e",
f"CONDUCTO_LOCAL_BASE_DIR={local_basedir}",
"-e",
f"CONDUCTO_LOCAL_HOSTNAME={socket.gethostname()}",
"-e",
f"CONDUCTO_NETWORK={network_name}",
]
for env_var in (
"CONDUCTO_URL",
"CONDUCTO_CONFIG",
"IMAGE_TAG",
"CONDUCTO_DEV_REGISTRY",
):
if os.environ.get(env_var):
flags.extend(["-e", f"{env_var}={os.environ[env_var]}"])
for k, v in inject_env.items():
flags.extend(["-e", f"{k}={v}"])
if hostdet.is_wsl() or hostdet.is_windows():
drives = docker_available_drives()
if docker_desktop_23():
flags.extend(["-e", "WINDOWS_HOST=host_mnt"])
else:
flags.extend(["-e", "WINDOWS_HOST=plain"])
for d in drives:
# Mount whole system read-only to enable rebuilding images as needed
mount = f"type=bind,source={d}:/,target={constants.ConductoPaths.MOUNT_LOCATION}/{d.lower()},readonly"
flags += ["--mount", mount]
else:
# Mount whole system read-only to enable rebuilding images as needed
mount = f"type=bind,source=/,target={constants.ConductoPaths.MOUNT_LOCATION},readonly"
flags += ["--mount", mount]
if _manager_debug():
flags[0] = "-it"
flags += ["-e", "CONDUCTO_LOG_LEVEL=0"]
capture_output = False
else:
capture_output = True
mcpu = _manager_cpu()
if mcpu > 0:
flags += ["--cpus", str(mcpu)]
# WSL doesn't persist this into containers natively
# Have to have this configured so that we can use host docker creds to pull containers
docker_basedir = constants.ConductoPaths.get_local_docker_config_dir()
if docker_basedir:
flags += ["-v", f"{docker_basedir}:/root/.docker"]
cmd_parts = [
"python",
"-m",
"manager.src",
"-p",
pipeline_id,
"-i",
serialization,
"--profile",
api.Config().default_profile,
"--local",
]
if update_token:
cmd_parts += ["--update_token", "--token", token]
if manager_image.startswith("conducto/"):
docker_parts = ["docker", "pull", manager_image]
log.debug(" ".join(pipes.quote(s) for s in docker_parts))
client_utils.subprocess_run(
docker_parts,
capture_output=capture_output,
msg="Error pulling manager container",
)
# Run manager container.
docker_parts = ["docker", "run"] + flags + [manager_image] + cmd_parts
log.debug(" ".join(pipes.quote(s) for s in docker_parts))
client_utils.subprocess_run(
docker_parts,
msg="Error starting manager container",
capture_output=capture_output,
)
# When in debug mode the manager is run attached and it makes no sense to
# follow that up with waiting for the manager to start.
if not _manager_debug():
log.debug(f"Verifying manager docker startup pipeline_id={pipeline_id}")
def _get_docker_output():
p = subprocess.run(["docker", "ps"], stdout=subprocess.PIPE)
return p.stdout.decode("utf-8")
pl = constants.PipelineLifecycle
target = pl.active - pl.standby
# wait 45 seconds, but this should be quick
for _ in range(
int(
constants.ManagerAppParams.WAIT_TIME_SECS
/ constants.ManagerAppParams.POLL_INTERVAL_SECS
)
):
time.sleep(constants.ManagerAppParams.POLL_INTERVAL_SECS)
log.debug(f"awaiting program {pipeline_id} active")
data = api.Pipeline().get(token, pipeline_id)
if data["status"] in target and data["pgw"] not in ["", None]:
break
| |
re.match(r'\+B|B', Bboard.b1g)and b3i==''\
and board.s2h=='':
moves = '1g3i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b1g)and b3e==''\
and board.s2f=='':
moves = '1g3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b1g)and b4d==''\
and board.s2f+board.s3e=='':
moves = '1g4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b1g)and b5c==''\
and board.s2f+board.s3e+board.s4d=='':
moves = '1g5c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b1g)and b6b==''\
and board.s2f+board.s3e+board.s4d+board.s5c=='':
moves = '1g6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b1g)and b7a==''\
and board.s2f+board.s3e+board.s4d+board.s5c+board.s6b=='':
moves = '1g7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.b2g !='':
if re.match(r'[PLSGRK+]', Bboard.b2g)and b2f=='':
moves = '2g2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b2g)and b1f=='':
moves = '2g1f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b2g)and b3f=='':
moves = '2g3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b2g)and b1g=='':
moves = '2g1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b2g)and b3g=='':
moves = '2g3g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b2g)and b2h=='':
moves = '2g2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b2g)and b1h=='':
moves = '2g1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b2g)and b3h=='':
moves = '2g3h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b2g)and b1e=='':
moves = '2g1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b2g)and b3e=='':
moves = '2g3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b2g)and b2a==''\
and board.s2b+board.s2c+board.s2d+board.s2e+board.s2f=='':
moves = '2g2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b2g)and b2a==''\
and board.s2b+board.s2c+board.s2d+board.s2e+board.s2f=='':
moves = '2g2a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b2g)and b2b==''\
and board.s2c+board.s2d+board.s2e+board.s2f=='':
moves = '2g2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b2g)and b2b==''\
and board.s2c+board.s2d+board.s2e+board.s2f=='':
moves = '2g2b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|L', Bboard.b2g)and b2c==''\
and board.s2d+board.s2e+board.s2f=='':
moves = '2g2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b2g)and b2c==''\
and board.s2d+board.s2e+board.s2f=='':
moves = '2g2c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R|L', Bboard.b2g)and b2d==''\
and board.s2e+board.s2f=='':
moves = '2g2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R|L', Bboard.b2g)and b2e==''\
and board.s2f=='':
moves = '2g2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2g)and b2i==''\
and board.s2h=='':
moves = '2g2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2g)and b4g==''\
and board.s3g=='':
moves = '2g4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2g)and b5g==''\
and board.s3g+board.s4g=='':
moves = '2g5g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2g)and b6g==''\
and board.s3g+board.s4g+board.s5g=='':
moves = '2g6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2g)and b7g==''\
and board.s3g+board.s4g+board.s5g+board.s6g=='':
moves = '2g7g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2g)and b8g==''\
and board.s3g+board.s4g+board.s5g+board.s6g+board.s7g=='':
moves = '2g8g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b2g)and b9g==''\
and board.s3g+board.s4g+board.s5g+board.s6g+board.s7g+board.s8g=='':
moves = '2g9g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B',Bboard.b2g)and b6c==''\
and board.s3f+board.s4e+board.s5d=='':
moves = '2g6c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B',Bboard.b2g)and b7b==''\
and board.s3f+board.s4e+board.s5d+board.s6c=='':
moves = '2g7b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B',Bboard.b2g)and b8a==''\
and board.s3f+board.s4e+board.s5d+board.s6c+board.s7b=='':
moves = '2g8a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b2g)and b4e==''\
and board.s3f=='':
moves = '2g4e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b2g)and b5d==''\
and board.s3f+board.s4e=='':
moves = '2g5d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b2g)and b6c==''\
and board.s3f+board.s4e+board.s5d=='':
moves = '2g6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b2g)and b7b==''\
and board.s3f+board.s4e+board.s5d+board.s6c=='':
moves = '2g7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b2g)and b8a==''\
and board.s3f+board.s4e+board.s5d+board.s6c+board.s7b=='':
moves = '2g8a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b2g)and b4i==''\
and board.s3h=='':
moves = '2g4i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.b3g !='':
if re.match(r'[PLSGRK+]', Bboard.b3g)and b3f=='':
moves = '3g3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b3g)and b2f=='':
moves = '3g2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b3g)and b4f=='':
moves = '3g4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b3g)and b2g=='':
moves = '3g2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b3g)and b4g=='':
moves = '3g4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b3g)and b3h=='':
moves = '3g3h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b3g)and b2h=='':
moves = '3g2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b3g)and b4h=='':
moves = '3g4h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b3g)and b2e=='':
moves = '3g2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b3g)and b4e=='':
moves = '3g4e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b3g)and b3a==''\
and board.s3b+board.s3c+board.s3d+board.s3e+board.s3f=='':
moves = '3g3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b3g)and b3a==''\
and board.s3b+board.s3c+board.s3d+board.s3e+board.s3f=='':
moves = '3g3a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b3g)and b3b==''\
and board.s3c+board.s3d+board.s3e+board.s3f=='':
moves = '3g3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b3g)and b3b==''\
and board.s3c+board.s3d+board.s3e+board.s3f=='':
moves = '3g3b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|L', Bboard.b3g)and b3c==''\
and board.s3d+board.s3e+board.s3f=='':
moves = '3g3c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b3g)and b3c==''\
and board.s3d+board.s3e+board.s3f=='':
moves = '3g3c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R|L', Bboard.b3g)and b3d==''\
and board.s3e+board.s3f=='':
moves = '3g3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R|L', Bboard.b3g)and b3e==''\
and board.s3f=='':
moves = '3g3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b3g)and b3i==''\
and board.s3h=='':
moves = '3g3i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b3g)and b1g==''\
and board.s2g=='':
moves = '3g1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b3g)and b5g==''\
and board.s4g=='':
moves = '3g5g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b3g)and b6g==''\
and board.s4g+board.s5g=='':
moves = '3g6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b3g)and b7g==''\
and board.s4g+board.s5g+board.s6g=='':
moves = '3g7g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b3g)and b8g==''\
and board.s4g+board.s5g+board.s6g+board.s7g=='':
moves = '3g8g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|R', Bboard.b3g)and b9g==''\
and board.s4g+board.s5g+board.s6g+board.s7g+board.s8g=='':
moves = '3g9g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B',Bboard.b3g)and b7c==''\
and board.s4f+board.s5e+board.s6d=='':
moves = '3g7c+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B',Bboard.b3g)and b8b==''\
and board.s4f+board.s5e+board.s6d+board.s7c=='':
moves = '3g8b+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('B',Bboard.b3g)and b9a==''\
and board.s4f+board.s5e+board.s6d+board.s7c+board.s8b=='':
moves = '3g9a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b3g)and b1i==''\
and board.s2h=='':
moves = '3g1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b3g)and b5e==''\
and board.s4f=='':
moves = '3g5e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b3g)and b6d==''\
and board.s4f+board.s5e=='':
moves = '3g6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b3g)and b7c==''\
and board.s4f+board.s5e+board.s6d=='':
moves = '3g7c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b3g)and b8b==''\
and board.s4f+board.s5e+board.s6d+board.s7c=='':
moves = '3g8b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+B', Bboard.b3g)and b9a==''\
and board.s4f+board.s5e+board.s6d+board.s7c+board.s8b=='':
moves = '3g9a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b3g)and b5i==''\
and board.s4h=='':
moves = '3g5i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+B|B', Bboard.b3g)and b1e==''\
and board.s2f=='':
moves = '3g1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Bboard.b4g !='':
if re.match(r'[PLSGRK+]', Bboard.b4g)and b4f=='':
moves = '4g4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b4g)and b3f=='':
moves = '4g3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[SGBK+]', Bboard.b4g)and b5f=='':
moves = '4g5f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b4g)and b3g=='':
moves = '4g3g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b4g)and b5g=='':
moves = '4g5g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[GRK+]', Bboard.b4g)and b4h=='':
moves = '4g4h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b4g)and b3h=='':
moves = '4g3h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+R|\+B|B|S|K',Bboard.b4g)and b5h=='':
moves = '4g5h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b4g)and b3e=='':
moves = '4g3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('N', Bboard.b4g)and b5e=='':
moves = '4g5e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b4g)and b4a==''\
and board.s4b+board.s4c+board.s4d+board.s4e+board.s4f=='':
moves = '4g4a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b4g)and b4a==''\
and board.s4b+board.s4c+board.s4d+board.s4e+board.s4f=='':
moves = '4g4a+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+R', Bboard.b4g)and b4b==''\
and board.s4c+board.s4d+board.s4e+board.s4f=='':
moves = '4g4b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'R|L', Bboard.b4g)and b4b==''\
and board.s4c+board.s4d+board.s4e+board.s4f=='':
moves = '4g4b+'
| |
import os.path
import sys
import types
import typing
import unittest
from datetime import datetime, date
from functools import wraps
from io import BytesIO, StringIO
from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, \
AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, \
NoReturn, ClassVar
from enum import Enum, IntEnum
from pedantic import pedantic_class
from pedantic.exceptions import PedanticTypeCheckException, PedanticException, PedanticCallWithArgsException, \
PedanticTypeVarMismatchException
from pedantic.decorators.fn_deco_pedantic import pedantic
# Scratch file path used by file-related tests; cleaned up in tearDown.
TEST_FILE = 'test.txt'
class Parent:
    """Empty base class used as a fixture for inheritance tests below."""
    pass
class Child(Parent):
    """Fixture subclass of Parent.

    Note: ``method`` carries no return annotation and an unused parameter;
    presumably intentional for the decorator tests -- verify before changing.
    """
    def method(self, a: int):
        pass
class TestDecoratorRequireKwargsAndTypeCheck(unittest.TestCase):
def tearDown(self) -> None:
    """Remove the scratch file if a test created it."""
    leftover = TEST_FILE
    if os.path.isfile(leftover):
        os.remove(leftover)
def test_no_kwargs(self):
    """@pedantic rejects positional arguments: calls must be keyword-only."""
    @pedantic
    def calc(n: int, m: int, i: int) -> int:
        return n + m + i
    # Fully positional and mixed positional/keyword calls both raise.
    with self.assertRaises(expected_exception=PedanticCallWithArgsException):
        calc(42, 40, 38)
    with self.assertRaises(expected_exception=PedanticCallWithArgsException):
        calc(42, m=40, i=38)
    # An all-keyword call is accepted.
    calc(n=42, m=40, i=38)
def test_nested_type_hints_1(self):
    """Problem here: ``[0.0 * n]`` is List[float], not List[List[float]]."""
    @pedantic
    def calc(n: int) -> List[List[float]]:
        return [0.0 * n]
    with self.assertRaises(expected_exception=PedanticTypeCheckException):
        calc(n=42)
def test_nested_type_hints_1_corrected(self):
@pedantic
def calc(n: int) -> List[List[float]]:
return [[0.0 * n]]
calc(n=42)
def test_nested_type_hints_2(self):
"""Problem here: int != float"""
@pedantic
def calc(n: int) -> List[Tuple[float, str]]:
return [(n, str(n))]
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_nested_type_hints_2_corrected(self):
@pedantic
def calc(n: int) -> List[Tuple[int, str]]:
return [(n, str(n))]
@pedantic
def calc_2(n: float) -> List[Tuple[float, str]]:
return [(n, str(n))]
calc(n=42)
calc_2(n=42.0)
def test_nested_type_hints_3(self):
"""Problem here: inner function actually returns Tuple[int, str]"""
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return n * x, str(y)
return f
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)(x=3, y=3.14)
def test_nested_type_hints_3_corrected(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[int, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[int, str]:
return n * x, str(y)
return f
calc(n=42)(x=3, y=3.14)
def test_nested_type_hints_4(self):
"""Problem here: return type is actually float"""
@pedantic
def calc(n: List[List[float]]) -> int:
return n[0][0]
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=[[42.0]])
def test_nested_type_hints_corrected(self):
@pedantic
def calc(n: List[List[float]]) -> int:
return int(n[0][0])
calc(n=[[42.0]])
def test_nested_type_hints_5(self):
"""Problem here: Tuple[float, str] != Tuple[float, float]"""
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, float]:
return n * float(x), y
return f
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_nested_type_hints_5_corrected(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, float]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, float]:
return n * float(x), y
return f
calc(n=42)
def test_missing_type_hint_1(self):
    """Problem here: type hint for n missed"""
    @pedantic
    def calc(n) -> float:
        return 42.0 * n
    # The missing annotation on `n` is detected at call time.
    with self.assertRaises(expected_exception=PedanticTypeCheckException):
        calc(n=42)
def test_missing_type_hint_1_corrected(self):
@pedantic
def calc(n: int) -> float:
return 42.0 * n
calc(n=42)
def test_missing_type_hint_2(self):
"""Problem here: Return type annotation missed"""
@pedantic
def calc(n: int):
return 'Hi' + str(n)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_missing_type_hint_2_corrected(self):
@pedantic
def calc(n: int) -> str:
return 'Hi' + str(n)
calc(n=42)
def test_missing_type_hint_3(self):
"""Problem here: type hint for i missed"""
@pedantic
def calc(n: int, m: int, i) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_missing_type_hint_3_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
calc(n=42, m=40, i=38)
def test_all_ok_2(self):
@pedantic
def calc(n: int, m: int, i: int) -> str:
return str(n + m + i)
calc(n=42, m=40, i=38)
def test_all_ok_3(self):
@pedantic
def calc(n: int, m: int, i: int) -> None:
str(n + m + i)
calc(n=42, m=40, i=38)
def test_all_ok_4(self):
@pedantic
def calc(n: int) -> List[List[int]]:
return [[n]]
calc(n=42)
def test_all_ok_5(self):
@pedantic
def calc(n: int) -> List[Tuple[float, str]]:
return [(float(n), str(n))]
calc(n=42)
def test_all_ok_6(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return n * float(x), str(y)
return f
calc(n=42)(x=72, y=3.14)
def test_all_ok_7(self):
@pedantic
def calc(n: List[List[float]]) -> Any:
return n[0][0]
calc(n=[[42.0]])
def test_all_ok_8(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return n * float(x), str(y)
return f
calc(n=42)(x=3, y=3.14)
def test_wrong_type_hint_1(self):
"""Problem here: str != int"""
@pedantic
def calc(n: int, m: int, i: int) -> str:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_1_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> str:
return str(n + m + i)
calc(n=42, m=40, i=38)
def test_wrong_type_hint_2(self):
"""Problem here: str != int"""
@pedantic
def calc(n: int, m: int, i: str) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_2_corrected(self):
@pedantic
def calc(n: int, m: int, i: str) -> int:
return n + m + int(i)
calc(n=42, m=40, i='38')
def test_wrong_type_hint_3(self):
"""Problem here: None != int"""
@pedantic
def calc(n: int, m: int, i: int) -> None:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> None:
print(n + m + i)
calc(n=42, m=40, i=38)
def test_wrong_type_hint_4(self):
"""Problem here: None != int"""
@pedantic
def calc(n: int, m: int, i: int) -> int:
print(n + m + i)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_4_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
calc(n=42, m=40, i=38)
def test_none_1(self):
"""Problem here: None is not accepted"""
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=None)
def test_none_2(self):
@pedantic
def calc(n: int, m: int, i: Optional[int]) -> int:
return n + m + i if i is not None else n + m
calc(n=42, m=40, i=None)
def test_none_3(self):
@pedantic
def calc(n: int, m: int, i: Union[int, None]) -> int:
return n + m + i if i is not None else n + m
calc(n=42, m=40, i=None)
def test_none_4(self):
    """Problem here: function may return None"""
    @pedantic
    def calc(n: int, m: int, i: Union[int, None]) -> int:
        return n + m + i if i is not None else None
    # With a concrete int the declared `int` return holds.
    calc(n=42, m=40, i=42)
    # With i=None the function returns None, violating the `int` return hint.
    with self.assertRaises(expected_exception=PedanticTypeCheckException):
        calc(n=42, m=40, i=None)
def test_none_5(self):
@pedantic
def calc(n: int, m: int, i: Union[int, None]) -> Optional[int]:
return n + m + i if i is not None else None
calc(n=42, m=40, i=None)
def test_inheritance_1(self):
class MyClassA:
pass
class MyClassB(MyClassA):
pass
@pedantic
def calc(a: MyClassA) -> str:
return str(a)
calc(a=MyClassA())
calc(a=MyClassB())
def test_inheritance_2(self):
"""Problem here: A is not a subtype of B"""
class MyClassA:
pass
class MyClassB(MyClassA):
pass
@pedantic
def calc(a: MyClassB) -> str:
return str(a)
calc(a=MyClassB())
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(a=MyClassA())
def test_instance_method_1(self):
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
a.calc(i=42)
def test_instance_method_2(self):
"""Problem here: 'i' has no type annotation"""
class MyClassA:
@pedantic
def calc(self, i) -> str:
return str(i)
a = MyClassA()
with self.assertRaises(expected_exception=PedanticTypeCheckException):
a.calc(i=42)
def test_instance_method_2_corrected(self):
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
a.calc(i=42)
def test_instance_method_int_is_not_float(self):
class MyClassA:
@pedantic
def calc(self, i: float) -> str:
return str(i)
a = MyClassA()
with self.assertRaises(expected_exception=PedanticTypeCheckException):
a.calc(i=42)
def test_instance_method_3_corrected(self):
class MyClassA:
@pedantic
def calc(self, i: float) -> str:
return str(i)
a = MyClassA()
a.calc(i=42.0)
def test_instance_method_no_kwargs(self):
    """Instance methods, too, must be called with keyword arguments only."""
    class MyClassA:
        @pedantic
        def calc(self, i: int) -> str:
            return str(i)
    a = MyClassA()
    # Positional call raises even though the types are correct.
    with self.assertRaises(expected_exception=PedanticCallWithArgsException):
        a.calc(42)
def test_instance_method_5(self):
"""Problem here: instance methods is not called with kwargs"""
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
a.calc(i=42)
def test_lambda_1(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
return lambda x: str(x * i)
calc(i=42.0)(10.0)
def test_lambda_3(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
def res(x: float) -> str:
return str(x * i)
return res
calc(i=42.0)(10.0)
def test_lambda_int_is_not_float(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
def res(x: int) -> str:
return str(x * i)
return res
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=42.0)(x=10)
def test_lambda_4_almost_corrected(self):
"""Problem here: float != str"""
@pedantic
def calc(i: float) -> Callable[[float], str]:
@pedantic
def res(x: int) -> str:
return str(x * i)
return res
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=42.0)(x=10)
def test_lambda_4_almost_corrected_2(self):
@pedantic
def calc(i: float) -> Callable[[int], str]:
@pedantic
def res(x: int) -> str:
return str(x * i)
return res
calc(i=42.0)(x=10)
def test_lambda_5(self):
"""Problem here: float != int"""
@pedantic
def calc(i: float) -> Callable[[float], str]:
@pedantic
def res(x: float) -> str:
return str(x * i)
return res
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=42.0)(x=10)
def test_lambda_corrected(self):
| |
<gh_stars>10-100
from typing import Union, List, Optional, Callable, Dict, Sequence
import numpy
import pandas
import seaborn
import sklearn
from joblib import Parallel, delayed
from matplotlib import pyplot, axes
from numpy.random.mtrand import RandomState
from sklearn import clone
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
from sklearn.utils import shuffle
def plot_confusion_matrix(y_test: numpy.ndarray, y_pred: numpy.ndarray, labels: List[Union[str, int]],
                          sample_weight: Optional[List[float]] = None, annot_kws=None, cbar=True, cbar_kws=None,
                          **kwargs) -> axes.Axes:
    """
    Computes and plots the confusion matrix, False Positive Rate, False Negative Rate, Accuracy and F1 score of a
    classification.

    :param y_test: array, shape = [n_samples]. Ground truth (correct) target values.
    :param y_pred: array, shape = [n_samples]. Estimated targets as returned by a classifier.
    :param labels: array, shape = [n_classes]. List of labels to index the matrix. This may be used to reorder or
           select a subset of labels.
    :param sample_weight: array-like of shape = [n_samples], optional
           Sample weights.
    :param annot_kws: dict of key, value mappings, optional
           Keyword arguments for ``ax.text``.
    :param cbar: boolean, optional
           Whether to draw a colorbar.
    :param cbar_kws: dict of key, value mappings, optional
           Keyword arguments for ``figure.colorbar``
    :param kwargs: other keyword arguments
           All other keyword arguments are passed to ``seaborn.heatmap``.
    :raises ValueError: if fewer than two labels are supplied.
    :return: Returns the flattened array of the three Axes (summary, heatmap, footer) with the matrix drawn onto them.
    """
    if len(labels) < 2:
        raise ValueError("Number of labels must be greater than 1")
    cnf_matrix = confusion_matrix(y_test, y_pred, labels=labels, sample_weight=sample_weight)
    if len(labels) == 2:
        # Binary case: ravel() yields the four cells in (tn, fp, fn, tp) order.
        tn, fp, fn, tp = cnf_matrix.ravel()
        npv, ppv, tnr, tpr = _calc_precision_recall(fn, fp, tn, tp)
        # FIX: the ``numpy.float`` and ``numpy.NaN`` aliases were removed in modern
        # numpy releases; use the builtin ``float`` and ``numpy.nan`` instead.
        table = numpy.array([[tn, fp, tnr], [fn, tp, tpr], [npv, ppv, numpy.nan]], dtype=float)
        df = pandas.DataFrame(table, columns=[f"{labels[0]} - Predicted", f"{labels[1]} - Predicted", "Recall"],
                              index=[f"{labels[0]} - Actual", f"{labels[1]} - Actual", "Precision"])
    else:
        # Multi-class case: derive per-class fp/fn/tp/tn from the matrix margins.
        fp = (cnf_matrix.sum(axis=0) - numpy.diag(cnf_matrix)).astype(float)
        fn = (cnf_matrix.sum(axis=1) - numpy.diag(cnf_matrix)).astype(float)
        tp = (numpy.diag(cnf_matrix)).astype(float)
        tn = (cnf_matrix.sum() - (fp + fn + tp)).astype(float)
        _, ppv, tnr, tpr = _calc_precision_recall(fn, fp, tn, tp)
        df = pandas.DataFrame(cnf_matrix, columns=[f"{label} - Predicted" for label in labels],
                              index=[f"{label} - Actual" for label in labels])
        df["Recall"] = tpr
        # FIX: ``DataFrame.append`` was removed in pandas 2.0; ``pandas.concat`` is
        # the supported equivalent.
        df = pandas.concat(
            [df, pandas.DataFrame([ppv], columns=[f"{label} - Predicted" for label in labels], index=["Precision"])],
            sort=False)
    figure, subplots = pyplot.subplots(nrows=3, ncols=1, gridspec_kw={'height_ratios': [1, 8, 1]})
    subplots = subplots.flatten()
    # Top panel: textual FPR/FNR summary (scalars for binary, arrays otherwise).
    subplots[0].set_axis_off()
    if len(labels) == 2:
        subplots[0].text(0, 0.85, f"False Positive Rate: {1 - tnr:.4f}")
        subplots[0].text(0, 0.35, f"False Negative Rate: {1 - tpr:.4f}")
    else:
        subplots[0].text(0, 0.85, f"False Positive Rate: {numpy.array2string(1 - tnr, precision=2, separator=',')}")
        subplots[0].text(0, 0.35, f"False Negative Rate: {numpy.array2string(1 - tpr, precision=2, separator=',')}")
    subplots[0].text(0, -0.5, "Confusion Matrix:")
    # Middle panel: the annotated confusion-matrix heatmap.
    seaborn.heatmap(df, annot=True, fmt=".3f", ax=subplots[1], annot_kws=annot_kws, cbar=cbar, cbar_kws=cbar_kws,
                    **kwargs)
    # Bottom panel: accuracy and F1 summary.
    subplots[2].set_axis_off()
    subplots[2].text(0, 0.15, f"Accuracy: {accuracy_score(y_test, y_pred, sample_weight=sample_weight):.4f}")
    if len(labels) == 2:
        f_score = f1_score(y_test, y_pred, labels=labels, pos_label=labels[1], average="binary",
                           sample_weight=sample_weight)
    else:
        f_score = f1_score(y_test, y_pred, labels=labels, average="micro", sample_weight=sample_weight)
    subplots[2].text(0, -0.5, f"F1 Score: {f_score:.4f}")
    return subplots
def _calc_precision_recall(fn, fp, tn, tp):
tpr = (tp / (tp + fn))
tnr = (tn / (tn + fp))
npv = (tn / (tn + fn))
ppv = (tp / (tp + fp))
return npv, ppv, tnr, tpr
# NOTE(review): the ``quantiles`` list default is evaluated once at import time; it
# is never mutated below, so the sharing is harmless, but a None default resolved
# inside the function body would be the safer idiom.
def plot_metric_growth_per_labeled_instances(X_train: numpy.ndarray, y_train: numpy.ndarray, X_test: numpy.ndarray,
                                             y_test: numpy.ndarray,
                                             classifiers_dict: Dict[str, sklearn.base.ClassifierMixin],
                                             n_samples: Optional[List[int]] = None,
                                             quantiles: Optional[List[float]] = numpy.linspace(0.05, 1, 20).tolist(),
                                             metric: Callable[[numpy.ndarray,
                                                               numpy.ndarray], float] = accuracy_score,
                                             random_state: Optional[Union[int, RandomState]] = None,
                                             n_jobs: Optional[int] = None, verbose: int = 0,
                                             pre_dispatch: Optional[Union[int, str]] = "2*n_jobs", *,
                                             ax: Optional[axes.Axes] = None, **kwargs) -> axes.Axes:
    """
    Receives a train and test sets, and plots given metric change in increasing amount of trained instances.

    :param X_train: {array-like or sparse matrix} of shape (n_samples, n_features)
        The training input samples.
    :param y_train: 1d array-like, or label indicator array / sparse matrix
        The target values (class labels) as integers or strings.
    :param X_test: {array-like or sparse matrix} of shape (n_samples, n_features)
        The test or evaluation input samples.
    :param y_test: 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    :param classifiers_dict: mapping from classifier name to classifier object.
    :param n_samples: List of numbers of samples for training batches, optional (default=None).
    :param quantiles: List of percentages of samples for training batches, optional (default=[0.05, 0.1, 0.15, 0.2,
           0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1].
           Used when n_samples=None.
    :param metric: sklearn.metrics api function which receives y_true and y_pred and returns float.
    :param random_state: int, RandomState instance or None, optional (default=None)
           The seed of the pseudo random number generator to use when shuffling the data.
           * If int, random_state is the seed used by the random number generator;
           * If RandomState instance, random_state is the random number generator;
           * If None, the random number generator is the RandomState instance initiated with seed zero.
    :param n_jobs: int or None, optional (default=None)
        Number of jobs to run in parallel.
        * ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        * ``-1`` means using all processors.
    :param verbose: integer. Controls the verbosity: the higher, the more messages.
    :param pre_dispatch: int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    :param ax: Axes object to draw the plot onto, otherwise uses the current Axes.
    :param kwargs: other keyword arguments
           All other keyword arguments are passed to ``matplotlib.axes.Axes.pcolormesh()``.
    :return: Returns the Axes object with the plot drawn onto it.
    """
    if ax is None:
        pyplot.figure()
        ax = pyplot.gca()
    if random_state is None:
        # Default to seed zero so repeated calls are reproducible.
        random_state = RandomState(seed=0)
    elif not isinstance(random_state, RandomState):
        random_state = RandomState(seed=random_state)
    if n_samples is None:
        if quantiles is not None:
            n_samples = [int(quantile * X_train.shape[0]) for quantile in quantiles]
        else:
            raise ValueError("n_samples must be specified if quantiles is None")
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
    classifiers_dict_results = dict()
    # One shuffled subsample per requested training-set size; the same partitions are
    # reused for every classifier so their curves are comparable.
    samples_list = [shuffle(X_train, y_train, random_state=random_state, n_samples=n_sample) for n_sample in n_samples]
    with parallel:
        for classifier_name, classifier in classifiers_dict.items():
            if verbose > 0:
                print(f"Fitting classifier {classifier_name} for {len(n_samples)} times")
            # Each partition is fitted on a fresh clone so the fits stay independent.
            scores = parallel(delayed(_perform_data_partition_and_evaluation)(x_train_part, y_train_part,
                                                                              X_test, y_test, clone(classifier),
                                                                              metric) for
                              x_train_part, y_train_part in samples_list)
            classifiers_dict_results.update({classifier_name: scores})
    for classifier_name, scores in classifiers_dict_results.items():
        ax.plot(n_samples, scores, label=classifier_name, **kwargs)
    # NOTE(review): the plot **kwargs are forwarded to legend() as well; kwargs valid
    # for plot() but not for legend() would raise here — confirm this is intended.
    ax.legend(loc="lower right", **kwargs)
    return ax
def _perform_data_partition_and_evaluation(X_train, y_train, X_test, y_test, classifier, metric):
if y_train.shape[1] == 1:
classifier.fit(X_train, y_train.values.ravel())
else:
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
return metric(y_test, y_pred)
def visualize_accuracy_grouped_by_probability(y_test: numpy.ndarray, labeled_class: Union[str, int],
probabilities: numpy.ndarray,
threshold: float = 0.5, display_breakdown: bool = False,
bins: Optional[Union[int, Sequence[float], pandas.IntervalIndex]] = None,
*, ax: Optional[axes.Axes] = None, **kwargs) -> axes.Axes:
"""
Receives test true labels and classifier probabilities predictions, divide and classify the results and finally
plots a stacked bar chart with the results.
:param y_test: array, shape = [n_samples]. Ground truth (correct) target values.
:param labeled_class: the class to enquire for.
:param probabilities: array, shape = [n_samples]. classifier probabilities for the labeled class.
:param threshold: the probability threshold for classifying the labeled class.
:param display_breakdown: if True the results will be displayed as "correct" and "incorrect"; otherwise
as "true-positives", "true-negative", "false-positives" and "false-negative"
:param bins: int, sequence of scalars, or IntervalIndex
The criteria to bin by.
* int : Defines the number of equal-width bins in the range of x. The range of x is extended by .1% on each side to include the minimum and maximum values of x.
* sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of x is done.
* IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for bins must be non-overlapping.
default: [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
:param ax: Axes object to draw the plot onto, otherwise uses the current Axes.
:param kwargs: other keyword arguments
All other keyword arguments are passed to ``matplotlib.axes.Axes.pcolormesh()``.
:return: Returns | |
<filename>Code/Experiment - versions/code_temp.py
#####################################################################################################
## Deze code werd geschreven in het kader van de masterproef van <NAME> #
## De code werd geschreven in PsychoPy, en is daardoor misschien niet volledig executable in Python #
#####################################################################################################
from __future__ import division
from psychopy import core, visual, event
from random import randint
from random import randrange, uniform
from random import shuffle
import random
import datetime
import functools
import time
import datetime
import numpy as np
import pandas as pd
import os
import csv
import sys
#######################################################################################################################################################################################
# !! Set the participant number here before every session !!
proefpersoonNR = 1  # participant ("proefpersoon") number; used in every input/output filename
#######################################################################################################################################################################################
# Window properties for the experiment display
win = visual.Window(size = (800,600), color='white')
## win = visual.Window(fullscr = True, color='white')  # full-screen variant used in the lab
#######################################################################################################################################################################################
# Working directory
# Phase 1
# Practice block (pre-generated randomisation files for the exercise phase)
os.chdir('C:/Users/Pieter/Dropbox/Academiejaar 2016-2017/Masterproef I/Code/RandomisatieFiles/Excercise phase')
#######################################################################################################################################################################################
# Explanation block
# Phase 1
# Experimental block
# In this part the participants see a number of products that can be bought in the shop.
# On top of that, prices are shown right after the products appear.
# The participants must indicate whether the price they see is EXPENSIVE or CHEAP for the product shown JUST BEFORE the price.
# The goal is that they respond to the price as fast as possible using the 'f' and 'j' keys
##   (these are covered with tape in the lab itself (the keys are referred to as 'right/left'))
# The escape key can be used to break out of the experiment.
# The expectation is that participants:
#   respond faster to cheap vs. expensive prices
#   respond faster to more extreme prices (price 1, 2, 5 & 6)
# Both the reaction times and the responses given are recorded and written out.
# In this part the participants get the chance to respond as fast as possible.
# They can only respond with the f and j keys; the other keys do not work (apart from 'escape', which can be used as a break).
# Reaction times are measured for every trial and appended to a list.
# This experiment uses an adaptive response threshold:
#   a response deadline is set based on their reaction time.
#   So, the slower they are, the longer they get before their answer is considered 'too slow'.
# This was done with the variation in response speed within the population in mind.
# In total there are 336 trials, in which the participants see every combination of all 8 products and 7 prices 6 times.
# That gives 56 (7*8) price-product combinations, each shown 6 times, so 56*6 = 336 trials.
# The randomisation files for this experiment were created beforehand; the randomised order of the products & prices is read from an external file.
# This was done so as not to burden the PC with computing a randomisation on the fly.
#######################################################################################################################################################################################
# Working directory
# Phase 1
# Experimental block (pre-generated trial-order files)
os.chdir('C:/Users/Pieter/Dropbox/Academiejaar 2016-2017/Masterproef I/Code/RandomisatieFiles/Experiment order files')
# Inlezen en verwerken van file
# Randomisation file
# Fase 1
# Experimental block
dataPP = []
with open("ProductPrice_Order_PP_%02d.txt" %proefpersoonNR, 'r') as f:
reader = csv.reader(f, dialect = 'excel', delimiter = '\t')
for row in reader:
print row
dataPP.append(row)
del(dataPP[0])
clean = []
for i in range(336):
nodig = dataPP[i]
del(nodig[0])
clean.append(nodig)
trial = clean
print '@@@'
print len(trial), trial
print '@@@'
#######################################################################################################################################################################################
# Definitions
# Phase 1
# Experimental block
# Participant-facing text (deliberately kept in Dutch — this is what appears on screen)
instr9 = 'Hallo, welkom bij dit experiment. \n \n(bij elk scherm waar uitleg wordt gegeven kan je verder gaan door een willekeurige toets in te drukken)'
instr10 = 'Tijdens dit experiment zal je een reeks producten gevolgd door prijzen zien verschijnen. \n \n'
instr11 = 'Na een aantal seconden zal het product verdwijnen en zal je een prijs zien verschijnen. \n \nHet is jouw taak om zo snel mogelijk aan te geven of je deze prijs GOEDKOOP of DUUR vindt voor het product dat je net zag.'
instr12 = 'Als je de prijs GOEDKOOP vindt, druk dan zo snel mogelijk op de "linker"-toets. \n\nAls je de prijs DUUR vindt, druk dan zo snel mogelijk op de "rechter"-toets.'
instr13 = 'Vanaf het moment dat de prijs getoond wordt dien je zo snel mogelijk te drukken. \n\nHet is niet mogelijk om even te oefenen, probeer je dus onmiddellijk te concentreren. Er zijn voldoende pauze voorzien, gebruik deze om wat te rusten.'
instr14 = 'Als alles duidelijk is mag je op een toets naar keuze drukken, en het experiment zal starten na 5 seconden. \n\nAls er nog vragen zijn kan je deze nu stellen aan de proefleider.'
# Countdown digits shown before the first trial
time1 = '1'
time2 = '2'
time3 = '3'
time4 = '4'
time5 = '5'
instrSlow = "Te traag"
EndInstr1 = 'Dit deel is afgelopen is afgelopen. \n \nDruk op een willekeurige toets om verder te gaan met het volgende gedeelte.'
Fatal = 'Oeps! Er is iets fout gegaan... \n \nRoep de experimentleider.'
Pause = 'Hier kan je even rusten, druk op een willekeurige knop om verder te gaan. \n \nJe mag zo lang rusten als je zelf wil.'
# PsychoPy TextStim wrappers for each of the strings above (black text, centred)
instruction9 = visual.TextStim(win, text=instr9,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
instruction10 = visual.TextStim(win, text=instr10,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
instruction11 = visual.TextStim(win, text=instr11,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
instruction12 = visual.TextStim(win, text=instr12,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
instruction13 = visual.TextStim(win, text=instr13,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
instruction14 = visual.TextStim(win, text=instr14,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
timing1 = visual.TextStim(win, text=time1,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
timing2 = visual.TextStim(win, text=time2,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
timing3 = visual.TextStim(win, text=time3,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
timing4 = visual.TextStim(win, text=time4,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
timing5 = visual.TextStim(win, text=time5,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
tooSlow = visual.TextStim(win, text=instrSlow,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
EndInstruction1 = visual.TextStim(win, text=EndInstr1,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
FatalMessage = visual.TextStim(win, text=Fatal,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
Fixationcross = visual.TextStim(win, text="+", height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
Pausing = visual.TextStim(win, text=Pause, height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
#######################################################################################################################################################################################
# Make the mouse cursor invisible
# BUG FIX: the window object created above is named ``win``; ``win_exp`` is not
# defined anywhere in this script, so referencing it raised a NameError at runtime.
mouse = event.Mouse(visible = False, newPos = (0,0), win = win)
# Show the instruction screens to the participant; any key press advances to the
# next screen (the pointless ``while True: ... break`` wrapper has been removed).
core.wait(0.5)
for instruction_screen in (instruction9, instruction10, instruction11,
                           instruction12, instruction13, instruction14):
    instruction_screen.draw()
    win.flip()
    event.waitKeys()
#######################################################################################################################################################################################
# Count down from 5 to 1, showing each digit for one second, before the first trial
for countdown_digit in (timing5, timing4, timing3, timing2, timing1):
    countdown_digit.draw()
    win.flip()
    time.sleep(1)
#######################################################################################################################################################################################
# Execution of the experiment
# Phase 1
# Experimental block
os.chdir('C:/Users/Pieter/Dropbox/Academiejaar 2016-2017/Masterproef I/Code/RandomisatieFiles/Antwoord files')
experiment_data = []  # one row per answered trial, mirrored to the fail-save file
rt = []               # reaction time (ms) of every trial so far
meanRT = []           # running mean RT, refreshed every 8 trials (adaptive deadline)
FaultyTrials = []     # trials answered slower than the current deadline
breaking = False      # NOTE(review): never read anywhere below — appears to be dead code
with open("1_Phase_1_PP_%02d_FailSave.txt" %proefpersoonNR, 'w') as f:
    writer = csv.writer(f, delimiter='\t')
    writer.writerow([datetime.datetime.now()])
    writer.writerow(['Block number','SubjectNr','Product','Prijs','Key','RT (in ms)','Block RT','TooLate'])
    try:
        for i in range(len(trial)):
            # Build the stimulus image paths for this trial's product and price
            Pathway = "C:/Users/Pieter/Dropbox/Academiejaar 2016-2017/Masterproef I/PieterHuycke_paradigma/PieterHuycke/implicit/"
            blocknumber = trial[i][0]
            Product = trial[i][1]
            Prijs = trial[i][2]
            New = Pathway+Product
            Newer = New + "/regular/"
            StimProduct = Newer + Product + ".png"
            product = visual.ImageStim(win, image= StimProduct)
            StimPrijs = Newer + Prijs + ".png"
            prijs = visual.ImageStim(win, image= StimPrijs)
            # Fixation cross for 0.5 s, then the product for 2.5 s
            timer = core.CountdownTimer(.5)
            while timer.getTime() > 0:
                Fixationcross.draw()
                win.flip()
            timer = core.CountdownTimer(2.5)
            while timer.getTime() > 0:
                product.draw()
                win.flip()
            event.clearEvents()
            win.flip(clearBuffer=True)
            # Show the price and time the keypress response in milliseconds
            prijs.draw()
            win.flip()
            t1 = int(round(time.time() * 1000))
            event.clearEvents()
            answer = event.waitKeys(keyList = ['Escape','escape', 'esc','f','j'])
            t2 = int(round(time.time() * 1000))
            reactiontime = int(t2-t1)
            print('reactiontime is %d') %reactiontime
            rt.append(reactiontime)
            # Refresh the adaptive deadline (mean of all RTs so far) every 8 trials
            if i == 0 or len(rt)%8 == 0:
                mean = sum(rt)/len(rt)
                meanRT.append(mean)
            BlockRT = meanRT[-1]
            print ('We are at trial %d') %i
            TooLate = 1
            # During the first 8 trials there is no deadline yet ("oefenblok" = practice
            # block); afterwards a "too slow" message is flashed when the response
            # exceeded the deadline. NOTE(review): the later comparison
            # ``reactiontime <= BlockRT`` with BlockRT == "oefenblok" relies on the
            # Python 2 rule that any int compares as smaller than any str — this
            # script is Python 2 only.
            while len(rt)%8 != 0:
                if len(rt) <= 8:
                    BlockRT = "oefenblok"
                    break
                else:
                    if reactiontime > BlockRT:
                        win.flip(clearBuffer=True)
                        timer = core.CountdownTimer(.5)
                        while timer.getTime() > 0:
                            tooSlow.draw()
                            win.flip()
                        break
                    else:
                        break
            # Escape aborts the whole experiment loop
            if answer[0] in ['Escape','escape', 'esc']:
                break
            # Log the response; late answers are additionally remembered in FaultyTrials
            if answer[0] in ['f','j']:
                if reactiontime <= BlockRT:
                    TooLate = 0
                    writer.writerow([blocknumber,proefpersoonNR,Product,Prijs,answer[0],reactiontime,BlockRT,TooLate])
                    experiment_data.append([blocknumber,proefpersoonNR,Product,Prijs,answer[0],reactiontime,BlockRT,TooLate])
                else:
                    writer.writerow([blocknumber,proefpersoonNR,Product,Prijs,answer[0],reactiontime,BlockRT,TooLate])
                    experiment_data.append([blocknumber,proefpersoonNR,Product,Prijs,answer[0],reactiontime,BlockRT,TooLate])
                    FaultyTrials.append(trial[i])
            win.flip(clearBuffer=True)
            event.clearEvents()
            # Self-paced pause screen after every 56 trials
            if ((i%56 == 0) and (i != 0)):
                win.flip(clearBuffer=True)
                Pausing.draw()
                win.flip()
                event.waitKeys()
            if i == (len(trial)-1):
                writer.writerow([datetime.datetime.now()])
                time.sleep(1)
    except:
        # Any error: tell the participant to call the experimenter and log the exception
        win.flip(clearBuffer=True)
        FatalMessage.draw()
        win.flip()
        event.waitKeys()
        with open("1_Phase_1_PP_%02d_LoggingFile.txt" %proefpersoonNR, 'w') as f:
            e1 = sys.exc_info()[0]
            e2 = sys.exc_info()[1]
            writer = csv.writer(f,delimiter=' ')
            writer.writerow([i,e1, e2])
#######################################################################################################################################################################################
# Write the collected data to disk
# Phase 1
# Experimental block
print len(FaultyTrials), FaultyTrials
expData = pd.DataFrame(experiment_data, columns = ['Block number','SubjectNr','Product','Prijs','Key','RT (in ms)','Block RT','TooLate'])
print len(trial)
print expData
expData.to_csv("1_Phase_1_PP_%02d.txt" %proefpersoonNR, sep = '\t')
# Show the end-of-phase message until the participant presses a key
while not event.getKeys():
    EndInstruction1.draw()
    win.flip()
#######################################################################################################################################################################################
# Uitlegblok
# Fase 2
# Familiariteit
# In dit blok krijgen de participanten ieder product te zien dat ze zagen in de eerste fase van het experiment (er waren toen 8 verschillende producten te zien)
# De bedoeling is om aan te duiden hoe zeer te participanten vertrouwd zijn met de producten die ze zagen
# Er zijn 5 opties:
# "a) Ja, ik koop dit product heel vaak (wekelijks)."
# "b) Ja, ik koop dit product vaak (maandelijks)."
# | |
# -*- coding: utf-8 -*-
from __future__ import print_function
import warnings
from collections import OrderedDict
import six
import torch
from torch.nn import Module
import torch.nn.functional as F
from . import nn_fix, utils, quant
# ---- helpers ----
def _get_kwargs(self, true_kwargs):
    """Merge the class-level default kwargs with the kwargs actually passed.

    Values supplied by the caller win over the defaults.  Only a shallow copy
    of the defaults is made, so non-atomic default values such as
    dict/list/tensor stay shared between instances.
    """
    defaults = utils.get_kwargs(self.__class__)
    if not defaults:
        return true_kwargs
    merged = dict(defaults)
    merged.update(true_kwargs)
    return merged
def _get_fix_cfg(self, name, grad=False):
if not grad:
cfg = self.nf_fix_params.get(name, {})
if "scale" in cfg:
cfg["scale"] = self._buffers["{}_fp_scale".format(name)]
else:
cfg = self.nf_fix_params_grad.get(name, {})
if "scale" in cfg:
cfg["scale"] = self._buffers["{}_grad_fp_scale".format(name)]
return cfg
def _register_fix_buffers(self, patch_register=True):
    """Register every "scale" entry of the fix configs as a module buffer.

    Buffers (unlike tensors buried in plain config dicts) are replicated
    correctly by multi-GPU data-parallel wrappers.  When *patch_register* is
    true, a warning is emitted for configured names that do not correspond to
    any parameter (or, for the non-grad configs, buffer) of the module.
    """
    param_keys = list(self._parameters.keys())
    passes = (
        (self.nf_fix_params, param_keys + list(self._buffers.keys()),
         "{}_fp_scale", "fixed config"),
        (self.nf_fix_params_grad, param_keys,
         "{}_grad_fp_scale", "grads fixed config"),
    )
    for cfg_dict, known_names, scale_fmt, cfg_kind in passes:
        for name, cfg in six.iteritems(cfg_dict):
            if patch_register and name not in known_names:
                warnings.warn(
                    "{} not available in {}, this specific {} "
                    "will not have effects".format(name, self, cfg_kind)
                )
            if "scale" in cfg:
                self.register_buffer(scale_fmt.format(name), cfg["scale"])
# --------
def get_fix_forward(cur_cls):
    """Build the ``forward`` used by every class generated through :class:`FixMeta`.

    The returned function quantizes all parameters and buffers of the module
    according to its fixed-point configs and then delegates to the
    floating-point ``forward`` of the wrapped base class *cur_cls*.
    """
    # pylint: disable=protected-access
    def fix_forward(self, inputs, **kwargs):
        # Normalize to a dict so it can double as the ``kwarg_cfg`` for quantize.
        if not isinstance(inputs, dict):
            inputs = {"inputs": inputs}
        for n, param in six.iteritems(self._parameters):
            # NOTE: Since Pytorch>=1.5.0, parameters in DataParallel replica are no longer
            # registered in the _parameters dict, so this mechanism will no longer work.
            # Thus for now, only Pytorch<1.5.0 versions are supported if DataParallel is used!
            if not isinstance(param, (torch.Tensor, torch.autograd.Variable)):
                continue
            fix_cfg = _get_fix_cfg(self, n)
            fix_grad_cfg = _get_fix_cfg(self, n, grad=True)
            set_n, _ = quant.quantize(
                param, fix_cfg, fix_grad_cfg, kwarg_cfg=inputs, name=n
            )
            # Shadow the parameter on the instance so the base forward sees the
            # quantized tensor (object.__setattr__ bypasses Module's attr logic).
            object.__setattr__(self, n, set_n)
        for n, param in six.iteritems(self._buffers):
            if not isinstance(param, (torch.Tensor, torch.autograd.Variable)):
                continue
            fix_cfg = _get_fix_cfg(self, n)
            fix_grad_cfg = _get_fix_cfg(self, n, grad=True)
            set_n, _ = quant.quantize(
                param, fix_cfg, fix_grad_cfg, kwarg_cfg=inputs, name=n
            )
            object.__setattr__(self, n, set_n)
        res = super(cur_cls, self).forward(inputs["inputs"], **kwargs)
        for n, param in six.iteritems(self._buffers):
            # set buffer back, as there will be no gradient, just in-place modification
            # FIXME: For fixed-point batch norm,
            # the running mean/var accumulattion is on quantized mean/var,
            # which means it might fail to update the running mean/var
            # if the updating momentum is too small
            updated_buffer = getattr(self, n)
            if updated_buffer is not self._buffers[n]:
                self._buffers[n].copy_(updated_buffer)
        return res
    return fix_forward
class FixMeta(type):
    """Metaclass that derives a ``<Base>_fix`` class from a floating-point base.

    It renames the class (default: first base's name + ``"_fix"``), installs the
    quantizing ``forward`` produced by :func:`get_fix_forward`, and registers
    the resulting class as an attribute of the :mod:`nn_fix` namespace.
    """

    def __new__(mcs, name, bases, attrs):
        # Construct class name
        if not attrs.get("__register_name__", None):
            attrs["__register_name__"] = bases[0].__name__ + "_fix"
        name = attrs["__register_name__"]
        cls = super(FixMeta, mcs).__new__(mcs, name, bases, attrs)
        # if already subclass (i.e. the base was itself produced by FixMeta),
        # the quantizing forward is inherited and must not be wrapped again
        if not isinstance(bases[0], FixMeta):
            cls.forward = get_fix_forward(cur_cls=cls)
        setattr(nn_fix, name, cls)
        return cls
def register_fix_module(cls, register_name=None):
    """Generate and register a fixed-point variant of torch module class *cls*.

    The generated class (named *register_name*, default ``<cls.__name__>_fix``)
    pops the mandatory ``nf_fix_params`` and optional ``nf_fix_params_grad``
    keyword arguments before delegating to the wrapped ``__init__``, then
    registers any "scale" config entries as module buffers.
    """
    @six.add_metaclass(FixMeta)
    class __a_not_use_name(cls):
        # FixMeta renames the class, so this literal name never surfaces.
        __register_name__ = register_name

        def __init__(self, *args, **kwargs):
            kwargs = _get_kwargs(self, kwargs)
            # Pop and parse fix configuration from kwargs
            assert "nf_fix_params" in kwargs and isinstance(
                kwargs["nf_fix_params"], dict
            ), (
                "Must specifiy `nf_fix_params` keyword arguments, "
                "and `nf_fix_params_grad` is optional."
            )
            self.nf_fix_params = kwargs.pop("nf_fix_params")
            self.nf_fix_params_grad = kwargs.pop("nf_fix_params_grad", {}) or {}
            cls.__init__(self, *args, **kwargs)
            _register_fix_buffers(self, patch_register=True)
            # Abandoned alternative kept for reference: filter the configs down
            # to the names actually present on the module instead of warning.
            # avail_keys = list(self._parameters.keys()) + list(self._buffers.keys())
            # self.nf_fix_params = {k: self.nf_fix_params[k]
            #                       for k in avail_keys if k in self.nf_fix_params}
            # self.nf_fix_params_grad = {k: self.nf_fix_params_grad[k]
            #                            for k in avail_keys if k in self.nf_fix_params_grad}
class Activation_fix(Module):
    """Identity module that fixed-point quantizes the activations passing through.

    Configured by the mandatory ``nf_fix_params`` keyword argument (and the
    optional ``nf_fix_params_grad``), both keyed by the name ``"activation"``.
    The most recent quantized activation is kept on ``self.activation``.
    """

    def __init__(self, **kwargs):
        super(Activation_fix, self).__init__()
        kwargs = _get_kwargs(self, kwargs)
        assert "nf_fix_params" in kwargs and isinstance(
            kwargs["nf_fix_params"], dict
        ), "Must specifiy `nf_fix_params` keyword arguments, and `nf_fix_params_grad` is optional."
        self.nf_fix_params = kwargs.pop("nf_fix_params")
        self.nf_fix_params_grad = kwargs.pop("nf_fix_params_grad", {}) or {}
        self.activation = None
        # expose the "scale" config entries as buffers for data-parallel safety
        _register_fix_buffers(self, patch_register=False)

    def forward(self, inputs):
        kwarg_cfg = inputs if isinstance(inputs, dict) else {"inputs": inputs}
        act_cfg = self.nf_fix_params.get("activation", {})
        act_grad_cfg = self.nf_fix_params_grad.get("activation", {})
        self.activation, _ = quant.quantize(
            kwarg_cfg["inputs"], act_cfg, act_grad_cfg, kwarg_cfg=kwarg_cfg, name="activation"
        )
        return self.activation
class ConvBN_fix(Module):
    """Conv2d + BatchNorm2d folded into a single fixed-point convolution.

    On every forward pass the conv weights and the batch-norm affine parameters
    and statistics are merged into one weight/bias pair, quantized, and applied
    with a single ``F.conv2d`` call — mimicking how a deploy-time accelerator
    runs the fused operator, while still accumulating floating-point running
    statistics during training.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        eps=1e-05,
        momentum=0.1,
        affine=True,
        track_running_stats=True,
        **kwargs
    ):
        super(ConvBN_fix, self).__init__()
        kwargs = _get_kwargs(self, kwargs)
        assert "nf_fix_params" in kwargs and isinstance(
            kwargs["nf_fix_params"], dict
        ), "Must specifiy `nf_fix_params` keyword arguments, and `nf_fix_params_grad` is optional."
        self.nf_fix_params = kwargs.pop("nf_fix_params")
        self.nf_fix_params_grad = kwargs.pop("nf_fix_params_grad", {}) or {}
        if self.nf_fix_params_grad:
            warnings.warn(
                "Gradient fixed-point cfgs will NOT take effect! Because, "
                "Gradient quantization is usually used to simulate training on hardware. "
                "However, merged ConvBN is designed for mitigating the discrepancy between "
                "training and behaviour on deploy-only hardware; "
                "and enable relatively more accurate running mean/var accumulation "
                "during software training. Use these two together might not make sense."
            )
        # init the two floating-point sub-modules
        self.conv = torch.nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias=False,
        )
        self.bn = torch.nn.BatchNorm2d(
            out_channels, eps, momentum, affine, track_running_stats
        )
        # conv and bn attributes mirrored on this module for external inspection
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.kernel_size = self.conv.kernel_size
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        # the quantized combined weights and bias
        # (overwritten with the actual fused, quantized tensors on every forward)
        self.weight = self.conv.weight
        self.bias = self.bn.bias
        # register scale as buffers
        _register_fix_buffers(self, patch_register=False)

    def forward(self, inputs):
        """Run the fused, quantized conv-bn on *inputs* (an NCHW tensor)."""
        if self.training:
            out = self.conv(inputs)
            # dummy output, just to accumulate running mean and running var (floating-point)
            _ = self.bn(out)
            # calculate batch var/mean
            # NOTE(review): torch.var defaults to the unbiased estimator, whereas
            # BatchNorm2d normalizes the batch with the biased variance — the fused
            # output can therefore differ slightly from self.bn(out); confirm intended.
            mean = torch.mean(out, dim=[0, 2, 3])
            var = torch.var(out, dim=[0, 2, 3])
        else:
            # eval mode: fold in the accumulated running statistics instead
            mean = self.bn.running_mean
            var = self.bn.running_var
        inputs = {"inputs": inputs}
        # parameters/buffers to be combined
        bn_scale = self.bn.weight
        bn_bias = self.bn.bias
        bn_eps = self.bn.eps
        conv_weight = self.conv.weight
        conv_bias = self.conv.bias or 0.0  # could be None; self.conv is built with bias=False, so always 0.0 here
        # combine new weights/bias
        comb_weight = conv_weight * (bn_scale / torch.sqrt(var + bn_eps)).view(
            -1, 1, 1, 1
        )
        comb_bias = bn_bias + (conv_bias - mean) * bn_scale / torch.sqrt(var + bn_eps)
        # quantize the combined weights/bias (as what would be done in hardware deploy scenario)
        comb_weight, _ = quant.quantize(
            comb_weight,
            self.nf_fix_params.get("weight", {}),
            {},
            kwarg_cfg=inputs,
            name="weight",
        )
        comb_bias, _ = quant.quantize(
            comb_bias,
            self.nf_fix_params.get("bias", {}),
            {},
            kwarg_cfg=inputs,
            name="bias",
        )
        # run the fixed-point combined convbn
        convbn_out = F.conv2d(
            inputs["inputs"],
            comb_weight,
            comb_bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )
        # expose the fused tensors without going through Module's parameter logic
        object.__setattr__(self, "weight", comb_weight)
        object.__setattr__(self, "bias", comb_bias)
        return convbn_out
class FixTopModule(Module):
"""
A module with some simple fix configuration manage utilities.
"""
def __init__(self, *args, **kwargs):
    """Re-bind the static fix-config utilities as instance methods.

    The utilities are declared as staticmethods for Python 2/3 portability
    (Python 2 cannot bind an instance method onto a class that is not a
    FixTopModule), so each is bound to this instance here via ``__get__``.
    """
    super(FixTopModule, self).__init__(*args, **kwargs)
    for util in (
        "fix_state_dict",
        "load_fix_configs",
        "get_fix_configs",
        "print_fix_configs",
        "set_fix_method",
    ):
        setattr(self, util, getattr(FixTopModule, util).__get__(self))
@staticmethod
def fix_state_dict(self, destination=None, prefix="", keep_vars=False):
    r"""FIXME: maybe do another quantization to make sure all vars are quantized?
    Returns a dictionary containing a whole fixed-point state of the module.
    Both parameters and persistent buffers (e.g. running averages) are
    included. Keys are corresponding parameter and buffer names.
    Returns:
        dict:
            a dictionary containing a whole state of the module
    Example::
        >>> module.state_dict().keys()
        ['bias', 'weight']
    """
    if destination is None:
        destination = OrderedDict()
        destination._metadata = OrderedDict()
    # BUGFIX: record version metadata for *every* prefix, not only the
    # top-level call. This line used to sit inside the `destination is None`
    # branch, so recursive calls (which pass `destination` in) left
    # `local_metadata` undefined — a NameError in the hook loop below — and
    # never recorded submodule metadata. Mirrors torch.nn.Module.state_dict.
    destination._metadata[prefix[:-1]] = local_metadata = dict(
        version=self._version
    )
    for name, param in self._parameters.items():
        if param is not None:
            if isinstance(self.__class__, FixMeta):  # A fixed-point module
                # Get the last used version of the parameters
                thevar = getattr(self, name)
            else:
                thevar = param
            destination[prefix + name] = thevar if keep_vars else thevar.data
    for name, buf in self._buffers.items():
        if buf is not None:
            if isinstance(self.__class__, FixMeta):  # A fixed-point module
                # Get the last saved version of the buffers,
                # which can be of float precision
                # (as buffers will be turned into fixed-point precision on the next forward)
                thevar = getattr(self, name)
            else:
                thevar = buf
            destination[prefix + name] = thevar if keep_vars else thevar.data
    for name, module in self._modules.items():
        if module is not None:
            FixTopModule.fix_state_dict(
                module, destination, prefix + name + ".", keep_vars=keep_vars
            )
    for hook in self._state_dict_hooks.values():
        hook_result = hook(self, destination, prefix, local_metadata)
        if hook_result is not None:
            destination = hook_result
    return destination
@staticmethod
def load_fix_configs(self, cfgs, grad=False):
assert isinstance(cfgs, | |
== "eks":
if self.settings["AWS_LB_TYPE"] == "nlb":
if self.settings["USE_ARN"] == "Y":
svc_nlb_yaml = self.output_yaml_directory.joinpath("nginx/nlb-service.yaml")
svc_nlb_yaml_parser = Parser(svc_nlb_yaml, "Service")
svc_nlb_yaml_parser["metadata"]["annotations"].update(
{"service.beta.kubernetes.io/aws-load-balancer-ssl-cert": self.settings["ARN_AWS_IAM"]})
svc_nlb_yaml_parser["metadata"]["annotations"].update(
{"service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled": '"true"'})
svc_nlb_yaml_parser["metadata"]["annotations"].update({
"service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy":
"ELBSecurityPolicy-TLS-1-1-2017-01"})
svc_nlb_yaml_parser["metadata"]["annotations"].update(
{"service.beta.kubernetes.io/aws-load-balancer-backend-protocol": "http"})
svc_nlb_yaml_parser["metadata"]["annotations"].update(
{"service.beta.kubernetes.io/aws-load-balancer-ssl-ports": "https"})
svc_nlb_yaml_parser.dump_it()
self.kubernetes.create_objects_from_dict(self.output_yaml_directory.joinpath("nginx/nlb-service.yaml"))
else:
if self.settings["USE_ARN"] == "Y":
svc_l7_yaml = self.output_yaml_directory.joinpath("nginx/service-l7.yaml")
svc_l7_yaml_parser = Parser(svc_l7_yaml, "Service")
svc_l7_yaml_parser["metadata"]["annotations"][
"service.beta.kubernetes.io/aws-load-balancer-ssl-cert"] = self.settings["ARN_AWS_IAM"]
svc_l7_yaml_parser.dump_it()
self.kubernetes.create_objects_from_dict(svc_l7_yaml)
self.kubernetes.delete_config_map_using_name("nginx-configuration", "ingress-nginx")
time.sleep(5)
self.kubernetes.create_objects_from_dict(self.output_yaml_directory.
joinpath("nginx/patch-configmap-l7.yaml"))
else:
self.kubernetes.delete_config_map_using_name("nginx-configuration", "ingress-nginx")
time.sleep(5)
self.kubernetes.create_objects_from_dict(self.output_yaml_directory.
joinpath("nginx/service-l4.yaml"))
self.kubernetes.create_objects_from_dict(self.output_yaml_directory.
joinpath("nginx/patch-configmap-l4.yaml"))
self.wait_for_nginx_add()
if self.settings["DEPLOYMENT_ARCH"] == "gke" or self.settings["DEPLOYMENT_ARCH"] == "aks" \
or self.settings["DEPLOYMENT_ARCH"] == "do" or self.settings["DEPLOYMENT_ARCH"] == "local":
self.kubernetes.create_objects_from_dict(self.output_yaml_directory.joinpath("nginx/cloud-generic.yaml"))
self.wait_for_nginx_add()
if self.settings["DEPLOYMENT_ARCH"] == "eks" or self.settings["DEPLOYMENT_ARCH"] == "local":
self.wait_for_nginx_add()
cm_parser = Parser(self.config_yaml, "ConfigMap", "gluu-config-cm")
cm_parser["data"]["LB_ADDR"] = self.settings["LB_ADD"]
cm_parser.dump_it()
ingress_name_list = ["gluu-ingress-base", "gluu-ingress-openid-configuration",
"gluu-ingress-uma2-configuration", "gluu-ingress-webfinger",
"gluu-ingress-simple-web-discovery", "gluu-ingress-scim-configuration",
"gluu-ingress-fido-u2f-configuration", "gluu-ingress", "gluu-ingress-stateful",
"gluu-casa", "gluu-ingress-fido2-configuration"]
ingress_file = self.output_yaml_directory.joinpath("nginx/nginx.yaml")
for ingress_name in ingress_name_list:
parser = Parser(ingress_file, "Ingress", ingress_name)
parser["spec"]["tls"][0]["hosts"][0] = self.settings["GLUU_FQDN"]
parser["spec"]["rules"][0]["host"] = self.settings["GLUU_FQDN"]
parser.dump_it()
self.kubernetes.create_objects_from_dict(ingress_file, self.settings["GLUU_NAMESPACE"])
def deploy_postgres(self):
    """Deploy a KubeDB Postgres cluster seeded with the Kong / GG-UI users and DBs."""
    self.uninstall_postgres()
    s = self.settings
    self.kubernetes.create_namespace(name=s["POSTGRES_NAMESPACE"], labels={"app": "postgres"})
    # init script: users/passwords and databases for Kong and the Gateway UI
    init_sql = (
        "CREATE USER {};\nALTER USER {} PASSWORD '{}';\nCREATE USER {};\n"
        "ALTER USER {} PASSWORD '{}';\nCREATE DATABASE {};\n"
        "GRANT ALL PRIVILEGES ON DATABASE {} TO {};\nCREATE DATABASE {};\n"
        "GRANT ALL PRIVILEGES ON DATABASE {} TO {};"
    ).format(
        s["KONG_PG_USER"],
        s["KONG_PG_USER"],
        s["KONG_PG_PASSWORD"],
        s["GLUU_GATEWAY_UI_PG_USER"],
        s["GLUU_GATEWAY_UI_PG_USER"],
        s["GLUU_GATEWAY_UI_PG_PASSWORD"],
        s["KONG_DATABASE"],
        s["KONG_DATABASE"],
        s["KONG_PG_USER"],
        s["GLUU_GATEWAY_UI_DATABASE"],
        s["GLUU_GATEWAY_UI_DATABASE"],
        s["GLUU_GATEWAY_UI_PG_USER"],
    )
    init_sql_b64 = str(base64.b64encode(init_sql.encode("utf-8")), "utf-8")
    self.kubernetes.patch_or_create_namespaced_secret(
        name="pg-init-sql",
        namespace=s["POSTGRES_NAMESPACE"],
        literal="data.sql",
        value_of_literal=init_sql_b64,
    )
    storage_class = Path("./postgres/storageclasses.yaml")
    self.analyze_storage_class(storage_class)
    self.kubernetes.create_objects_from_dict(storage_class)
    postgres_yaml = Path("./postgres/postgres.yaml")
    postgres = Parser(postgres_yaml, "Postgres")
    postgres["spec"]["replicas"] = s["POSTGRES_REPLICAS"]
    postgres["spec"]["monitor"]["prometheus"]["namespace"] = s["POSTGRES_NAMESPACE"]
    postgres["metadata"]["namespace"] = s["POSTGRES_NAMESPACE"]
    if s["DEPLOYMENT_ARCH"] in ("microk8s", "minikube") or s["TEST_ENVIRONMENT"] == "Y":
        # low-resource environments cannot satisfy the default requests/limits
        try:
            del postgres["spec"]["podTemplate"]["spec"]["resources"]
        except KeyError:
            logger.info("Resources not deleted as they are not found inside yaml.")
    postgres.dump_it()
    self.kubernetes.create_namespaced_custom_object(
        filepath=postgres_yaml,
        group="kubedb.com",
        version="v1alpha1",
        plural="postgreses",
        namespace=s["POSTGRES_NAMESPACE"],
    )
    if s["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(s["POSTGRES_NAMESPACE"], "app=postgres", self.timeout)
def deploy_kong_init(self):
    """Create the Kong namespace and PG-password secret, then run the migration Job."""
    s = self.settings
    self.kubernetes.create_namespace(name=s["KONG_NAMESPACE"], labels={"app": "ingress-kong"})
    kong_pw_b64 = str(base64.b64encode(s["KONG_PG_PASSWORD"].encode("utf-8")), "utf-8")
    self.kubernetes.patch_or_create_namespaced_secret(
        name="kong-postgres-pass",
        namespace=s["KONG_NAMESPACE"],
        literal="KONG_PG_PASSWORD",
        value_of_literal=kong_pw_b64,
    )
    job_yaml = Path("./gluu-gateway-ui/kong-init-job.yaml")
    job = Parser(job_yaml, "Job")
    # wire the migration container to the external postgres; the password
    # comes from the secret created above
    job["spec"]["template"]["spec"]["containers"][0]["env"] = [
        {"name": "KONG_DATABASE", "value": "postgres"},
        {"name": "KONG_PG_HOST", "value": s["POSTGRES_URL"]},
        {"name": "KONG_PG_USER", "value": s["KONG_PG_USER"]},
        {"name": "KONG_PG_PASSWORD", "valueFrom": {"secretKeyRef": {"name": "kong-postgres-pass",
                                                                    "key": "KONG_PG_PASSWORD"}}}
    ]
    job["metadata"]["namespace"] = s["KONG_NAMESPACE"]
    job["spec"]["template"]["spec"]["containers"][0]["image"] = \
        s["GLUU_GATEWAY_IMAGE_NAME"] + ":" + s["GLUU_GATEWAY_IMAGE_TAG"]
    job.dump_it()
    self.kubernetes.create_objects_from_dict(job_yaml)
def deploy_kong(self):
    """Deploy the Kong all-in-one-db manifests pointed at the external postgres.

    Fix: the env-var pruning previously removed items from the lists while
    iterating over them (``for env in env_list: env_list.remove(env)``),
    which skips the element following each removal — e.g. adjacent
    KONG_PG_HOST / KONG_PG_USER entries could not both be removed. The lists
    are now rebuilt with comprehensions before the replacements are appended.
    """
    self.uninstall_kong()
    self.deploy_kong_init()
    s = self.settings
    kong_yaml = Path("./gluu-gateway-ui/kong-all-in-one-db.yaml")
    # Re-namespace every object in the manifest.
    sa = Parser(kong_yaml, "ServiceAccount")
    sa["metadata"]["namespace"] = s["KONG_NAMESPACE"]
    sa.dump_it()
    crb = Parser(kong_yaml, "ClusterRoleBinding")
    crb["subjects"][0]["namespace"] = s["KONG_NAMESPACE"]
    crb.dump_it()
    for svc_name in ("kong-proxy", "kong-validation-webhook", "kong-admin"):
        svc = Parser(kong_yaml, "Service", svc_name)
        svc["metadata"]["namespace"] = s["KONG_NAMESPACE"]
        svc.dump_it()
    deploy = Parser(kong_yaml, "Deployment")
    deploy["metadata"]["namespace"] = s["KONG_NAMESPACE"]
    containers = deploy["spec"]["template"]["spec"]["containers"]
    proxy_index = 0
    ingress_controller_index = 1
    for i, container in enumerate(containers):
        if container["name"] == "proxy":
            proxy_index = i
        if container["name"] == "ingress-controller":
            ingress_controller_index = i
    # Adjust proxy container envs (rebuild in place; do NOT remove while iterating).
    env_list = containers[proxy_index]["env"]
    env_list[:] = [e for e in env_list if e["name"] not in ("KONG_PG_HOST", "KONG_PG_USER")]
    env_list.append({"name": "KONG_PG_HOST", "value": s["POSTGRES_URL"]})
    env_list.append({"name": "KONG_PG_USER", "value": s["KONG_PG_USER"]})
    # Adjust kong ingress controller envs the same way.
    env_list = containers[ingress_controller_index]["env"]
    env_list[:] = [e for e in env_list if e["name"] != "CONTROLLER_PUBLISH_SERVICE"]
    env_list.append({"name": "CONTROLLER_PUBLISH_SERVICE",
                     "value": s["KONG_NAMESPACE"] + "/kong-proxy"})
    containers[ingress_controller_index]["env"] = env_list
    for container in containers:
        if container["name"] == "proxy":
            container["image"] = s["GLUU_GATEWAY_IMAGE_NAME"] + ":" + s["GLUU_GATEWAY_IMAGE_TAG"]
    deploy.dump_it()
    self.kubernetes.create_objects_from_dict(kong_yaml)
    if s["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(s["KONG_NAMESPACE"], "app=ingress-kong", self.timeout)
def deploy_gluu_gateway_ui(self):
    """Deploy the Gluu Gateway UI with TLS and its postgres-password secret."""
    s = self.settings
    ns = s["GLUU_GATEWAY_UI_NAMESPACE"]
    self.kubernetes.create_namespace(name=ns, labels={"APP_NAME": "gluu-gateway-ui"})
    self.setup_tls(namespace=ns)
    ui_pg_pw_b64 = str(base64.b64encode(s["GLUU_GATEWAY_UI_PG_PASSWORD"].encode("utf-8")), "utf-8")
    self.kubernetes.patch_or_create_namespaced_secret(
        name="gg-ui-postgres-pass",
        namespace=ns,
        literal="DB_PASSWORD",
        value_of_literal=ui_pg_pw_b64,
    )
    self.kubernetes.create_objects_from_dict(self.gg_ui_yaml)
    if s["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(ns, "app=gg-kong-ui", self.timeout)
def uninstall_gluu_gateway_ui(self):
    """Tear down every Gateway-UI resource in its namespace."""
    ns = self.settings["GLUU_GATEWAY_UI_NAMESPACE"]
    for delete_by_label in (
        self.kubernetes.delete_deployment_using_label,
        self.kubernetes.delete_config_map_using_label,
        self.kubernetes.delete_job,
    ):
        delete_by_label(ns, "app=gg-kong-ui")
    self.kubernetes.delete_service("gg-kong-ui", ns)
    self.kubernetes.delete_ingress("gluu-gg-ui", ns)
def install_gluu_gateway_dbmode(self):
    """Install Gluu Gateway in DB mode: postgres, kong, then the UI."""
    for step in (
        self.deploy_postgres,
        self.deploy_kong,
        self.kustomize_gluu_gateway_ui,
        self.adjust_fqdn_yaml_entries,
        self.deploy_gluu_gateway_ui,
    ):
        step()
def deploy_redis(self):
    """Deploy a KubeDB Redis cluster into REDIS_NAMESPACE.

    Fixes:
    - the final pod-status wait previously watched GLUU_NAMESPACE, but the
      Redis custom object is created in REDIS_NAMESPACE, so the wait could
      never see the pods;
    - the resource-section removal for small architectures is now guarded
      with try/KeyError, matching deploy_postgres, instead of crashing when
      the yaml has no resources block.
    """
    self.uninstall_redis()
    s = self.settings
    self.kubernetes.create_namespace(name=s["REDIS_NAMESPACE"], labels={"app": "redis"})
    redis_storage_class = Path("./redis/storageclasses.yaml")
    self.analyze_storage_class(redis_storage_class)
    self.kubernetes.create_objects_from_dict(redis_storage_class)
    redis_configmap = Path("./redis/configmaps.yaml")
    redis_conf_parser = Parser(redis_configmap, "ConfigMap")
    redis_conf_parser["metadata"]["namespace"] = s["REDIS_NAMESPACE"]
    redis_conf_parser.dump_it()
    self.kubernetes.create_objects_from_dict(redis_configmap)
    redis_yaml = Path("./redis/redis.yaml")
    redis_parser = Parser(redis_yaml, "Redis")
    redis_parser["spec"]["cluster"]["master"] = s["REDIS_MASTER_NODES"]
    redis_parser["spec"]["cluster"]["replicas"] = s["REDIS_NODES_PER_MASTER"]
    redis_parser["spec"]["monitor"]["prometheus"]["namespace"] = s["REDIS_NAMESPACE"]
    redis_parser["metadata"]["namespace"] = s["REDIS_NAMESPACE"]
    if s["DEPLOYMENT_ARCH"] in ("microk8s", "minikube", "local"):
        # low-resource environments cannot satisfy the default requests/limits
        try:
            del redis_parser["spec"]["podTemplate"]["spec"]["resources"]
        except KeyError:
            logger.info("Resources not deleted as they are not found inside yaml.")
    redis_parser.dump_it()
    self.kubernetes.create_namespaced_custom_object(filepath=redis_yaml,
                                                    group="kubedb.com",
                                                    version="v1alpha1",
                                                    plural="redises",
                                                    namespace=s["REDIS_NAMESPACE"])
    if s["AWS_LB_TYPE"] != "alb":
        # FIX: was GLUU_NAMESPACE — the redis pods live in REDIS_NAMESPACE.
        self.kubernetes.check_pods_statuses(s["REDIS_NAMESPACE"], "app=redis-cluster", self.timeout)
def deploy_config(self):
    """Create the config-init resources; wait for the load job unless behind an ALB."""
    self.kubernetes.create_objects_from_dict(self.config_yaml)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(self.settings["GLUU_NAMESPACE"],
                                            "app=config-init-load", self.timeout)
def deploy_ldap(self):
    """Deploy OpenDJ (LDAP); wait for its pods unless behind an ALB."""
    self.kubernetes.create_objects_from_dict(self.ldap_yaml)
    logger.info("Deploying LDAP.Please wait..")
    # give the StatefulSet a moment to register its pods before polling
    time.sleep(10)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(self.settings["GLUU_NAMESPACE"],
                                            "app=opendj", self.timeout)
def deploy_jackrabbit(self):
    """Deploy the Jackrabbit content repository; wait for its pods unless behind an ALB."""
    self.kubernetes.create_objects_from_dict(self.jackrabbit_yaml)
    logger.info("Deploying Jackrabbit content repository.Please wait..")
    # give the workload a moment to register its pods before polling
    time.sleep(10)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(self.settings["GLUU_NAMESPACE"],
                                            "app=jackrabbit", self.timeout)
def deploy_persistence(self):
    """Run the persistence-load job, then scale opendj for LDAP/hybrid backends."""
    self.kubernetes.create_objects_from_dict(self.persistence_yaml)
    logger.info("Trying to import ldifs...")
    ns = self.settings["GLUU_NAMESPACE"]
    behind_alb = self.settings["AWS_LB_TYPE"] == "alb"
    if not behind_alb:
        self.kubernetes.check_pods_statuses(ns, "app=persistence-load", self.timeout)
    if self.settings["PERSISTENCE_BACKEND"] in ("hybrid", "ldap"):
        self.kubernetes.patch_namespaced_stateful_set_scale(
            name="opendj",
            replicas=self.settings["LDAP_REPLICAS"],
            namespace=ns)
        if not behind_alb:
            self.kubernetes.check_pods_statuses(ns, "app=opendj", self.timeout)
def deploy_update_lb_ip(self):
    """Create the update-lb-ip resources from the kustomized yaml (no pod wait)."""
    self.kubernetes.create_objects_from_dict(self.update_lb_ip_yaml)
def deploy_oxauth(self):
    """Deploy oxAuth, wait for pods (unless behind ALB), then apply the replica count."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.create_objects_from_dict(self.oxauth_yaml)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(ns, "app=oxauth", self.timeout)
    self.kubernetes.patch_namespaced_deployment_scale(
        name="oxauth", replicas=self.settings["OXAUTH_REPLICAS"], namespace=ns)
def deploy_fido2(self):
    """Deploy FIDO2, wait for pods (unless behind ALB), then apply the replica count."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.create_objects_from_dict(self.fido2_yaml)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(ns, "app=fido2", self.timeout)
    self.kubernetes.patch_namespaced_deployment_scale(
        name="fido2", replicas=self.settings["FIDO2_REPLICAS"], namespace=ns)
def deploy_scim(self):
    """Deploy SCIM, wait for pods (unless behind ALB), then apply the replica count."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.create_objects_from_dict(self.scim_yaml)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(ns, "app=scim", self.timeout)
    self.kubernetes.patch_namespaced_deployment_scale(
        name="scim", replicas=self.settings["SCIM_REPLICAS"], namespace=ns)
def deploy_oxd(self):
    """Deploy oxd-server plus its network policy, then apply the replica count."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.create_objects_from_dict(self.oxd_server_yaml)
    self.kubernetes.create_objects_from_dict(
        Path("./oxd-server/base/networkpolicy.yaml"), ns)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(ns, "app=oxd-server", self.timeout)
    self.kubernetes.patch_namespaced_deployment_scale(
        name="oxd-server", replicas=self.settings["OXD_SERVER_REPLICAS"], namespace=ns)
def deploy_casa(self):
    """Deploy Casa, wait for pods (unless behind ALB), then apply the replica count."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.create_objects_from_dict(self.casa_yaml)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(ns, "app=casa", self.timeout)
    self.kubernetes.patch_namespaced_deployment_scale(
        name="casa", replicas=self.settings["CASA_REPLICAS"], namespace=ns)
def deploy_oxtrust(self):
    """Deploy oxTrust (StatefulSet), wait for pods (unless behind ALB), then scale it."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.create_objects_from_dict(self.oxtrust_yaml)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(ns, "app=oxtrust", self.timeout)
    self.kubernetes.patch_namespaced_stateful_set_scale(
        name="oxtrust", replicas=self.settings["OXTRUST_REPLICAS"], namespace=ns)
def deploy_oxshibboleth(self):
    """Deploy oxShibboleth (StatefulSet) and scale it.

    Fix: the pod-status wait is now skipped when AWS_LB_TYPE is "alb",
    for consistency with every other deploy_* method, which all guard
    check_pods_statuses the same way.
    """
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.create_objects_from_dict(self.oxshibboleth_yaml)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(ns, "app=oxshibboleth", self.timeout)
    self.kubernetes.patch_namespaced_stateful_set_scale(
        name="oxshibboleth",
        replicas=self.settings["OXSHIBBOLETH_REPLICAS"],
        namespace=ns)
def deploy_oxpassport(self):
    """Deploy oxPassport, wait for pods (unless behind ALB), then apply the replica count."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.create_objects_from_dict(self.oxpassport_yaml)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(ns, "app=oxpassport", self.timeout)
    self.kubernetes.patch_namespaced_deployment_scale(
        name="oxpassport",
        replicas=self.settings["OXPASSPORT_REPLICAS"],
        namespace=ns)
def deploy_oxauth_key_rotation(self):
    """Create the oxauth-key-rotation resources from the kustomized yaml (no pod wait)."""
    self.kubernetes.create_objects_from_dict(self.oxauth_key_rotate_yaml)
def deploy_radius(self):
    """Deploy Radius, wait for pods (unless behind ALB), then apply the replica count."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.create_objects_from_dict(self.radius_yaml)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(ns, "app=radius", self.timeout)
    self.kubernetes.patch_namespaced_deployment_scale(
        name="radius", replicas=self.settings["RADIUS_REPLICAS"], namespace=ns)
def deploy_cr_rotate(self):
    """Recreate the cache-refresh rotation resources after clearing stale RBAC objects."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.delete_role("gluu-role", ns)
    self.kubernetes.delete_role_binding("gluu-rolebinding", ns)
    self.kubernetes.delete_cluster_role_binding("gluu-rolebinding")
    # let the API server finish deleting the RBAC objects before recreating
    time.sleep(10)
    self.kubernetes.create_objects_from_dict(self.cr_rotate_yaml)
def copy_configs_before_restore(self):
    """Snapshot the live gluu secret and configmap data prior to a restore."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.gluu_secret = self.kubernetes.read_namespaced_secret("gluu", ns).data
    self.gluu_config = self.kubernetes.read_namespaced_configmap("gluu", ns).data
def save_a_copy_of_config(self):
    """Persist the snapshotted gluu secret/configmap data under backup names."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.patch_or_create_namespaced_secret(
        name="secret-params", literal=None, value_of_literal=None,
        namespace=ns, data=self.gluu_secret)
    self.kubernetes.patch_or_create_namespaced_configmap(
        name="config-params", namespace=ns, data=self.gluu_config)
def mount_config(self):
    """Write the snapshotted secret/configmap data back under the canonical 'gluu' names."""
    ns = self.settings["GLUU_NAMESPACE"]
    self.kubernetes.patch_or_create_namespaced_secret(
        name="gluu", literal=None, value_of_literal=None,
        namespace=ns, data=self.gluu_secret)
    self.kubernetes.patch_or_create_namespaced_configmap(
        name="gluu", namespace=ns, data=self.gluu_config)
def run_backup_command(self):
    """Best-effort import of the LDAP backup ldif inside the opendj pod.

    Failures are deliberately swallowed: the restore flow continues even if
    the exec cannot reach the pod or the import fails.
    """
    try:
        import_ldif_cmd = ["/opt/opendj/bin/import-ldif", "-n", "userRoot",
                           "-l", "/opt/opendj/ldif/backup-this-copy.ldif",
                           "--bindPassword", self.settings["LDAP_PW"]]
        self.kubernetes.connect_get_namespaced_pod_exec(
            exec_command=import_ldif_cmd,
            app_label="app=opendj",
            namespace=self.settings["GLUU_NAMESPACE"])
    except Exception:
        # ConnectionError is an Exception subclass, so one clause covers both
        pass
def setup_backup_ldap(self):
    """Install the scheduled LDAP backup CronJob.

    Stores the LDAP bind password in a secret, rewrites the backup
    kustomization and cronjob for the target namespace/schedule, renders the
    manifests with `kubectl kustomize`, and applies them.
    """
    s = self.settings
    ldap_pw_b64 = str(base64.b64encode(s["LDAP_PW"].encode("utf-8")), "utf-8")
    self.kubernetes.patch_or_create_namespaced_secret(
        name="ldap-auth",
        namespace=s["GLUU_NAMESPACE"],
        literal="password",
        value_of_literal=ldap_pw_b64)
    kustomization = Parser("ldap/backup/kustomization.yaml", "Kustomization")
    kustomization["namespace"] = s["GLUU_NAMESPACE"]
    kustomization["configMapGenerator"][0]["literals"] = [
        "GLUU_LDAP_AUTO_REPLICATE=" + s["GLUU_CACHE_TYPE"],
        "GLUU_CONFIG_KUBERNETES_NAMESPACE=" + s["GLUU_NAMESPACE"],
        "GLUU_SECRET_KUBERNETES_NAMESPACE=" + s["GLUU_NAMESPACE"],
        "GLUU_CONFIG_ADAPTER=kubernetes",
        "GLUU_SECRET_ADAPTER=kubernetes",
        "GLUU_LDAP_INIT='true'",
        "GLUU_LDAP_INIT_HOST='opendj'",
        "GLUU_LDAP_INIT_PORT='1636'",
        "GLUU_CERT_ALT_NAME='opendj'",
        "GLUU_PERSISTENCE_LDAP_MAPPING=" + s["HYBRID_LDAP_HELD_DATA"],
        "GLUU_PERSISTENCE_TYPE=" + s["PERSISTENCE_BACKEND"],
    ]
    kustomization.dump_it()
    cronjob = Parser("ldap/backup/cronjobs.yaml", "CronJob")
    cronjob["spec"]["schedule"] = s["LDAP_BACKUP_SCHEDULE"]
    cronjob.dump_it()
    exec_cmd(self.kubectl + " kustomize ldap/backup", output_file="./ldap-backup.yaml")
    self.kubernetes.create_objects_from_dict("./ldap-backup.yaml")
def upgrade(self):
    """Run the upgrade job, then roll every workload to its configured image."""
    self.kustomize_gluu_upgrade()
    self.adjust_fqdn_yaml_entries()
    self.kubernetes.create_objects_from_dict(self.gluu_upgrade_yaml)
    if self.settings["AWS_LB_TYPE"] != "alb":
        self.kubernetes.check_pods_statuses(self.settings["GLUU_NAMESPACE"],
                                            "app=gluu-upgrade", self.timeout)

    def image(prefix):
        # "<NAME>:<TAG>" built from the settings keys <prefix>_IMAGE_NAME/_IMAGE_TAG
        return self.settings[prefix + "_IMAGE_NAME"] + ":" + self.settings[prefix + "_IMAGE_TAG"]

    ns = self.settings["GLUU_NAMESPACE"]
    patch_deployment = self.kubernetes.patch_namespaced_deployment
    patch_daemonset = self.kubernetes.patch_namespaced_daemonset
    patch_statefulset = self.kubernetes.patch_namespaced_statefulset
    # (patch function, workload name, settings key prefix) — order preserved
    rollouts = (
        (patch_deployment, "casa", "CASA"),
        (patch_daemonset, "cr-rotate", "CACHE_REFRESH_ROTATE"),
        (patch_deployment, "oxauth-key-rotation", "CERT_MANAGER"),
        (patch_statefulset, "opendj", "LDAP"),
        (patch_deployment, "oxauth", "OXAUTH"),
        (patch_deployment, "fido2", "FIDO2"),
        (patch_deployment, "scim", "SCIM"),
        (patch_deployment, "oxd-server", "OXD"),
        (patch_deployment, "oxpassport", "OXPASSPORT"),
        (patch_statefulset, "oxshibboleth", "OXSHIBBOLETH"),
        (patch_statefulset, "oxtrust", "OXTRUST"),
        (patch_deployment, "radius", "RADIUS"),
    )
    for patch, workload, prefix in rollouts:
        patch(name=workload, image=image(prefix), namespace=ns)
def install(self, install_couchbase=True, restore=False):
if not restore:
self.kubernetes.create_namespace(name=self.settings["GLUU_NAMESPACE"], labels={"app": "gluu"})
self.kustomize_it()
self.adjust_fqdn_yaml_entries()
if install_couchbase:
if self.settings["PERSISTENCE_BACKEND"] != "ldap":
if self.settings["INSTALL_COUCHBASE"] == "Y":
couchbase_app = Couchbase(self.settings)
couchbase_app.uninstall()
couchbase_app = Couchbase(self.settings)
couchbase_app.install()
else:
encoded_cb_pass_bytes = base64.b64encode(self.settings["COUCHBASE_PASSWORD"].encode("utf-8"))
encoded_cb_pass_string = str(encoded_cb_pass_bytes, "utf-8")
couchbase_app = Couchbase(self.settings)
couchbase_app.create_couchbase_gluu_cert_pass_secrets(self.settings["COUCHBASE_CRT"],
encoded_cb_pass_string)
if not restore:
self.kubernetes = Kubernetes()
if self.settings["AWS_LB_TYPE"] == "alb":
self.prepare_alb()
self.deploy_alb()
else:
self.deploy_nginx()
self.adjust_fqdn_yaml_entries()
if self.settings["DEPLOY_MULTI_CLUSTER"] != "Y" and self.settings["DEPLOY_MULTI_CLUSTER"] != "y":
self.kubernetes = Kubernetes()
if restore:
self.mount_config()
self.save_a_copy_of_config()
else:
self.deploy_config()
if self.settings["INSTALL_JACKRABBIT"] == "Y" and not restore:
self.kubernetes = Kubernetes()
self.deploy_jackrabbit()
if not self.settings["AWS_LB_TYPE"] == "alb":
self.setup_tls(namespace=self.settings["GLUU_NAMESPACE"])
if self.settings["INSTALL_REDIS"] == "Y":
self.kubernetes = Kubernetes()
self.deploy_redis()
if self.settings["PERSISTENCE_BACKEND"] == "hybrid" or \
self.settings["PERSISTENCE_BACKEND"] == "ldap":
self.kubernetes = Kubernetes()
if restore:
self.run_backup_command()
self.mount_config()
self.wait_for_nginx_add()
else:
self.deploy_ldap()
if self.settings["DEPLOYMENT_ARCH"] != "microk8s" and self.settings["DEPLOYMENT_ARCH"] != "minikube":
self.setup_backup_ldap()
if not restore:
self.kubernetes = Kubernetes()
self.deploy_persistence()
if self.settings["IS_GLUU_FQDN_REGISTERED"] != "Y":
if self.settings["DEPLOYMENT_ARCH"] == "eks" or self.settings["DEPLOYMENT_ARCH"] == "local":
self.kubernetes = Kubernetes()
self.deploy_update_lb_ip()
self.kubernetes = Kubernetes()
self.deploy_oxauth()
if self.settings["ENABLE_FIDO2"] == "Y":
self.kubernetes = Kubernetes()
self.deploy_fido2()
if self.settings["ENABLE_SCIM"] == "Y":
self.kubernetes = Kubernetes()
self.deploy_scim()
if self.settings["ENABLE_OXD"] == "Y":
self.kubernetes = Kubernetes()
self.deploy_oxd()
if self.settings["ENABLE_CASA"] == "Y":
self.kubernetes = Kubernetes()
self.deploy_casa()
self.kubernetes = Kubernetes()
self.deploy_oxtrust()
if self.settings["ENABLE_OXSHIBBOLETH"] == "Y":
self.kubernetes = Kubernetes()
self.deploy_oxshibboleth()
| |
" << "= " << setiosflags(ios::scientific) << setw(10) << mdl_conjg__CKM2x2 << endl;
cout << setw(20) << "mdl_conjg__CKM3x2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_conjg__CKM3x2 << endl;
cout << setw(20) << "mdl_conjg__CKM1x1 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_conjg__CKM1x1 << endl;
cout << setw(20) << "mdl_conjg__CKM1x2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_conjg__CKM1x2 << endl;
cout << setw(20) << "mdl_aEW " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_aEW << endl;
cout << setw(20) << "mdl_MW " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MW << endl;
cout << setw(20) << "mdl_sqrt__aEW " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_sqrt__aEW << endl;
cout << setw(20) << "mdl_ee " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_ee << endl;
cout << setw(20) << "mdl_MW__exp__2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MW__exp__2 << endl;
cout << setw(20) << "mdl_sw2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_sw2 << endl;
cout << setw(20) << "mdl_cw " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_cw << endl;
cout << setw(20) << "mdl_sqrt__sw2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_sqrt__sw2 << endl;
cout << setw(20) << "mdl_sw " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_sw << endl;
cout << setw(20) << "mdl_g1 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_g1 << endl;
cout << setw(20) << "mdl_gw " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_gw << endl;
cout << setw(20) << "mdl_vev " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_vev << endl;
cout << setw(20) << "mdl_vev__exp__2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_vev__exp__2 << endl;
cout << setw(20) << "mdl_lam " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_lam << endl;
cout << setw(20) << "mdl_yb " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_yb << endl;
cout << setw(20) << "mdl_yc " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_yc << endl;
cout << setw(20) << "mdl_ye " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_ye << endl;
cout << setw(20) << "mdl_ym " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_ym << endl;
cout << setw(20) << "mdl_yt " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_yt << endl;
cout << setw(20) << "mdl_ytau " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_ytau << endl;
cout << setw(20) << "mdl_muH " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_muH << endl;
cout << setw(20) << "mdl_I1x31 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I1x31 << endl;
cout << setw(20) << "mdl_I1x32 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I1x32 << endl;
cout << setw(20) << "mdl_I1x33 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I1x33 << endl;
cout << setw(20) << "mdl_I2x12 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I2x12 << endl;
cout << setw(20) << "mdl_I2x13 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I2x13 << endl;
cout << setw(20) << "mdl_I2x22 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I2x22 << endl;
cout << setw(20) << "mdl_I2x23 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I2x23 << endl;
cout << setw(20) << "mdl_I2x32 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I2x32 << endl;
cout << setw(20) << "mdl_I2x33 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I2x33 << endl;
cout << setw(20) << "mdl_I3x21 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I3x21 << endl;
cout << setw(20) << "mdl_I3x22 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I3x22 << endl;
cout << setw(20) << "mdl_I3x23 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I3x23 << endl;
cout << setw(20) << "mdl_I3x31 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I3x31 << endl;
cout << setw(20) << "mdl_I3x32 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I3x32 << endl;
cout << setw(20) << "mdl_I3x33 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I3x33 << endl;
cout << setw(20) << "mdl_I4x13 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I4x13 << endl;
cout << setw(20) << "mdl_I4x23 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I4x23 << endl;
cout << setw(20) << "mdl_I4x33 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_I4x33 << endl;
cout << setw(20) << "mdl_ee__exp__2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_ee__exp__2 << endl;
cout << setw(20) << "mdl_sw__exp__2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_sw__exp__2 << endl;
cout << setw(20) << "mdl_cw__exp__2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_cw__exp__2 << endl;
}
void Parameters_sm::printIndependentCouplings(){
cout << "sm model couplings independent of event kinematics:" << endl;
cout << setw(20) << "GC_1 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_1 << endl;
cout << setw(20) << "GC_2 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_2 << endl;
cout << setw(20) << "GC_3 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_3 << endl;
cout << setw(20) << "GC_4 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_4 << endl;
cout << setw(20) << "GC_5 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_5 << endl;
cout << setw(20) << "GC_6 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_6 << endl;
cout << setw(20) << "GC_7 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_7 << endl;
cout << setw(20) << "GC_8 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_8 << endl;
cout << setw(20) << "GC_9 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_9 << endl;
cout << setw(20) << "GC_13 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_13 << endl;
cout << setw(20) << "GC_14 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_14 << endl;
cout << setw(20) << "GC_15 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_15 << endl;
cout << setw(20) << "GC_16 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_16 << endl;
cout << setw(20) << "GC_17 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_17 << endl;
cout << setw(20) << "GC_18 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_18 << endl;
cout << setw(20) << "GC_19 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_19 << endl;
cout << setw(20) << "GC_20 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_20 << endl;
cout << setw(20) << "GC_21 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_21 << endl;
cout << setw(20) << "GC_22 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_22 << endl;
cout << setw(20) << "GC_23 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_23 << endl;
cout << setw(20) << "GC_24 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_24 << endl;
cout << setw(20) << "GC_25 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_25 << endl;
cout << setw(20) << "GC_26 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_26 << endl;
cout << setw(20) << "GC_27 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_27 << endl;
cout << setw(20) << "GC_28 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_28 << endl;
cout << setw(20) << "GC_29 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_29 << endl;
cout << setw(20) << "GC_30 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_30 << endl;
cout << setw(20) << "GC_31 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_31 << endl;
cout << setw(20) << "GC_32 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_32 << endl;
cout << setw(20) << "GC_33 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_33 << endl;
cout << setw(20) << "GC_34 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_34 << endl;
cout << setw(20) << "GC_35 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_35 << endl;
cout << setw(20) << "GC_36 " << "= " << setiosflags(ios::scientific) << setw(10) << GC_36 << endl;
cout << setw(20) << "GC_37 " << "= " << | |
from statistics import mean
import json
data = {
"Kingston Fossil Plant22": {
"2010": [
{
"contaminant": "manganese",
"concentration": "2.15"
}
],
"2011": [
{
"contaminant": "manganese",
"concentration": "1.83"
}
],
"2012": [],
"2013": [],
"2014": [
{
"contaminant": "arsenic",
"concentration": "0.0115"
}
],
"2015": [
{
"contaminant": "arsenic",
"concentration": "0.0696"
}
],
"2016": [
{
"contaminant": "arsenic",
"concentration": "0.0112"
},
{
"contaminant": "arsenic",
"concentration": "0.0119"
}
],
"2017": [],
"latitude": "35.909305",
"longitude": "-84.504861"
},
"Kingston Fossil Plant6AR": {
"2010": [
{
"contaminant": "cobalt",
"concentration": "0.0871"
},
{
"contaminant": "cobalt",
"concentration": "0.0991"
},
{
"contaminant": "cobalt",
"concentration": "0.104"
},
{
"contaminant": "manganese",
"concentration": "26.9"
}
],
"2011": [
{
"contaminant": "cobalt",
"concentration": "0.111"
},
{
"contaminant": "cobalt",
"concentration": "0.0842"
},
{
"contaminant": "manganese",
"concentration": "35.8"
}
],
"2012": [
{
"contaminant": "cobalt",
"concentration": "0.0968"
},
{
"contaminant": "cobalt",
"concentration": "0.106"
}
],
"2013": [
{
"contaminant": "cobalt",
"concentration": "0.117"
},
{
"contaminant": "cobalt",
"concentration": "0.111"
}
],
"2014": [
{
"contaminant": "cobalt",
"concentration": "0.117"
},
{
"contaminant": "cobalt",
"concentration": "0.12"
}
],
"2015": [
{
"contaminant": "cobalt",
"concentration": "0.121"
},
{
"contaminant": "cobalt",
"concentration": "0.119"
},
{
"contaminant": "cobalt",
"concentration": "0.126"
}
],
"2016": [
{
"contaminant": "cobalt",
"concentration": "0.14"
},
{
"contaminant": "cobalt",
"concentration": "0.13"
},
{
"contaminant": "cobalt",
"concentration": "0.131"
}
],
"2017": [],
"latitude": "35.904582",
"longitude": "-84.504883"
},
"Kingston Fossil PlantAD-1": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.908929",
"longitude": "-84.519528"
},
"Kingston Fossil PlantAD-2": {
"2010": [
{
"contaminant": "cobalt",
"concentration": "0.00642"
},
{
"contaminant": "cobalt",
"concentration": "0.00608"
},
{
"contaminant": "manganese",
"concentration": "0.742"
},
{
"contaminant": "manganese",
"concentration": "0.739"
},
{
"contaminant": "manganese",
"concentration": "0.832"
}
],
"2011": [
{
"contaminant": "cobalt",
"concentration": "0.00684"
},
{
"contaminant": "cobalt",
"concentration": "0.00858"
}
],
"2012": [
{
"contaminant": "cobalt",
"concentration": "0.00998"
},
{
"contaminant": "cobalt",
"concentration": "0.0101"
},
{
"contaminant": "cobalt",
"concentration": "0.0112"
},
{
"contaminant": "manganese",
"concentration": "1.67"
}
],
"2013": [
{
"contaminant": "cobalt",
"concentration": "0.0108"
},
{
"contaminant": "cobalt",
"concentration": "0.00887"
},
{
"contaminant": "cobalt",
"concentration": "0.00689"
},
{
"contaminant": "cobalt",
"concentration": "0.00746"
}
],
"2014": [
{
"contaminant": "cobalt",
"concentration": "0.00798"
},
{
"contaminant": "cobalt",
"concentration": "0.00647"
},
{
"contaminant": "cobalt",
"concentration": "0.00702"
}
],
"2015": [
{
"contaminant": "cobalt",
"concentration": "0.00676"
}
],
"2016": [],
"2017": [],
"latitude": "35.902915",
"longitude": "-84.51499"
},
"Kingston Fossil PlantAD-3": {
"2010": [
{
"contaminant": "manganese",
"concentration": "5.64"
},
{
"contaminant": "manganese",
"concentration": "5.13"
},
{
"contaminant": "manganese",
"concentration": "5.345"
}
],
"2011": [
{
"contaminant": "cobalt",
"concentration": "0.006245"
},
{
"contaminant": "manganese",
"concentration": "13.75"
},
{
"contaminant": "sulfate",
"concentration": "552.0"
}
],
"2012": [
{
"contaminant": "cobalt",
"concentration": "0.00831"
},
{
"contaminant": "manganese",
"concentration": "6.84"
}
],
"2013": [
{
"contaminant": "cobalt",
"concentration": "0.007335"
},
{
"contaminant": "cobalt",
"concentration": "0.00772"
}
],
"2014": [],
"2015": [
{
"contaminant": "cobalt",
"concentration": "0.0077"
},
{
"contaminant": "cobalt",
"concentration": "0.00644"
}
],
"2016": [
{
"contaminant": "cobalt",
"concentration": "0.00631"
},
{
"contaminant": "cobalt",
"concentration": "0.00657"
},
{
"contaminant": "sulfate",
"concentration": "971.0"
},
{
"contaminant": "sulfate",
"concentration": "659.5"
}
],
"2017": [],
"latitude": "35.904124",
"longitude": "-84.511698"
},
"Kingston Fossil PlantKIF-G1B": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.896122",
"longitude": "-84.508339"
},
"Kingston Fossil PlantKIF-G3A": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.892041",
"longitude": "-84.510798"
},
"Kingston Fossil PlantKIF-G3B": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.892041",
"longitude": "-84.510798"
},
"Kingston Fossil PlantKIF-G4B": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [
{
"contaminant": "arsenic",
"concentration": "0.0101"
}
],
"2016": [],
"2017": [],
"latitude": "35.891627",
"longitude": "-84.508799"
},
"Kingston Fossil PlantKIF-G5A": {
"2010": [
{
"contaminant": "selenium",
"concentration": "0.379"
}
],
"2011": [
{
"contaminant": "selenium",
"concentration": "0.137"
},
{
"contaminant": "selenium",
"concentration": "0.102"
},
{
"contaminant": "selenium",
"concentration": "0.07275"
},
{
"contaminant": "selenium",
"concentration": "0.0608"
}
],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.891705",
"longitude": "-84.506888"
},
"Kingston Fossil PlantKIF-G5B": {
"2010": [
{
"contaminant": "selenium",
"concentration": "0.412"
}
],
"2011": [
{
"contaminant": "selenium",
"concentration": "0.188"
},
{
"contaminant": "selenium",
"concentration": "0.141"
},
{
"contaminant": "selenium",
"concentration": "0.131"
},
{
"contaminant": "selenium",
"concentration": "0.144"
}
],
"2012": [
{
"contaminant": "selenium",
"concentration": "0.124"
},
{
"contaminant": "selenium",
"concentration": "0.124"
},
{
"contaminant": "selenium",
"concentration": "0.117"
},
{
"contaminant": "selenium",
"concentration": "0.106"
}
],
"2013": [
{
"contaminant": "selenium",
"concentration": "0.0874"
},
{
"contaminant": "selenium",
"concentration": "0.07595"
},
{
"contaminant": "selenium",
"concentration": "0.0672"
},
{
"contaminant": "selenium",
"concentration": "0.059"
}
],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.891705",
"longitude": "-84.506888"
},
"Kingston Fossil PlantKIF-G6B": {
"2010": [
{
"contaminant": "selenium",
"concentration": "0.0993"
}
],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.892141",
"longitude": "-84.504912"
},
"Kingston Fossil PlantKIF-G2A": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [
{
"contaminant": "arsenic",
"concentration": "0.0451"
},
{
"contaminant": "beryllium",
"concentration": "0.0101"
},
{
"contaminant": "cobalt",
"concentration": "0.0398"
},
{
"contaminant": "lead",
"concentration": "0.149"
},
{
"contaminant": "manganese",
"concentration": "6.27"
}
],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.897368",
"longitude": "-84.505996"
},
"Kingston Fossil PlantKIF-G2B": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.897368",
"longitude": "-84.505996"
},
"Kingston Fossil PlantKIF-G7A": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.892629",
"longitude": "-84.503279"
},
"Kingston Fossil PlantKIF-G7B": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.892629",
"longitude": "-84.503279"
},
"Kingston Fossil PlantKIF-G8A": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [
{
"contaminant": "cobalt",
"concentration": "0.00656"
},
{
"contaminant": "manganese",
"concentration": "0.307"
}
],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.893625",
"longitude": "-84.500839"
},
"Kingston Fossil PlantKIF-G8B": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.893625",
"longitude": "-84.500839"
},
"Kingston Fossil PlantKIF-G9A": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [
{
"contaminant": "manganese",
"concentration": "1.34"
}
],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.89427",
"longitude": "-84.500234"
},
"Kingston Fossil PlantKIF-G9B": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [
{
"contaminant": "manganese",
"concentration": "2.1"
}
],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.89427",
"longitude": "-84.500234"
},
"Kingston Fossil PlantKIF-G10A": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.895426",
"longitude": "-84.498771"
},
"Kingston Fossil PlantKIF-G10B": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.895426",
"longitude": "-84.498771"
},
"Kingston Fossil Plant22B": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "35.909305",
"longitude": "-84.504861"
},
"Kingston Fossil Plant27A": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [
{
"contaminant": "cobalt",
"concentration": "0.0168"
},
{
"contaminant": "cobalt",
"concentration": "0.0096"
}
],
"2015": [
{
"contaminant": "cobalt",
"concentration": "0.00843"
},
{
"contaminant": "cobalt",
"concentration": "0.00821"
},
{
"contaminant": "cobalt",
"concentration": "0.00771"
},
{
"contaminant": "cobalt",
"concentration": "0.00788"
}
],
"2016": [
{
"contaminant": "cobalt",
"concentration": "0.00676"
},
{
"contaminant": "cobalt",
"concentration": "0.00711"
}
],
"2017": [],
"latitude": "0.0",
"longitude": "0.0"
},
"Kingston Fossil PlantGW01": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "0.0",
"longitude": "0.0"
},
"Kingston Fossil Plant27B": {
"2010": [],
"2011": [],
"2012": [],
"2013": [],
"2014": [],
"2015": [],
"2016": [],
"2017": [],
"latitude": "0.0",
"longitude": "0.0"
},
"Kingston | |
return @{{x}} * @{{y}}.__v;
case 0x0201:
return @{{x}}.__v * @{{y}};
case 0x0202:
return new @{{int}}(@{{x}}.__v * @{{y}}.__v);
case 0x0204:
return (new @{{long}}(@{{x}}.__v)).__mul(@{{y}});
case 0x0402:
return @{{x}}.__mul(new @{{long}}(@{{y}}.__v));
case 0x0404:
return @{{x}}.__mul(@{{y}});
}
if (!@{{x}}.__number__) {
if ( !@{{y}}.__number__
&& @{{y}}.__mro__.__array.length > @{{x}}.__mro__.__array.length
&& @{{isinstance}}(@{{y}}, @{{x}}.__class__)
&& typeof @{{y}}['__rmul__'] == 'function')
return @{{y}}.__rmul__(@{{x}});
if (typeof @{{x}}['__mul__'] == 'function') return @{{x}}.__mul__(@{{y}});
}
if (!@{{y}}.__number__ && typeof @{{y}}['__rmul__'] == 'function') return @{{y}}.__rmul__(@{{x}});
}
""")
raise TypeError("unsupported operand type(s) for *: '%r', '%r'" % (x, y))
def op_mod(x, y):
    """Implement the Python ``%`` operator for pyjs values.

    Dispatch order:
      1. Numeric fast paths keyed on the ``__number__`` tags
         (0x01 = native JS number, 0x02 = boxed int, 0x04 = long).
         JS ``%`` truncates toward zero, so a negative remainder is
         shifted by the divisor when the divisor is positive, matching
         Python's floored modulo for positive divisors.
      2. Native JS strings delegate to sprintf() (``"fmt" % args``).
      3. The __mod__/__rmod__ protocol, preferring the right operand's
         __rmod__ when it is an instance of a subclass of the left
         operand's class (deeper MRO), mirroring CPython's rule.

    Raises TypeError when no combination applies.
    """
    JS("""
    if (@{{x}} !== null && @{{y}} !== null) {
        switch ((@{{x}}.__number__ << 8) | @{{y}}.__number__) {
            case 0x0101:
            case 0x0104:
            case 0x0401:
                if (@{{y}} == 0) throw $pyce(@{{ZeroDivisionError}}('float divmod()'));
                var v = @{{x}} % @{{y}};
                return (v < 0 && @{{y}} > 0 ? v + @{{y}} : v);
            case 0x0102:
                if (@{{y}}.__v == 0) throw $pyce(@{{ZeroDivisionError}}('float divmod()'));
                var v = @{{x}} % @{{y}}.__v;
                return (v < 0 && @{{y}}.__v > 0 ? v + @{{y}}.__v : v);
            case 0x0201:
                if (@{{y}} == 0) throw $pyce(@{{ZeroDivisionError}}('float divmod()'));
                var v = @{{x}}.__v % @{{y}};
                return (v < 0 && @{{y}} > 0 ? v + @{{y}} : v);
            case 0x0202:
                if (@{{y}}.__v == 0) throw $pyce(@{{ZeroDivisionError}}('integer division or modulo by zero'));
                var v = @{{x}}.__v % @{{y}}.__v;
                return new @{{int}}(v < 0 && @{{y}}.__v > 0 ? v + @{{y}}.__v : v);
            case 0x0204:
                return (new @{{long}}(@{{x}}.__v)).__mod(@{{y}});
            case 0x0402:
                return @{{x}}.__mod(new @{{long}}(@{{y}}.__v));
            case 0x0404:
                return @{{x}}.__mod(@{{y}});
        }
        if (typeof @{{x}} == 'string') {
            return @{{sprintf}}(@{{x}}, @{{y}});
        }
        if (!@{{x}}.__number__) {
            if ( !@{{y}}.__number__
                 && @{{y}}.__mro__.__array.length > @{{x}}.__mro__.__array.length
                 && @{{isinstance}}(@{{y}}, @{{x}}.__class__)
                 && typeof @{{y}}['__rmod__'] == 'function')
                return @{{y}}.__rmod__(@{{x}});
            if (typeof @{{x}}['__mod__'] == 'function') return @{{x}}.__mod__(@{{y}});
        }
        if (!@{{y}}.__number__ && typeof @{{y}}['__rmod__'] == 'function') return @{{y}}.__rmod__(@{{x}});
    }
    """)
    # BUGFIX (case 0x0201 above): the sign adjustment previously read
    # ``@{{y}}.__v`` although y is a native JS number in that branch, so
    # negative remainders were never corrected (``undefined > 0`` is
    # always false); it now uses ``@{{y}}`` directly, like case 0x0101.
    raise TypeError("unsupported operand type(s) for %: '%r', '%r'" % (x, y))
def op_pow(x, y):
    """Implement the Python ``**`` operator for pyjs values.

    Numeric fast paths keyed on the ``__number__`` tags (0x01 = native
    JS number, 0x02 = boxed int, 0x04 = long) use Math.pow or the
    boxed types' __pow__/__pow methods; otherwise the __rpow__ of a
    right operand whose class is a subclass of the left's (deeper MRO)
    is preferred, then __pow__, then __rpow__, mirroring CPython.
    Raises TypeError when no combination applies.
    """
    JS("""
    if (@{{x}} !== null && @{{y}} !== null) {
        switch ((@{{x}}.__number__ << 8) | @{{y}}.__number__) {
            case 0x0101:
            case 0x0104:
            case 0x0401:
                return Math.pow(@{{x}}, @{{y}});
            case 0x0102:
                return Math.pow(@{{x}},@{{y}}.__v);
            case 0x0201:
                return Math.pow(@{{x}}.__v,@{{y}});
            case 0x0202:
                return @{{x}}.__pow__(@{{y}});
            case 0x0204:
                return (new @{{long}}(@{{x}}.__v)).__pow(@{{y}});
            case 0x0402:
                return @{{x}}.__pow(new @{{long}}(@{{y}}.__v));
            case 0x0404:
                return @{{x}}.__pow(@{{y}});
        }
        if (!@{{x}}.__number__) {
            if ( !@{{y}}.__number__
                 && @{{y}}.__mro__.__array.length > @{{x}}.__mro__.__array.length
                 && @{{isinstance}}(@{{y}}, @{{x}}.__class__)
                 && typeof @{{y}}['__rpow__'] == 'function')
                return @{{y}}.__rpow__(@{{x}});
            if (typeof @{{x}}['__pow__'] == 'function') return @{{x}}.__pow__(@{{y}});
        }
        if (!@{{y}}.__number__ && typeof @{{y}}['__rpow__'] == 'function') return @{{y}}.__rpow__(@{{x}});
    }
    """)
    # BUGFIX: the message previously said "for %" (copy-paste from
    # op_mod); this function implements the ** operator.
    raise TypeError("unsupported operand type(s) for **: '%r', '%r'" % (x, y))
def op_invert(v):
    """Implement the unary ``~`` (bitwise invert) operator.

    Delegates to the operand's __invert__ method when present; raises
    TypeError otherwise.
    """
    JS("""
    if (@{{v}} !== null) {
        if (typeof @{{v}}['__invert__'] == 'function') return @{{v}}.__invert__();
    }
    """)
    # BUGFIX: the message previously said "unary -" (copy-paste from the
    # negation shim); this function implements unary ~, and CPython's
    # message for it is "bad operand type for unary ~: ...".
    raise TypeError("bad operand type for unary ~: '%r'" % v)
def op_bitshiftleft(x, y):
    """Implement the Python ``<<`` operator for pyjs values.

    Fast paths for int/long pairs keyed on the ``__number__`` tags
    (0x02 = boxed int, 0x04 = long), then the __lshift__/__rlshift__
    protocol (honouring NotImplemented); raises TypeError when no
    combination applies.
    """
    JS("""
    if (@{{x}} !== null && @{{y}} !== null) {
        switch ((@{{x}}.__number__ << 8) | @{{y}}.__number__) {
            case 0x0202:
                return @{{x}}.__lshift__(@{{y}});
            case 0x0204:
                return @{{y}}.__rlshift__(@{{x}});
            case 0x0402:
                return @{{x}}.__lshift(@{{y}}.__v);
            case 0x0404:
                return @{{x}}.__lshift(@{{y}}.valueOf());
        }
        if (typeof @{{x}}['__lshift__'] == 'function') {
            var v = @{{x}}.__lshift__(@{{y}});
            if (v !== @{{NotImplemented}}) return v;
        }
        if (typeof @{{y}}['__rlshift__'] != 'undefined') return @{{y}}.__rlshift__(@{{x}});
    }
    """)
    # Fall through: no fast path matched and neither operand handled it.
    raise TypeError("unsupported operand type(s) for <<: '%r', '%r'" % (x, y))
def op_bitshiftright(x, y):
    """Implement the Python ``>>`` operator for pyjs values.

    Mirror image of op_bitshiftleft: int/long fast paths keyed on the
    ``__number__`` tags (0x02 = boxed int, 0x04 = long), then the
    __rshift__/__rrshift__ protocol (honouring NotImplemented); raises
    TypeError when no combination applies.
    """
    JS("""
    if (@{{x}} !== null && @{{y}} !== null) {
        switch ((@{{x}}.__number__ << 8) | @{{y}}.__number__) {
            case 0x0202:
                return @{{x}}.__rshift__(@{{y}});
            case 0x0204:
                return @{{y}}.__rrshift__(@{{x}});
            case 0x0402:
                return @{{x}}.__rshift(@{{y}}.__v);
            case 0x0404:
                return @{{x}}.__rshift(@{{y}}.valueOf());
        }
        if (typeof @{{x}}['__rshift__'] == 'function') {
            var v = @{{x}}.__rshift__(@{{y}});
            if (v !== @{{NotImplemented}}) return v;
        }
        if (typeof @{{y}}['__rrshift__'] != 'undefined') return @{{y}}.__rrshift__(@{{x}});
    }
    """)
    # Fall through: no fast path matched and neither operand handled it.
    raise TypeError("unsupported operand type(s) for >>: '%r', '%r'" % (x, y))
def op_bitand2(x, y):
    """Implement binary ``&`` for exactly two pyjs values.

    Fast paths for int/long pairs keyed on the ``__number__`` tags
    (0x02 = boxed int, 0x04 = long), then the __and__/__rand__
    protocol (honouring NotImplemented); raises TypeError when no
    combination applies.  The variadic chained form is op_bitand.
    """
    JS("""
    if (@{{x}} !== null && @{{y}} !== null) {
        switch ((@{{x}}.__number__ << 8) | @{{y}}.__number__) {
            case 0x0202:
                return @{{x}}.__and__(@{{y}});
            case 0x0204:
                return @{{y}}.__and(new @{{long}}(@{{x}}));
            case 0x0402:
                return @{{x}}.__and(new @{{long}}(@{{y}}.__v));
            case 0x0404:
                return @{{x}}.__and(@{{y}});
        }
        if (typeof @{{x}}['__and__'] == 'function') {
            var v = @{{x}}.__and__(@{{y}});
            if (v !== @{{NotImplemented}}) return v;
        }
        if (typeof @{{y}}['__rand__'] != 'undefined') return @{{y}}.__rand__(@{{x}});
    }
    """)
    # NOTE(review): the 0x0204 branch passes the boxed int x itself (not
    # x.__v) to the long constructor, unlike the 0x0402 branch --
    # presumably long() unboxes its argument; confirm against the long
    # implementation.
    raise TypeError("unsupported operand type(s) for &: '%r', '%r'" % (x, y))
op_bitand = JS("""function (args) {
var a;
if (args[0] !== null && args[1] !== null && args.length > 1) {
var res, r;
res = args[0];
for (i = 1; i < args.length; i++) {
if (typeof res['__and__'] == 'function') {
r = res;
res = res.__and__(args[i]);
if (res === @{{NotImplemented}} && typeof args[i]['__rand__'] == 'function') {
res = args[i].__rand__(r);
}
} else if (typeof args[i]['__rand__'] == 'function') {
res = args[i].__rand__(res);
} else {
res = null;
break;
}
if (res === @{{NotImplemented}}) {
res = null;
break;
}
}
if (res !== null) {
return res;
}
}
""")
raise TypeError("unsupported operand type(s) for &: " + ', '.join([repr(a) for a in list(args)]))
JS("""
};
""")
def op_bitxor2(x, y):
    """Implement binary ``^`` for exactly two pyjs values.

    Fast paths for int/long pairs keyed on the ``__number__`` tags
    (0x02 = boxed int, 0x04 = long), then the __xor__/__rxor__
    protocol (honouring NotImplemented); raises TypeError when no
    combination applies.  The variadic chained form is op_bitxor.
    """
    JS("""
    if (@{{x}} !== null && @{{y}} !== null) {
        switch ((@{{x}}.__number__ << 8) | @{{y}}.__number__) {
            case 0x0202:
                return @{{x}}.__xor__(@{{y}});
            case 0x0204:
                return @{{y}}.__xor(new @{{long}}(@{{x}}));
            case 0x0402:
                return @{{x}}.__xor(new @{{long}}(@{{y}}.__v));
            case 0x0404:
                return @{{x}}.__xor(@{{y}});
        }
        if (typeof @{{x}}['__xor__'] == 'function') {
            var v = @{{x}}.__xor__(@{{y}});
            if (v !== @{{NotImplemented}}) return v;
        }
        if (typeof @{{y}}['__rxor__'] != 'undefined') return @{{y}}.__rxor__(@{{x}});
    }
    """)
    # Fall through: no fast path matched and neither operand handled it.
    raise TypeError("unsupported operand type(s) for ^: '%r', '%r'" % (x, y))
op_bitxor = JS("""function (args) {
var a;
if (args[0] !== null && args[1] !== null && args.length > 1) {
var res, r;
res = args[0];
for (i = 1; i < args.length; i++) {
if (typeof res['__xor__'] == 'function') {
r = res;
res = res.__xor__(args[i]);
if (res === @{{NotImplemented}} && typeof args[i]['__rxor__'] == 'function') {
res = args[i].__rxor__(r);
}
} else if (typeof args[i]['__rxor__'] == 'function') {
res = args[i].__rxor__(res);
} else {
res = null;
break;
}
if (res === @{{NotImplemented}}) {
res = null;
break;
}
}
if (res !== null) {
return res;
}
}
""")
raise TypeError("unsupported operand type(s) for ^: " + ', '.join([repr(a) for a in args]))
JS("""
};
""")
def op_bitor2(x, y):
    """Implement binary ``|`` for exactly two pyjs values.

    Fast paths for int/long pairs keyed on the ``__number__`` tags
    (0x02 = boxed int, 0x04 = long), then the __or__/__ror__ protocol
    (honouring NotImplemented); raises TypeError when no combination
    applies.  The variadic chained form is op_bitor.
    """
    JS("""
    if (@{{x}} !== null && @{{y}} !== null) {
        switch ((@{{x}}.__number__ << 8) | @{{y}}.__number__) {
            case 0x0202:
                return @{{x}}.__or__(@{{y}});
            case 0x0204:
                return @{{y}}.__or(new @{{long}}(@{{x}}));
            case 0x0402:
                return @{{x}}.__or(new @{{long}}(@{{y}}.__v));
            case 0x0404:
                return @{{x}}.__or(@{{y}});
        }
        if (typeof @{{x}}['__or__'] == 'function') {
            var v = @{{x}}.__or__(@{{y}});
            if (v !== @{{NotImplemented}}) return v;
        }
        if (typeof @{{y}}['__ror__'] != 'undefined') {
            return @{{y}}.__ror__(@{{x}});
        }
    }
    """)
    # Fall through: no fast path matched and neither operand handled it.
    raise TypeError("unsupported operand type(s) for |: '%r', '%r'" % (x, y))
op_bitor = JS("""function (args) {
var a;
if (args[0] !== null && args[1] !== null && args.length > 1) {
var res, r;
res = args[0];
for (i = 1; i < args.length; i++) {
if (typeof res['__or__'] == 'function') {
r = res;
res = res.__or__(args[i]);
if (res === @{{NotImplemented}} && typeof args[i]['__ror__'] == 'function') {
res = args[i].__ror__(r);
}
} else if (typeof args[i]['__ror__'] == 'function') {
res = args[i].__ror__(res);
} else {
res = null;
break;
}
if (res === @{{NotImplemented}}) {
res = null;
break;
}
}
if (res !== null) {
return res;
}
}
""")
raise TypeError("unsupported operand type(s) for |: " + ', '.join([repr(a) for a in args]))
JS("""
};
""")
# All modules do (and should) take care of checking their parent:
# - If the parent is not loaded and initialized, call ___import___(parent, null)
# All modules are placed in sys.modules dict
# The module is first tried within the context
# If the depth > 1 (i.e. one or more dots in the path) then:
# Try the parent if it has an object that resolves to [context.]path
# If the module doesn't exist and dynamic loading is enabled, try dynamic loading
def ___import___(path, context, module_name=None, get_base=True):
sys = JS("$pyjs.loaded_modules['sys']")
pyjslib = JS("$pyjs.loaded_modules['pyjslib']")
if JS("@{{sys}}.__was_initialized__ != true"):
module = JS("$pyjs.loaded_modules[@{{path}}]")
module()
if path == 'sys':
module.modules = dict({'pyjslib': pyjslib,
'__builtin__':pyjslib,
'builtins':pyjslib,
'sys': module})
JS("$pyjs.loaded_modules['__builtin__'] = @{{pyjslib}};")
JS("$pyjs.loaded_modules['builtins'] = @{{pyjslib}};")
return module
importName = path
is_module_object = False
path_parts = path.__split('.') # make a javascript Array
depth = JS("@{{path_parts}}.length")
topName = JS("@{{path_parts}}[0]")
objName = JS("@{{path_parts}}[@{{path_parts}}.length-1]")
parentName = path_parts.slice(0, depth-1).join('.')
if context is None:
in_context = | |
finally:
self.exitRule()
return localctx
class FracContext(ParserRuleContext):
    """Parse-tree node for the ``frac`` rule: \\frac{upper}{lower}."""

    def __init__(self, parser, parent=None, invokingState=-1):
        super(LaTeXParser.FracContext, self).__init__(parent, invokingState)
        self.parser = parser
        self.upper = None  # ExprContext: numerator expression
        self.lower = None  # ExprContext: denominator expression

    def CMD_FRAC(self):
        """Return the single \\frac command token."""
        return self.getToken(LaTeXParser.CMD_FRAC, 0)

    def L_BRACE(self, i=None):
        """Return all '{' tokens, or the i-th one when an index is given."""
        if i is None:
            return self.getTokens(LaTeXParser.L_BRACE)
        return self.getToken(LaTeXParser.L_BRACE, i)

    def R_BRACE(self, i=None):
        """Return all '}' tokens, or the i-th one when an index is given."""
        if i is None:
            return self.getTokens(LaTeXParser.R_BRACE)
        return self.getToken(LaTeXParser.R_BRACE, i)

    def expr(self, i=None):
        """Return all child expr contexts, or the i-th one when indexed."""
        if i is None:
            return self.getTypedRuleContexts(LaTeXParser.ExprContext)
        return self.getTypedRuleContext(LaTeXParser.ExprContext, i)

    def getRuleIndex(self):
        return LaTeXParser.RULE_frac
def frac(self):
    """Parse the ``frac`` rule: ``\\frac { expr } { expr }``.

    ANTLR-generated -- the literal state numbers index the serialized
    ATN, so change the grammar rather than this method.  Returns the
    populated FracContext (``upper`` = first braced expr, ``lower`` =
    second); recognition errors are recorded on the context and handed
    to the error strategy for recovery instead of propagating.
    """
    localctx = LaTeXParser.FracContext(self, self._ctx, self.state)
    self.enterRule(localctx, 48, self.RULE_frac)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 279
        self.match(LaTeXParser.CMD_FRAC)
        self.state = 280
        self.match(LaTeXParser.L_BRACE)
        self.state = 281
        localctx.upper = self.expr()
        self.state = 282
        self.match(LaTeXParser.R_BRACE)
        self.state = 283
        self.match(LaTeXParser.L_BRACE)
        self.state = 284
        localctx.lower = self.expr()
        self.state = 285
        self.match(LaTeXParser.R_BRACE)
    except RecognitionException as re:
        # Record the failure, then let ANTLR report and resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class BinomContext(ParserRuleContext):
    """Parse-tree node for the ``binom`` rule: \\binom/\\dbinom/\\tbinom{n}{k}."""

    def __init__(self, parser, parent=None, invokingState=-1):
        super(LaTeXParser.BinomContext, self).__init__(parent, invokingState)
        self.parser = parser
        self.n = None  # ExprContext: upper argument
        self.k = None  # ExprContext: lower argument

    def L_BRACE(self, i=None):
        """Return all '{' tokens, or the i-th one when an index is given."""
        if i is None:
            return self.getTokens(LaTeXParser.L_BRACE)
        return self.getToken(LaTeXParser.L_BRACE, i)

    def R_BRACE(self, i=None):
        """Return all '}' tokens, or the i-th one when an index is given."""
        if i is None:
            return self.getTokens(LaTeXParser.R_BRACE)
        return self.getToken(LaTeXParser.R_BRACE, i)

    def CMD_BINOM(self):
        return self.getToken(LaTeXParser.CMD_BINOM, 0)

    def CMD_DBINOM(self):
        return self.getToken(LaTeXParser.CMD_DBINOM, 0)

    def CMD_TBINOM(self):
        return self.getToken(LaTeXParser.CMD_TBINOM, 0)

    def expr(self, i=None):
        """Return all child expr contexts, or the i-th one when indexed."""
        if i is None:
            return self.getTypedRuleContexts(LaTeXParser.ExprContext)
        return self.getTypedRuleContext(LaTeXParser.ExprContext, i)

    def getRuleIndex(self):
        return LaTeXParser.RULE_binom
def binom(self):
    """Parse the ``binom`` rule: one of \\binom/\\dbinom/\\tbinom
    followed by ``{ expr } { expr }``.

    ANTLR-generated -- the literal state numbers index the serialized
    ATN, so change the grammar rather than this method.  The bitmask
    test accepts any of the three binom command tokens; a mismatch is
    handled by inline single-token recovery.  Returns the populated
    BinomContext (``n`` = first braced expr, ``k`` = second).
    """
    localctx = LaTeXParser.BinomContext(self, self._ctx, self.state)
    self.enterRule(localctx, 50, self.RULE_binom)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 287
        _la = self._input.LA(1)
        # Lookahead must be one of the binom command token types.
        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << LaTeXParser.CMD_BINOM) | (1 << LaTeXParser.CMD_DBINOM) | (1 << LaTeXParser.CMD_TBINOM))) != 0)):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
        self.state = 288
        self.match(LaTeXParser.L_BRACE)
        self.state = 289
        localctx.n = self.expr()
        self.state = 290
        self.match(LaTeXParser.R_BRACE)
        self.state = 291
        self.match(LaTeXParser.L_BRACE)
        self.state = 292
        localctx.k = self.expr()
        self.state = 293
        self.match(LaTeXParser.R_BRACE)
    except RecognitionException as re:
        # Record the failure, then let ANTLR report and resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Func_normalContext(ParserRuleContext):
    """Parse-tree node for the ``func_normal`` rule.

    One accessor per recognized standard-function token (exp, log, ln,
    the trig functions and their inverses, and the hyperbolic family);
    each returns that token when it matched, following the usual ANTLR
    generated-context pattern.
    """

    def __init__(self, parser, parent=None, invokingState=-1):
        super(LaTeXParser.Func_normalContext, self).__init__(parent, invokingState)
        self.parser = parser

    # --- exponential / logarithmic tokens ---
    def FUNC_EXP(self):
        return self.getToken(LaTeXParser.FUNC_EXP, 0)
    def FUNC_LOG(self):
        return self.getToken(LaTeXParser.FUNC_LOG, 0)
    def FUNC_LN(self):
        return self.getToken(LaTeXParser.FUNC_LN, 0)

    # --- trigonometric tokens ---
    def FUNC_SIN(self):
        return self.getToken(LaTeXParser.FUNC_SIN, 0)
    def FUNC_COS(self):
        return self.getToken(LaTeXParser.FUNC_COS, 0)
    def FUNC_TAN(self):
        return self.getToken(LaTeXParser.FUNC_TAN, 0)
    def FUNC_CSC(self):
        return self.getToken(LaTeXParser.FUNC_CSC, 0)
    def FUNC_SEC(self):
        return self.getToken(LaTeXParser.FUNC_SEC, 0)
    def FUNC_COT(self):
        return self.getToken(LaTeXParser.FUNC_COT, 0)

    # --- inverse trigonometric tokens ---
    def FUNC_ARCSIN(self):
        return self.getToken(LaTeXParser.FUNC_ARCSIN, 0)
    def FUNC_ARCCOS(self):
        return self.getToken(LaTeXParser.FUNC_ARCCOS, 0)
    def FUNC_ARCTAN(self):
        return self.getToken(LaTeXParser.FUNC_ARCTAN, 0)
    def FUNC_ARCCSC(self):
        return self.getToken(LaTeXParser.FUNC_ARCCSC, 0)
    def FUNC_ARCSEC(self):
        return self.getToken(LaTeXParser.FUNC_ARCSEC, 0)
    def FUNC_ARCCOT(self):
        return self.getToken(LaTeXParser.FUNC_ARCCOT, 0)

    # --- hyperbolic and inverse hyperbolic tokens ---
    def FUNC_SINH(self):
        return self.getToken(LaTeXParser.FUNC_SINH, 0)
    def FUNC_COSH(self):
        return self.getToken(LaTeXParser.FUNC_COSH, 0)
    def FUNC_TANH(self):
        return self.getToken(LaTeXParser.FUNC_TANH, 0)
    def FUNC_ARSINH(self):
        return self.getToken(LaTeXParser.FUNC_ARSINH, 0)
    def FUNC_ARCOSH(self):
        return self.getToken(LaTeXParser.FUNC_ARCOSH, 0)
    def FUNC_ARTANH(self):
        return self.getToken(LaTeXParser.FUNC_ARTANH, 0)

    def getRuleIndex(self):
        return LaTeXParser.RULE_func_normal
def func_normal(self):
    """Parse the ``func_normal`` rule: a single named-function token.

    Consumes exactly one token from the set of recognized function names
    (exp, log, ln, the trig/arc-trig/hyperbolic families); any other token
    triggers inline error recovery.  ANTLR-generated code.
    """
    localctx = LaTeXParser.Func_normalContext(self, self._ctx, self.state)
    self.enterRule(localctx, 52, self.RULE_func_normal)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 295
        _la = self._input.LA(1)
        # Bitset membership test over all named-function token types.
        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << LaTeXParser.FUNC_EXP) | (1 << LaTeXParser.FUNC_LOG) | (1 << LaTeXParser.FUNC_LN) | (1 << LaTeXParser.FUNC_SIN) | (1 << LaTeXParser.FUNC_COS) | (1 << LaTeXParser.FUNC_TAN) | (1 << LaTeXParser.FUNC_CSC) | (1 << LaTeXParser.FUNC_SEC) | (1 << LaTeXParser.FUNC_COT) | (1 << LaTeXParser.FUNC_ARCSIN) | (1 << LaTeXParser.FUNC_ARCCOS) | (1 << LaTeXParser.FUNC_ARCTAN) | (1 << LaTeXParser.FUNC_ARCCSC) | (1 << LaTeXParser.FUNC_ARCSEC) | (1 << LaTeXParser.FUNC_ARCCOT) | (1 << LaTeXParser.FUNC_SINH) | (1 << LaTeXParser.FUNC_COSH) | (1 << LaTeXParser.FUNC_TANH) | (1 << LaTeXParser.FUNC_ARSINH) | (1 << LaTeXParser.FUNC_ARCOSH) | (1 << LaTeXParser.FUNC_ARTANH))) != 0)):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FuncContext(ParserRuleContext):
    """Parse-tree context for the ``func`` rule.

    Exposes accessors for every token/sub-rule any of the rule's
    alternatives may have matched (named functions, applied symbols,
    integrals, square roots, sums/products, limits).  The labeled fields
    ``root`` and ``base`` are populated only by the ``\\sqrt`` alternative.
    ANTLR-generated code.
    """
    def __init__(self, parser, parent=None, invokingState=-1):
        super(LaTeXParser.FuncContext, self).__init__(parent, invokingState)
        self.parser = parser
        self.root = None # ExprContext: optional bracketed root index of \sqrt
        self.base = None # ExprContext: brace-delimited radicand of \sqrt
    def func_normal(self):
        return self.getTypedRuleContext(LaTeXParser.Func_normalContext,0)
    def L_PAREN(self):
        return self.getToken(LaTeXParser.L_PAREN, 0)
    def func_arg(self):
        return self.getTypedRuleContext(LaTeXParser.Func_argContext,0)
    def R_PAREN(self):
        return self.getToken(LaTeXParser.R_PAREN, 0)
    def func_arg_noparens(self):
        return self.getTypedRuleContext(LaTeXParser.Func_arg_noparensContext,0)
    def subexpr(self):
        return self.getTypedRuleContext(LaTeXParser.SubexprContext,0)
    def supexpr(self):
        return self.getTypedRuleContext(LaTeXParser.SupexprContext,0)
    def args(self):
        return self.getTypedRuleContext(LaTeXParser.ArgsContext,0)
    def LETTER(self):
        return self.getToken(LaTeXParser.LETTER, 0)
    def SYMBOL(self):
        return self.getToken(LaTeXParser.SYMBOL, 0)
    def FUNC_INT(self):
        return self.getToken(LaTeXParser.FUNC_INT, 0)
    def DIFFERENTIAL(self):
        return self.getToken(LaTeXParser.DIFFERENTIAL, 0)
    def frac(self):
        return self.getTypedRuleContext(LaTeXParser.FracContext,0)
    def additive(self):
        return self.getTypedRuleContext(LaTeXParser.AdditiveContext,0)
    def FUNC_SQRT(self):
        return self.getToken(LaTeXParser.FUNC_SQRT, 0)
    def L_BRACE(self):
        return self.getToken(LaTeXParser.L_BRACE, 0)
    def R_BRACE(self):
        return self.getToken(LaTeXParser.R_BRACE, 0)
    def expr(self, i=None):
        # With i=None return all ExprContext children, otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(LaTeXParser.ExprContext)
        else:
            return self.getTypedRuleContext(LaTeXParser.ExprContext,i)
    def L_BRACKET(self):
        return self.getToken(LaTeXParser.L_BRACKET, 0)
    def R_BRACKET(self):
        return self.getToken(LaTeXParser.R_BRACKET, 0)
    def mp(self):
        return self.getTypedRuleContext(LaTeXParser.MpContext,0)
    def FUNC_SUM(self):
        return self.getToken(LaTeXParser.FUNC_SUM, 0)
    def FUNC_PROD(self):
        return self.getToken(LaTeXParser.FUNC_PROD, 0)
    def subeq(self):
        return self.getTypedRuleContext(LaTeXParser.SubeqContext,0)
    def FUNC_LIM(self):
        return self.getToken(LaTeXParser.FUNC_LIM, 0)
    def limit_sub(self):
        return self.getTypedRuleContext(LaTeXParser.Limit_subContext,0)
    def getRuleIndex(self):
        return LaTeXParser.RULE_func
def func(self):
    """Parse the ``func`` rule.

    Dispatches on the lookahead token to one of six alternatives:
      1. a named function (``\\sin``, ``\\log``, ...) with optional sub/sup
         decorations and a parenthesized or bare argument;
      2. a LETTER/SYMBOL applied to a parenthesized argument list;
      3. ``\\int`` with optional bounds and an optional integrand;
      4. ``\\sqrt`` with an optional bracketed root index;
      5. ``\\sum`` / ``\\prod`` with bounds and a body;
      6. ``\\lim`` with its subscript and a body.

    NOTE: ANTLR-generated code — state numbers and adaptivePredict decision
    indices come from the serialized ATN and must not be hand-edited.
    """
    localctx = LaTeXParser.FuncContext(self, self._ctx, self.state)
    self.enterRule(localctx, 54, self.RULE_func)
    self._la = 0 # Token type
    try:
        self.state = 370
        self._errHandler.sync(self)
        token = self._input.LA(1)
        # Alternative 1: named function, e.g. \sin, \exp, \log ...
        if token in [LaTeXParser.FUNC_EXP, LaTeXParser.FUNC_LOG, LaTeXParser.FUNC_LN, LaTeXParser.FUNC_SIN, LaTeXParser.FUNC_COS, LaTeXParser.FUNC_TAN, LaTeXParser.FUNC_CSC, LaTeXParser.FUNC_SEC, LaTeXParser.FUNC_COT, LaTeXParser.FUNC_ARCSIN, LaTeXParser.FUNC_ARCCOS, LaTeXParser.FUNC_ARCTAN, LaTeXParser.FUNC_ARCCSC, LaTeXParser.FUNC_ARCSEC, LaTeXParser.FUNC_ARCCOT, LaTeXParser.FUNC_SINH, LaTeXParser.FUNC_COSH, LaTeXParser.FUNC_TANH, LaTeXParser.FUNC_ARSINH, LaTeXParser.FUNC_ARCOSH, LaTeXParser.FUNC_ARTANH]:
            self.enterOuterAlt(localctx, 1)
            self.state = 297
            self.func_normal()
            self.state = 310
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,30,self._ctx)
            if la_ == 1:
                # Optional subscript followed by optional superscript.
                self.state = 299
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==LaTeXParser.UNDERSCORE:
                    self.state = 298
                    self.subexpr()
                self.state = 302
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==LaTeXParser.CARET:
                    self.state = 301
                    self.supexpr()
                pass
            elif la_ == 2:
                # Optional superscript followed by optional subscript.
                self.state = 305
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==LaTeXParser.CARET:
                    self.state = 304
                    self.supexpr()
                self.state = 308
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==LaTeXParser.UNDERSCORE:
                    self.state = 307
                    self.subexpr()
                pass
            self.state = 317
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,31,self._ctx)
            if la_ == 1:
                # Parenthesized argument: ( func_arg )
                self.state = 312
                self.match(LaTeXParser.L_PAREN)
                self.state = 313
                self.func_arg()
                self.state = 314
                self.match(LaTeXParser.R_PAREN)
                pass
            elif la_ == 2:
                # Bare argument without parentheses.
                self.state = 316
                self.func_arg_noparens()
                pass
            pass
        # Alternative 2: applied LETTER/SYMBOL call, e.g. f(x) or g_1(x, y).
        elif token in [LaTeXParser.LETTER, LaTeXParser.SYMBOL]:
            self.enterOuterAlt(localctx, 2)
            self.state = 319
            _la = self._input.LA(1)
            if not(_la==LaTeXParser.LETTER or _la==LaTeXParser.SYMBOL):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 321
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==LaTeXParser.UNDERSCORE:
                self.state = 320
                self.subexpr()
            self.state = 323
            self.match(LaTeXParser.L_PAREN)
            self.state = 324
            self.args()
            self.state = 325
            self.match(LaTeXParser.R_PAREN)
            pass
        # Alternative 3: integral with optional bounds in either order.
        elif token in [LaTeXParser.FUNC_INT]:
            self.enterOuterAlt(localctx, 3)
            self.state = 327
            self.match(LaTeXParser.FUNC_INT)
            self.state = 334
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [LaTeXParser.UNDERSCORE]:
                self.state = 328
                self.subexpr()
                self.state = 329
                self.supexpr()
                pass
            elif token in [LaTeXParser.CARET]:
                self.state = 331
                self.supexpr()
                self.state = 332
                self.subexpr()
                pass
            elif token in [LaTeXParser.ADD, LaTeXParser.SUB, LaTeXParser.L_PAREN, LaTeXParser.L_BRACE, LaTeXParser.L_BRACKET, LaTeXParser.BAR, LaTeXParser.FUNC_LIM, LaTeXParser.FUNC_INT, LaTeXParser.FUNC_SUM, LaTeXParser.FUNC_PROD, LaTeXParser.FUNC_EXP, LaTeXParser.FUNC_LOG, LaTeXParser.FUNC_LN, LaTeXParser.FUNC_SIN, LaTeXParser.FUNC_COS, LaTeXParser.FUNC_TAN, LaTeXParser.FUNC_CSC, LaTeXParser.FUNC_SEC, LaTeXParser.FUNC_COT, LaTeXParser.FUNC_ARCSIN, LaTeXParser.FUNC_ARCCOS, LaTeXParser.FUNC_ARCTAN, LaTeXParser.FUNC_ARCCSC, LaTeXParser.FUNC_ARCSEC, LaTeXParser.FUNC_ARCCOT, LaTeXParser.FUNC_SINH, LaTeXParser.FUNC_COSH, LaTeXParser.FUNC_TANH, LaTeXParser.FUNC_ARSINH, LaTeXParser.FUNC_ARCOSH, LaTeXParser.FUNC_ARTANH, LaTeXParser.FUNC_SQRT, LaTeXParser.CMD_FRAC, LaTeXParser.CMD_BINOM, LaTeXParser.CMD_DBINOM, LaTeXParser.CMD_TBINOM, LaTeXParser.CMD_MATHIT, LaTeXParser.DIFFERENTIAL, LaTeXParser.LETTER, LaTeXParser.NUMBER, LaTeXParser.SYMBOL]:
                # No bounds: the integrand starts immediately.
                pass
            else:
                pass
            self.state = 342
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,35,self._ctx)
            if la_ == 1:
                # Optional integrand followed by a differential (e.g. dx).
                self.state = 337
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,34,self._ctx)
                if la_ == 1:
                    self.state = 336
                    self.additive(0)
                self.state = 339
                self.match(LaTeXParser.DIFFERENTIAL)
                pass
            elif la_ == 2:
                self.state = 340
                self.frac()
                pass
            elif la_ == 3:
                self.state = 341
                self.additive(0)
                pass
            pass
        # Alternative 4: \sqrt with an optional [root] and a {base}.
        elif token in [LaTeXParser.FUNC_SQRT]:
            self.enterOuterAlt(localctx, 4)
            self.state = 344
            self.match(LaTeXParser.FUNC_SQRT)
            self.state = 349
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==LaTeXParser.L_BRACKET:
                self.state = 345
                self.match(LaTeXParser.L_BRACKET)
                self.state = 346
                localctx.root = self.expr()
                self.state = 347
                self.match(LaTeXParser.R_BRACKET)
            self.state = 351
            self.match(LaTeXParser.L_BRACE)
            self.state = 352
            localctx.base = self.expr()
            self.state = 353
            self.match(LaTeXParser.R_BRACE)
            pass
        # Alternative 5: \sum or \prod with bounds (either order) and a body.
        elif token in [LaTeXParser.FUNC_SUM, LaTeXParser.FUNC_PROD]:
            self.enterOuterAlt(localctx, 5)
            self.state = 355
            _la = self._input.LA(1)
            if not(_la==LaTeXParser.FUNC_SUM or _la==LaTeXParser.FUNC_PROD):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 362
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [LaTeXParser.UNDERSCORE]:
                self.state = 356
                self.subeq()
                self.state = 357
                self.supexpr()
                pass
            elif token in [LaTeXParser.CARET]:
                self.state = 359
                self.supexpr()
                self.state = 360
                self.subeq()
                pass
            else:
                raise NoViableAltException(self)
            self.state = 364
            self.mp(0)
            pass
        # Alternative 6: \lim with its subscript and a body.
        elif token in [LaTeXParser.FUNC_LIM]:
            self.enterOuterAlt(localctx, 6)
            self.state = 366
            self.match(LaTeXParser.FUNC_LIM)
            self.state = 367
            self.limit_sub()
            self.state = 368
            self.mp(0)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ArgsContext(ParserRuleContext):
    """Parse-tree context for the ``args`` rule.

    An argument list is represented recursively: ``expr`` is the head
    argument and ``args`` (when present) is the context for the remaining
    comma-separated tail.  ANTLR-generated code.
    """
    def __init__(self, parser, parent=None, invokingState=-1):
        super(LaTeXParser.ArgsContext, self).__init__(parent, invokingState)
        self.parser = parser
    def expr(self):
        return self.getTypedRuleContext(LaTeXParser.ExprContext,0)
    def args(self):
        return self.getTypedRuleContext(LaTeXParser.ArgsContext,0)
    def getRuleIndex(self):
        return LaTeXParser.RULE_args
def args(self):
localctx = LaTeXParser.ArgsContext(self, self._ctx, self.state)
self.enterRule(localctx, 56, self.RULE_args)
try:
self.state = 377
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,39,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 372
self.expr()
self.state = 373
self.match(LaTeXParser.T__0)
self.state = 374
self.args()
pass
elif la_ | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['DiscountCodeArgs', 'DiscountCode']
@pulumi.input_type
class DiscountCodeArgs:
    # tfgen-generated input-argument bag for the DiscountCode resource;
    # values are stored/retrieved through pulumi.set/pulumi.get.
    def __init__(__self__, *,
                 cart_discounts: pulumi.Input[Sequence[pulumi.Input[str]]],
                 code: pulumi.Input[str],
                 description: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 is_active: Optional[pulumi.Input[bool]] = None,
                 max_applications: Optional[pulumi.Input[int]] = None,
                 max_applications_per_customer: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 predicate: Optional[pulumi.Input[str]] = None,
                 valid_from: Optional[pulumi.Input[str]] = None,
                 valid_until: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a DiscountCode resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cart_discounts: The referenced matching cart discounts can be applied to the cart once the DiscountCode is added
        :param pulumi.Input[str] code: Unique identifier of this discount code. This value is added to the cart to enable the related cart discounts in the
               cart
        :param pulumi.Input[Mapping[str, Any]] description: [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] groups: The groups to which this discount code belong
        :param pulumi.Input[bool] is_active: Undocumented in the upstream provider schema — presumably whether the code is currently active; confirm against the commercetools API
        :param pulumi.Input[int] max_applications: The discount code can only be applied maxApplications times
        :param pulumi.Input[int] max_applications_per_customer: The discount code can only be applied maxApplicationsPerCustomer times per customer
        :param pulumi.Input[Mapping[str, Any]] name: [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        :param pulumi.Input[str] predicate: [Cart Predicate](https://docs.commercetools.com/api/projects/predicates#cart-predicates)
        :param pulumi.Input[str] valid_from: The time from which the discount can be applied on a cart. Before that time the code is invalid
        :param pulumi.Input[str] valid_until: The time until the discount can be applied on a cart. After that time the code is invalid
        """
        pulumi.set(__self__, "cart_discounts", cart_discounts)
        pulumi.set(__self__, "code", code)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if groups is not None:
            pulumi.set(__self__, "groups", groups)
        if is_active is not None:
            pulumi.set(__self__, "is_active", is_active)
        if max_applications is not None:
            pulumi.set(__self__, "max_applications", max_applications)
        if max_applications_per_customer is not None:
            pulumi.set(__self__, "max_applications_per_customer", max_applications_per_customer)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if predicate is not None:
            pulumi.set(__self__, "predicate", predicate)
        if valid_from is not None:
            pulumi.set(__self__, "valid_from", valid_from)
        if valid_until is not None:
            pulumi.set(__self__, "valid_until", valid_until)
    @property
    @pulumi.getter(name="cartDiscounts")
    def cart_discounts(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The referenced matching cart discounts can be applied to the cart once the DiscountCode is added
        """
        return pulumi.get(self, "cart_discounts")
    @cart_discounts.setter
    def cart_discounts(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "cart_discounts", value)
    @property
    @pulumi.getter
    def code(self) -> pulumi.Input[str]:
        """
        Unique identifier of this discount code. This value is added to the cart to enable the related cart discounts in the
        cart
        """
        return pulumi.get(self, "code")
    @code.setter
    def code(self, value: pulumi.Input[str]):
        pulumi.set(self, "code", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The groups to which this discount code belong
        """
        return pulumi.get(self, "groups")
    @groups.setter
    def groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "groups", value)
    @property
    @pulumi.getter(name="isActive")
    def is_active(self) -> Optional[pulumi.Input[bool]]:
        """
        Undocumented in the upstream provider schema — presumably whether the
        discount code is currently active; confirm against the commercetools API.
        """
        return pulumi.get(self, "is_active")
    @is_active.setter
    def is_active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_active", value)
    @property
    @pulumi.getter(name="maxApplications")
    def max_applications(self) -> Optional[pulumi.Input[int]]:
        """
        The discount code can only be applied maxApplications times
        """
        return pulumi.get(self, "max_applications")
    @max_applications.setter
    def max_applications(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_applications", value)
    @property
    @pulumi.getter(name="maxApplicationsPerCustomer")
    def max_applications_per_customer(self) -> Optional[pulumi.Input[int]]:
        """
        The discount code can only be applied maxApplicationsPerCustomer times per customer
        """
        return pulumi.get(self, "max_applications_per_customer")
    @max_applications_per_customer.setter
    def max_applications_per_customer(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_applications_per_customer", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def predicate(self) -> Optional[pulumi.Input[str]]:
        """
        [Cart Predicate](https://docs.commercetools.com/api/projects/predicates#cart-predicates)
        """
        return pulumi.get(self, "predicate")
    @predicate.setter
    def predicate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "predicate", value)
    @property
    @pulumi.getter(name="validFrom")
    def valid_from(self) -> Optional[pulumi.Input[str]]:
        """
        The time from which the discount can be applied on a cart. Before that time the code is invalid
        """
        return pulumi.get(self, "valid_from")
    @valid_from.setter
    def valid_from(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "valid_from", value)
    @property
    @pulumi.getter(name="validUntil")
    def valid_until(self) -> Optional[pulumi.Input[str]]:
        """
        The time until the discount can be applied on a cart. After that time the code is invalid
        """
        return pulumi.get(self, "valid_until")
    @valid_until.setter
    def valid_until(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "valid_until", value)
@pulumi.input_type
class _DiscountCodeState:
    # tfgen-generated state bag: all fields optional because any subset may
    # be known when looking up / importing an existing DiscountCode.
    def __init__(__self__, *,
                 cart_discounts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 code: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 is_active: Optional[pulumi.Input[bool]] = None,
                 max_applications: Optional[pulumi.Input[int]] = None,
                 max_applications_per_customer: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 predicate: Optional[pulumi.Input[str]] = None,
                 valid_from: Optional[pulumi.Input[str]] = None,
                 valid_until: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering DiscountCode resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cart_discounts: The referenced matching cart discounts can be applied to the cart once the DiscountCode is added
        :param pulumi.Input[str] code: Unique identifier of this discount code. This value is added to the cart to enable the related cart discounts in the
               cart
        :param pulumi.Input[Mapping[str, Any]] description: [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] groups: The groups to which this discount code belong
        :param pulumi.Input[bool] is_active: Undocumented in the upstream provider schema — presumably whether the code is currently active; confirm against the commercetools API
        :param pulumi.Input[int] max_applications: The discount code can only be applied maxApplications times
        :param pulumi.Input[int] max_applications_per_customer: The discount code can only be applied maxApplicationsPerCustomer times per customer
        :param pulumi.Input[Mapping[str, Any]] name: [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        :param pulumi.Input[str] predicate: [Cart Predicate](https://docs.commercetools.com/api/projects/predicates#cart-predicates)
        :param pulumi.Input[str] valid_from: The time from which the discount can be applied on a cart. Before that time the code is invalid
        :param pulumi.Input[str] valid_until: The time until the discount can be applied on a cart. After that time the code is invalid
        :param pulumi.Input[int] version: Undocumented in the upstream provider schema — NOTE(review): presumably the commercetools resource version; confirm
        """
        if cart_discounts is not None:
            pulumi.set(__self__, "cart_discounts", cart_discounts)
        if code is not None:
            pulumi.set(__self__, "code", code)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if groups is not None:
            pulumi.set(__self__, "groups", groups)
        if is_active is not None:
            pulumi.set(__self__, "is_active", is_active)
        if max_applications is not None:
            pulumi.set(__self__, "max_applications", max_applications)
        if max_applications_per_customer is not None:
            pulumi.set(__self__, "max_applications_per_customer", max_applications_per_customer)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if predicate is not None:
            pulumi.set(__self__, "predicate", predicate)
        if valid_from is not None:
            pulumi.set(__self__, "valid_from", valid_from)
        if valid_until is not None:
            pulumi.set(__self__, "valid_until", valid_until)
        if version is not None:
            pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter(name="cartDiscounts")
    def cart_discounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The referenced matching cart discounts can be applied to the cart once the DiscountCode is added
        """
        return pulumi.get(self, "cart_discounts")
    @cart_discounts.setter
    def cart_discounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "cart_discounts", value)
    @property
    @pulumi.getter
    def code(self) -> Optional[pulumi.Input[str]]:
        """
        Unique identifier of this discount code. This value is added to the cart to enable the related cart discounts in the
        cart
        """
        return pulumi.get(self, "code")
    @code.setter
    def code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "code", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The groups to which this discount code belong
        """
        return pulumi.get(self, "groups")
    @groups.setter
    def groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "groups", value)
    @property
    @pulumi.getter(name="isActive")
    def is_active(self) -> Optional[pulumi.Input[bool]]:
        """
        Undocumented in the upstream provider schema — presumably whether the
        discount code is currently active; confirm against the commercetools API.
        """
        return pulumi.get(self, "is_active")
    @is_active.setter
    def is_active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_active", value)
    @property
    @pulumi.getter(name="maxApplications")
    def max_applications(self) -> Optional[pulumi.Input[int]]:
        """
        The discount code can only be applied maxApplications times
        """
        return pulumi.get(self, "max_applications")
    @max_applications.setter
    def max_applications(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_applications", value)
    @property
    @pulumi.getter(name="maxApplicationsPerCustomer")
    def max_applications_per_customer(self) -> Optional[pulumi.Input[int]]:
        """
        The discount code can only be applied maxApplicationsPerCustomer times per customer
        """
        return pulumi.get(self, "max_applications_per_customer")
    @max_applications_per_customer.setter
    def max_applications_per_customer(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_applications_per_customer", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def predicate(self) -> Optional[pulumi.Input[str]]:
        """
        [Cart Predicate](https://docs.commercetools.com/api/projects/predicates#cart-predicates)
        """
        return pulumi.get(self, "predicate")
    @predicate.setter
    def predicate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "predicate", value)
    @property
    @pulumi.getter(name="validFrom")
    def valid_from(self) -> Optional[pulumi.Input[str]]:
        """
        The time from which the discount can be applied on a cart. Before that time the code is invalid
        """
        return pulumi.get(self, "valid_from")
    @valid_from.setter
    def valid_from(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "valid_from", value)
    @property
    @pulumi.getter(name="validUntil")
    def valid_until(self) -> Optional[pulumi.Input[str]]:
        """
        The time until the discount can be applied on a cart. After that time the code is invalid
        """
        return pulumi.get(self, "valid_until")
    @valid_until.setter
    def valid_until(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "valid_until", value)
    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[int]]:
        """
        Undocumented in the upstream provider schema — NOTE(review): presumably
        the commercetools resource version used for concurrency control; confirm.
        """
        return pulumi.get(self, "version")
    @version.setter
    def version(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "version", value)
class DiscountCode(pulumi.CustomResource):
@overload
def | |
are sanitized correctly
"""
importer = ixf.Importer()
sanitized = importer.sanitize_vlans(json.loads(data_ixf_vlan.input)["vlan_list"])
assert sanitized == data_ixf_vlan.expected["vlan_list"]
@pytest.mark.django_db
def test_chained_consolidate_add_del(entities):
    """
    Tests the edge case of a consolidated-add-del operation
    being the requirement of a new consolidated-add-del operation,
    which would cause the bug described in #889
    """
    data = setup_test_data("ixf.member.3")  # asn1001
    network = entities["net"]["UPDATE_DISABLED"]  # asn1001
    ixlan = entities["ixlan"][0]
    # The scenario only makes sense for dual-stack networks; skip otherwise.
    if not network.ipv4_support or not network.ipv6_support:
        return
    # create netixlan that will be suggested to be deleted
    # as part of consolidate-add-del operation
    NetworkIXLan.objects.create(
        network=network,
        ixlan=ixlan,
        asn=network.asn,
        speed=10000,
        ipaddr4="172.16.17.32",
        ipaddr6=None,
        status="ok",
        is_rs_peer=True,
        operational=True,
    )
    # create consolidated add suggestion for netixlan above
    # (raw IX-F member payload stored on the IXFMemberData row)
    ixf_member_data_field = {
        "ixp_id": 42,
        "state": "connected",
        "if_list": [{"switch_id": 1, "if_speed": 20000, "if_type": "LR4"}],
        "vlan_list": [
            {
                "vlan_id": 0,
                "ipv4": {
                    "address": "172.16.17.32",
                    "routeserver": True,
                    "as_macro": "AS-NFLX-V4",
                },
                "ipv6": {
                    "address": "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:2906:2",
                    "routeserver": True,
                    "as_macro": "AS-NFLX-V6",
                },
            }
        ],
    }
    ixf_member_add = IXFMemberData.objects.create(
        asn=network.asn,
        ipaddr4="172.16.17.32",
        ipaddr6="fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:2906:2",
        ixlan=ixlan,
        speed=10000,
        fetched=datetime.datetime.now(datetime.timezone.utc),
        operational=True,
        is_rs_peer=True,
        status="ok",
        data=json.dumps(ixf_member_data_field),
    )
    # create consolidated delete suggestion for netixlan above
    ixf_member_del = IXFMemberData.objects.create(
        asn=network.asn,
        ipaddr4="172.16.17.32",
        ipaddr6=None,
        ixlan=ixlan,
        speed=10000,
        fetched=datetime.datetime.now(datetime.timezone.utc),
        operational=True,
        is_rs_peer=True,
        status="ok",
    )
    # Link the delete suggestion as the requirement of the add suggestion,
    # forming the consolidated add-del pair.
    ixf_member_add.set_requirement(ixf_member_del)
    assert ixf_member_add.action == "modify"
    assert ixf_member_add.primary_requirement == ixf_member_del
    # now run the import that will trigger a third consolidated-add-del
    # operation with the requirement of ixf_member_add as a deletion
    # causing a chain of requirements (#889)
    importer = ixf.Importer()
    importer.update(ixlan, data=data)
@override_settings(MAIL_DEBUG=False)
@pytest.mark.django_db
def test_send_email(entities, use_ip):
    """
    Runs an import against a network with `allow_ixp_update` disabled and
    verifies that notifying the resulting proposals sends exactly two emails.

    MAIL_DEBUG is forced off so `notify_proposals` exercises the real
    email path.  Setup mirrors test_suggest_add().
    """
    data = setup_test_data("ixf.member.3")  # asn1001
    network = entities["net"]["UPDATE_DISABLED"]  # asn1001
    ixlan = entities["ixlan"][0]
    # This appears in the remote-ixf data so should not
    # create a IXFMemberData instance
    entities["netixlan"].append(
        NetworkIXLan.objects.create(
            network=network,
            ixlan=ixlan,
            asn=network.asn,
            speed=10000,
            ipaddr4=use_ip(4, "172.16.17.32"),
            ipaddr6=use_ip(6, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:2906:3"),
            status="ok",
            is_rs_peer=True,
            operational=True,
        )
    )
    importer = ixf.Importer()
    importer.update(ixlan, data=data)
    # This should actually send an email
    importer.notify_proposals()
    assert importer.emails == 2
@pytest.mark.django_db
def test_ixlan_add_netixlan_no_redundant_save_on_null_ip(entities):
    """
    Tests that if ixlan.add_netixlan receives a netixlan which
    has either ipaddr4 or ipaddr6 nulled it will not cause redundant
    saves to already deleted netixlans that also have that same field
    nulled (#1019)
    """
    network = entities["net"]["UPDATE_ENABLED"]
    ixlan = entities["ixlan"][0]
    # create deleted netixlans
    with reversion.create_revision():
        NetworkIXLan.objects.create(
            ixlan=ixlan,
            network=network,
            asn=network.asn + 1,
            ipaddr4="172.16.31.10",
            ipaddr6="fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:2906:10",
            speed=1000,
            status="deleted",
        )
        NetworkIXLan.objects.create(
            ixlan=ixlan,
            network=network,
            asn=network.asn + 1,
            ipaddr4="172.16.58.3",
            ipaddr6="fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:2906:11",
            speed=1000,
            status="deleted",
        )
        # deleted netixlan with ipaddr4 nulled — must NOT be re-saved when a
        # new netixlan with a null ipaddr4 is added
        netixlan6 = NetworkIXLan.objects.create(
            ixlan=ixlan,
            network=network,
            asn=network.asn + 1,
            ipaddr4=None,
            ipaddr6="fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:2906:9",
            speed=1000,
            status="deleted",
        )
        # deleted netixlan with ipaddr6 nulled — symmetric case
        netixlan4 = NetworkIXLan.objects.create(
            ixlan=ixlan,
            network=network,
            asn=network.asn + 1,
            ipaddr4="172.16.17.32",
            ipaddr6=None,
            speed=1000,
            status="deleted",
        )
    netixlan4.refresh_from_db()
    netixlan6.refresh_from_db()
    # baseline: only the initial revision exists for both
    assert netixlan4.version == 1
    assert netixlan6.version == 1
    # create netixlans
    netixlan6_new = NetworkIXLan(
        ixlan=ixlan,
        network=network,
        asn=network.asn,
        ipaddr4=None,
        ipaddr6="fc00:db20:35b:7399::5",
        speed=1000,
        status="deleted",
    )
    netixlan4_new = NetworkIXLan(
        ixlan=ixlan,
        network=network,
        asn=network.asn,
        ipaddr4="172.16.58.3",
        ipaddr6=None,
        speed=1000,
        status="deleted",
    )
    with reversion.create_revision():
        netixlan6_new = ixlan.add_netixlan(netixlan6_new)
        netixlan4_new = ixlan.add_netixlan(netixlan4_new)
    netixlan4.refresh_from_db()
    netixlan6.refresh_from_db()
    # No further saves should have happened to the already
    # deleted netixlans: no notes written and version still at baseline
    assert not netixlan4.notes
    assert not netixlan6.notes
    assert netixlan4.version == 1
    assert netixlan6.version == 1
# FIXTURES
@pytest.fixture(params=[True, False])
def save(request):
    """Boolean fixture: each dependent test runs twice, with True and False."""
    return request.param
def entities_ipv4_only(_entities):
    """Reconfigure every network in *_entities* to advertise support for
    IPv4 only (``info_unicast`` on, ``info_ipv6`` off), persist each one,
    and return the mutated entity mapping.
    """
    for network in _entities["net"].values():
        network.info_unicast = True
        network.info_ipv6 = False
        network.save()
    return _entities
def entities_ipv6_only(_entities):
    """Reconfigure every network in *_entities* to advertise support for
    IPv6 only (``info_ipv6`` on, ``info_unicast`` off), persist each one,
    and return the mutated entity mapping.
    """
    for network in _entities["net"].values():
        network.info_ipv6 = True
        network.info_unicast = False
        network.save()
    return _entities
def entities_ipv4_ipv6_implied(_entities):
    """Clear both protocol flags on every network in *_entities* so that
    support for IPv4 and IPv6 is implied by neither being set, persist
    each one, and return the mutated entity mapping.
    """
    for network in _entities["net"].values():
        network.info_ipv6 = False
        network.info_unicast = False
        network.save()
    return _entities
def entities_ipv4_ipv6(_entities):
    """Mark every network in *_entities* as explicitly supporting both
    IPv4 (``info_unicast``) and IPv6 (``info_ipv6``), persist each one,
    and return the mutated entity mapping.
    """
    for network in _entities["net"].values():
        network.info_ipv6 = True
        network.info_unicast = True
        network.save()
    return _entities
def entities_base():
    """Create and return the shared test entity graph.

    Builds (inside a single reversion revision): one org, two exchanges
    with their ixlans and IPv4/IPv6 prefixes, three networks (one with
    allow_ixp_update enabled, two disabled), policy contacts, an empty
    netixlan list, and an admin + ixf_importer user.

    Returns a dict keyed by entity kind ("org", "ix", "ixlan", "ixpfx",
    "net", "netcontact", "netixlan").
    """
    entities = {}
    with reversion.create_revision():
        entities["org"] = [Organization.objects.create(name="Netflix", status="ok")]
        # create exchange(s)
        entities["ix"] = [
            InternetExchange.objects.create(
                name="Test Exchange One",
                org=entities["org"][0],
                status="ok",
                tech_email="<EMAIL>",
            ),
            InternetExchange.objects.create(
                name="Test Exchange Two",
                org=entities["org"][0],
                status="ok",
                tech_email="ix2@<EMAIL>",
            ),
        ]
        # create ixlan(s) (one ixlan is implicitly created per exchange)
        entities["ixlan"] = [ix.ixlan for ix in entities["ix"]]
        # create ixlan prefix(s): one IPv4 and one IPv6 prefix per ixlan
        entities["ixpfx"] = [
            IXLanPrefix.objects.create(
                ixlan=entities["ixlan"][0],
                status="ok",
                prefix="172.16.31.10/22",
                protocol="IPv4",
            ),
            IXLanPrefix.objects.create(
                ixlan=entities["ixlan"][0],
                status="ok",
                prefix="2001:7f8:1::/64",
                protocol="IPv6",
            ),
            IXLanPrefix.objects.create(
                ixlan=entities["ixlan"][1],
                status="ok",
                prefix="172.16.31.10/22",
                protocol="IPv4",
            ),
            IXLanPrefix.objects.create(
                ixlan=entities["ixlan"][1],
                status="ok",
                prefix="2001:7f8:4::/64",
                protocol="IPv6",
            ),
        ]
        # create network(s): keys describe their allow_ixp_update setting
        entities["net"] = {
            "UPDATE_ENABLED": Network.objects.create(
                name="Network w allow ixp update enabled",
                org=entities["org"][0],
                asn=2906,
                info_prefixes4=42,
                info_prefixes6=42,
                website="http://netflix.com/",
                policy_general="Open",
                policy_url="https://www.netflix.com/openconnect/",
                allow_ixp_update=True,
                status="ok",
                irr_as_set="AS-NFLX",
                info_unicast=True,
                info_ipv6=True,
            ),
            "UPDATE_DISABLED": Network.objects.create(
                name="Network w allow ixp update disabled",
                org=entities["org"][0],
                asn=1001,
                allow_ixp_update=False,
                status="ok",
                info_prefixes4=42,
                info_prefixes6=42,
                website="http://netflix.com/",
                policy_general="Open",
                policy_url="https://www.netflix.com/openconnect/",
                info_unicast=True,
                info_ipv6=True,
            ),
            "UPDATE_DISABLED_2": Network.objects.create(
                name="Network w allow ixp update disabled (2)",
                org=entities["org"][0],
                asn=1101,
                allow_ixp_update=False,
                status="ok",
                info_prefixes4=42,
                info_prefixes6=42,
                website="http://netflix.com/",
                policy_general="Open",
                policy_url="https://www.netflix.com/openconnect/",
                info_unicast=True,
                info_ipv6=True,
            ),
        }
        # policy contacts, used by notification assertions
        entities["netcontact"] = [
            NetworkContact.objects.create(
                email="network1@localhost",
                network=entities["net"]["UPDATE_ENABLED"],
                status="ok",
                role="Policy",
            ),
            NetworkContact.objects.create(
                email="network2@localhost",
                network=entities["net"]["UPDATE_DISABLED"],
                status="ok",
                role="Policy",
            ),
        ]
        # tests populate netixlans themselves
        entities["netixlan"] = []
        # users: the importer user is created for its side effect only
        admin_user = User.objects.create_user("admin", "admin@localhost", "admin")
        ixf_importer_user = User.objects.create_user(
            "ixf_importer", "ixf_importer<EMAIL>", "ixf_importer"
        )
        entities["org"][0].admin_usergroup.user_set.add(admin_user)
    return entities
@pytest.fixture(
    params=[
        entities_ipv4_ipv6,
        entities_ipv4_ipv6_implied,
        entities_ipv4_only,
        entities_ipv6_only,
    ]
)
def entities(request):
    """Base entity graph, parametrized over every protocol-support variant."""
    configure = request.param
    return configure(entities_base())
class UseIPAddrWrapper:
    """
    Helper for exercising netixlans that set only their ip4
    address, only their ip6 address, or both.
    """

    def __init__(self, use_ipv4, use_ipv6):
        self.use_ipv4 = use_ipv4
        self.use_ipv6 = use_ipv6

    def __call__(self, ipv, value=True):
        """Return ip_address(value) for protocol `ipv` if enabled, else None."""
        if ipv == 4:
            enabled = self.use_ipv4
        elif ipv == 6:
            enabled = self.use_ipv6
        else:
            raise ValueError(ipv)
        return ipaddress.ip_address(value) if enabled else None
@pytest.fixture(params=[(True, True), (True, False), (False, True)])
def use_ip(request):
    """
    Yield three UseIPAddrWrapper configurations:
    1) ip4 and ip6, 2) ip4 only, 3) ip6 only.
    """
    ipv4_enabled, ipv6_enabled = request.param
    return UseIPAddrWrapper(ipv4_enabled, ipv6_enabled)
@pytest.fixture(params=[(True, False), (False, True)])
def use_ip_alt(request):
    """
    Yield two UseIPAddrWrapper configurations:
    1) ip4 only, 2) ip6 only.
    """
    ipv4_enabled, ipv6_enabled = request.param
    return UseIPAddrWrapper(ipv4_enabled, ipv6_enabled)
# CUSTOM ASSERTIONS
def assert_ticket_exists(ticket_info):
    """
    ticket_info: list of (asn, ipaddr4, ipaddr6) tuples, one per deskpro
    ticket expected to exist.
    """
    assert DeskProTicket.objects.count() == len(ticket_info)

    for ticket in DeskProTicket.objects.all():
        print(ticket.subject)
        print("-" * 80)

    for asn, ip4, ip6 in ticket_info:
        suffix = f"AS{asn} {ip4} {ip6}"
        assert DeskProTicket.objects.filter(subject__endswith=suffix).exists()
def assert_network_email(network, email_info):
    """Assert the first import email to `network` contains every expected line."""
    network_email = IXFImportEmail.objects.filter(net=network.id).first()
    print("Network email")
    print("Body:")
    print(network_email.message)
    for info in email_info:
        expected = create_email_str(info)
        assert expected in network_email.message
def assert_ix_email(ix, email_info):
    """Assert the first import email to exchange `ix` contains every expected line."""
    ix_email = IXFImportEmail.objects.filter(ix=ix.id).first()
    print("IX email")
    print("Body:")
    print(ix_email.message)
    for info in email_info:
        expected = create_email_str(info)
        assert expected in ix_email.message
def create_email_str(email):
    """Render an (action, asn, ipaddr4, ipaddr6) tuple the way import emails do.

    Falsy ip fields are replaced with 'IPv4 not set' / 'IPv6 not set'.
    """
    parts = list(email)
    parts[2] = parts[2] or "IPv4 not set"
    parts[3] = parts[3] or "IPv6 not set"
    return "{} AS{} - {} - {}".format(*parts)
def assert_no_ticket_exists():
    """Assert that no deskpro tickets were created at all."""
    ticket_count = DeskProTicket.objects.count()
    assert ticket_count == 0
def assert_no_emails(network=None, ix=None):
    """
    Expect zero import emails — unless `network` only supports one
    protocol, in which case the protocol-conflict notice is expected.
    """
    partial_support = network and (not network.ipv4_support or not network.ipv6_support)
    if partial_support:
        assert_protocol_conflict_email(network, ix=ix, network=network)
    else:
        assert IXFImportEmail.objects.count() == 0
def assert_no_ix_email(ix):
    """Assert that no import emails were sent to exchange `ix`."""
    email_count = IXFImportEmail.objects.filter(ix=ix.id).count()
    assert email_count == 0
def assert_protocol_conflict_email(protocols, ix=None, network=None, solo=True):
    """
    Here we assert that protocol conflict notifications go out

    protocols should be the network instance that defines the protocol
    support

    if ix is set we assert the notification exists for the ix
    if network is set we assert the notification exists for the network

    if solo is True we assert that this is the only notification that exists

    Raises if `protocols` supports both protocols (no conflict possible).
    """
    if not protocols.ipv4_support:
        unsupported = 4
    elif not protocols.ipv6_support:
        unsupported = 6
    else:
        raise Exception("Both protocols appear supported")

    search = f"data provides IPv{unsupported} addresses for some "

    def _assert_conflict_email(qset):
        # shared assertions for the network and ix email queues
        if solo:
            assert qset.count() == 1
            assert qset.filter(message__contains=search).count() == 1
            assert not qset.filter(message__contains="CREATE").exists()
            assert not qset.filter(message__contains="MODIFY").exists()
            assert not qset.filter(message__contains="REMOVE").exists()
        else:
            # BUGFIX: was `....exists` without the call — a bound method is
            # always truthy, so the assertion could never fail.
            assert qset.filter(message__contains=search).exists()

    if network:
        _assert_conflict_email(IXFImportEmail.objects.filter(net=network))
    if ix:
        _assert_conflict_email(IXFImportEmail.objects.filter(ix=ix))
def assert_no_network_email(network):
    """
    Expect zero emails to `network` — unless it only supports one
    protocol, in which case the protocol-conflict notice is expected.
    """
    if not (network.ipv4_support and network.ipv6_support):
        assert_protocol_conflict_email(
            protocols=network,
            network=network,
        )
    else:
        assert IXFImportEmail.objects.filter(net=network.id).count() == 0
def ticket_list():
    """Snapshot of (id, subject) for every deskpro ticket, ordered by id."""
    tickets = DeskProTicket.objects.all().order_by("id")
    return [(ticket.id, ticket.subject) for ticket in tickets]
def email_list():
    """Snapshot of (id, subject) for every import email, ordered by id."""
    emails = IXFImportEmail.objects.all().order_by("id")
    return [(email.id, email.subject) for email in emails]
def ixf_member_data_list():
    """Snapshot of (id, ipaddr4, ipaddr6, updated) for all IXFMemberData rows, ordered by id."""
    members = IXFMemberData.objects.all().order_by("id")
    return [(member.id, member.ipaddr4, member.ipaddr6, member.updated) for member in members]
def netixlan_list():
    """Snapshot of (id, status, ipaddr4, ipaddr6, updated) for all netixlans, ordered by id."""
    netixlans = NetworkIXLan.objects.all().order_by("id")
    return [
        (netixlan.id, netixlan.status, netixlan.ipaddr4, netixlan.ipaddr6, netixlan.updated)
        for netixlan in netixlans
    ]
def assert_idempotent(importer, ixlan, data, save=True):
"""
run the importer for ixlan against data and
assert that there are
- no changes made to netixlan
- no changes made to deskpro ticket
- no changes made to ixf member data
"""
ixf_members = ixf_member_data_list()
tickets = ticket_list()
netixlans = netixlan_list()
emails = email_list()
def assert_no_changes():
assert ixf_members == | |
# <gh_stars>0  (scraper artifact — not valid Python; preserved as a comment)
#!/usr/bin/python
# -*- coding: utf_8 -*-
'''
FlickrSync
Sync files in local directory to Flickr.
'''
import codecs
import math
import fnmatch
import sys
import os
import re
import shutil
import logging
import datetime
import time
import tzlocal
import dateutil.parser
import random
import mimetypes
import threading
import traceback
import urllib
import webbrowser
import unicodedata
import json
import FileLock
from io import BytesIO
import httplib2
import oauth2 as oauth
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
try:
from ConfigParser import ConfigParser
except Exception:
from configparser import ConfigParser
# We need to import a XML Parser because Flickr doesn't return JSON for photo uploads -_-
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
except ImportError:
try:
#normal cElementTree install
import cElementTree as etree
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
raise ImportError('Failed to import ElementTree from any known place')
# --- Python 2 / Python 3 compatibility shims --------------------------------
# Expose a uniform queue module and URL/input helpers regardless of the
# interpreter's major version. On py3 unicode()/raw_input() are (re)defined
# to shadow the missing py2 builtins on purpose.
if sys.version_info >= (3, 0):
    import queue
    # NOTE: headers={} is a mutable default; safe here because it is only read.
    def urlrequest(url, data=None, headers={}):
        return urllib.request.Request(url, data, headers)
    def urlopen(u):
        return urllib.request.urlopen(u)
    def urlquote(u):
        return urllib.parse.quote(u)
    def urlencode(u):
        return urllib.parse.urlencode(u)
    def unicode(s):
        return str(s)
    def raw_input(s):
        return input(s)
else:
    import Queue
    queue = Queue
    import urllib2
    def urlrequest(url, data=None, headers={}):
        return urllib2.Request(url, data, headers)
    def urlopen(u):
        return urllib2.urlopen(u)
    def urlquote(u):
        return urllib.quote(u)
    def urlencode(u):
        return urllib.urlencode(u)
try:
# Python 2.6-2.7
from HTMLParser import HTMLParser
except ImportError:
# Python 3
from html.parser import HTMLParser
#------------------------------------------------
# Module-wide shared state
LOG = None  # optional logging.Logger; configured elsewhere, checked for truthiness
HP = HTMLParser()  # used to unescape HTML entities in API responses
UTF8 = codecs.lookup('utf-8')[3]  # CodecInfo[3] == UTF-8 StreamWriter factory
LTZ = tzlocal.get_localzone()  # local timezone for naive<->aware conversions
SENC = sys.getdefaultencoding()
FENC = sys.getfilesystemencoding()
DT1970 = datetime.datetime.fromtimestamp(0)  # local-time epoch reference (naive)
LOCK = threading.Lock()  # serializes console output (see uprint)
# init
mimetypes.init()
#------------------------------------------------
def normpath(s):
    """Return `s` normalized to Unicode NFC (composed) form."""
    composed = unicodedata.normalize('NFC', s)
    return composed
def uprint(s):
    """Print `s` under the global LOCK, degrading to encoded bytes when the
    console encoding rejects the text."""
    with LOCK:
        try:
            print(s)
            return
        except Exception:
            pass
        try:
            print(s.encode(SENC))
        except Exception:
            print(s.encode('utf-8'))
def tprint(i, s):
    """Print `s` prefixed with marker character `i` and the current HH:MM:SS time."""
    stamp = datetime.datetime.now().strftime('%H:%M:%S ')
    uprint(u'%s %s %s' % (i, stamp, s))
def udebug(s):
    """Debug-level log output.

    NOTE: the bare ``return`` below deliberately disables debug output;
    everything after it is intentionally unreachable. Delete the return
    (or gate it behind a flag) to re-enable.
    """
    return
    tprint('-', s)
    if LOG:
        LOG.debug(s)
def uinfo(s):
    """Emit `s` at info level: console first, then LOG when configured."""
    tprint('>', s)
    if LOG:
        LOG.info(s)
def uwarn(s):
    """Emit `s` at warning level: console first, then LOG when configured."""
    tprint('+', s)
    if LOG:
        # Logger.warn() is a deprecated alias; warning() is the supported name.
        LOG.warning(s)
def uerror(s):
    """Emit `s` at error level: console first, then LOG when configured."""
    tprint('!', s)
    if LOG:
        LOG.error(s)
def uexception(ex):
    """Dump the active traceback to stderr and record `ex` in LOG when configured."""
    traceback.print_exc()
    if LOG:
        LOG.exception(ex)
def szstr(n):
    """Format number `n` with thousands separators, e.g. 1234 -> '1,234'."""
    return format(n, ',')
def todate(s):
    """Parse datetime string `s` and return a naive local-time datetime.

    Pipeline: dateutil parse -> drop microseconds -> convert to local zone
    LTZ -> strip tzinfo so results compare against naive file mtimes.
    NOTE(review): assumes `s` carries an offset (astimezone on a naive
    datetime misbehaves on Python 2) — confirm inputs are offset-aware.
    """
    return dateutil.parser.parse(s).replace(microsecond=0).astimezone(LTZ).replace(tzinfo=None)
def tmstr(t):
    """Format datetime `t` as 'YYYY-MM-DD HH:MM:SS'."""
    return format(t, '%Y-%m-%d %H:%M:%S')
def mtstr(t):
    """Format naive local datetime `t` as ISO-8601 with UTC offset.

    NOTE(review): LTZ.localize() is the pytz API; assumes tzlocal returned a
    pytz timezone — confirm before upgrading tzlocal (>=3 returns zoneinfo,
    which has no localize()).
    """
    return LTZ.localize(t).strftime('%Y-%m-%dT%H:%M:%S%z')
def mtime(p):
    """Return path `p`'s modification time as a naive datetime, second precision."""
    stamp = os.path.getmtime(p)
    return datetime.datetime.fromtimestamp(stamp).replace(microsecond=0)
def ftime(dt):
    """Convert naive datetime `dt` to whole seconds since the local epoch DT1970
    (sub-second precision is discarded by tseconds)."""
    return tseconds(dt - DT1970)
def tseconds(td):
    """Total whole seconds in timedelta `td`; microseconds are ignored."""
    return td.days * 24 * 3600 + td.seconds
def touch(p, d=None):
    """Set path `p`'s access time to now and its mtime to `d` (or now)."""
    now_seconds = ftime(datetime.datetime.now())
    mtime_seconds = now_seconds if d is None else ftime(d)
    os.utime(p, (now_seconds, mtime_seconds))
def mkpdirs(p):
    """Ensure the parent directory of path `p` (and its ancestors) exists.

    Uses EAFP: try to create and tolerate "already exists". The previous
    exists()-then-makedirs() pair was racy — another process could create
    the directory between the check and the call and crash us.
    """
    d = os.path.dirname(p)
    try:
        os.makedirs(d)
    except OSError:
        # Fine if the directory now exists (pre-existing or created by a
        # concurrent writer); genuine failures still propagate.
        if not os.path.isdir(d):
            raise
def trimdir(p):
    """Drop one trailing path separator from `p` and return it as unicode
    ('' is passed through unchanged)."""
    if p == '':
        return p
    if p.endswith(os.path.sep):
        p = p[:-1]
    return unicode(p)
def get_json_item(o):
    """Extract Flickr's '_content' value from a JSON field and unescape HTML
    entities; falsy inputs (and falsy content) are returned as-is."""
    if not o:
        return o
    if isinstance(o, dict):
        o = o.get('_content')
    return HP.unescape(o) if o else o
def print_progress(page, pages):
    """Rewrite the 7-character 'NNN/MM ' progress counter in place on stdout."""
    shown = 0 if pages == 0 else page
    sys.stdout.write("\b" * 7 + "%3d/%-3d" % (shown, pages))
    sys.stdout.flush()
#---------------------------------------------------------------
class Config:
    """Singleton style/static initialization wrapper thing.

    Reads ``.flickrsync.ini`` from the CWD or the home directory (first
    found wins) and exposes typed settings as attributes.
    """
    def __init__(self):
        self.dict = ConfigParser()
        # first matching ini file wins; later paths are ignored
        paths = (os.path.abspath('.flickrsync.ini'), os.path.expanduser('~/.flickrsync.ini'))
        for filename in paths:
            if os.path.exists(filename):
                uinfo('using flickrsync.ini file "%s"' % os.path.abspath(filename))
                fp = codecs.open(filename, "r", "utf-8")
                # NOTE(review): readfp() is deprecated (removed in py3.12);
                # read_file() is the modern equivalent — confirm target version.
                self.dict.readfp(fp)
                fp.close()
                break
        # debug
        self.debug_log = self.get('debug_log', '')
        # error
        self.error_log = self.get('error_log', '')
        # Location
        self.root_dir = trimdir(os.path.abspath(self.get('root_dir', '.')))
        # self.get('trash_dir', self.root_dir + '/.trash')
        self.trash_dir = self.get('trash_dir', '')
        if self.trash_dir:
            self.trash_dir = trimdir(os.path.abspath(self.trash_dir))
        # user web browser (only the literal string 'true' enables it)
        self.webbrowser = True if self.get('webbrowser', 'true') == 'true' else False
        # max_file_size (1GB)
        self.max_file_size = int(self.get('max_file_size', '1073741824'))
        # max retry
        self.max_retry = int(self.get('max_retry', '3'))
        # Threads
        self.max_threads = int(self.get('num_threads', '4'))
        # includes / excludes are JSON lists of patterns
        self.includes = json.loads(self.get('includes', '[]'))
        self.excludes = json.loads(self.get('excludes', '[]'))
        self.fileexts = self.get('fileexts', 'jpeg jpg gif png tiff avi mov m4v mp4 wmv').split()
        # tag split
        self.tag_split_re = self.get('tag_split_re', r'[\\/ ,\_\-.;:]')
        # keys (application defaults, overridable from the ini)
        self.secret = self.get('secret', 'bba29b1d2de7b850')
        self.api_key = self.get('api_key', 'f061bc1174e85d8cebe458e817dc515b')
        # token; its mtime doubles as the last-sync timestamp
        self.token_file = self.get('token_file', '.flickrsync.token')
        if os.path.exists(self.token_file):
            self.last_sync = mtime(self.token_file)
        else:
            self.last_sync = DT1970
        # Flickr settings (visibility flags for uploads)
        self.hidden = self.get('hidden', 2)
        self.public = self.get('public', 0)
        self.friend = self.get('friend', 0)
        self.family = self.get('family', 1)

    def get(self, configparam, default=None):
        """get the value from the ini file's default section.

        Falls back to `default`; raises KeyError when the key is missing
        and no default was given.
        """
        defaults = self.dict.defaults()
        if configparam in defaults:
            return defaults[configparam]
        if not default is None:
            return default
        raise KeyError(configparam)
# global config — instantiated eagerly at import time so every part of the
# module shares the same Config object.
config = Config()
#-------------------------------------------------------------------
class FlickrAPIError(Exception):
    """ Generic error class, catch-all for most Tumblpy issues.
        from Tumblpy import FlickrAPIError, FlickrAuthError

        Error codes below 100 are treated as authentication failures and
        re-raised as FlickrAuthError from the constructor.
    """
    def __init__(self, msg, error_code=None):
        self.msg = msg
        self.code = error_code
        # auth-level failure: escalate to the more specific exception
        if error_code is not None and error_code < 100:
            raise FlickrAuthError(msg, error_code)

    def __str__(self):
        return repr(self.msg)
class FlickrAuthError(FlickrAPIError):
    """ Raised when you try to access a protected resource and it fails due to some issue with your authentication. """
    def __init__(self, msg, error_code=None):
        # deliberately does NOT call FlickrAPIError.__init__, which would
        # re-raise for codes < 100
        self.msg = msg
        self.code = error_code

    def __str__(self):
        return repr(self.msg)
class FlickrAPI(object):
    """OAuth1 client for Flickr's REST, upload and replace endpoints.

    Holds the consumer credentials plus (optionally) an access token, and
    signs every request with oauth2/httplib2. Upload/replace responses are
    XML and are converted to dicts; everything else is requested as JSON.
    """
    def __init__(self, api_key=None, api_secret=None, oauth_token=None, oauth_token_secret=None, callback_url=None, headers=None, client_args=None):
        # consumer credentials are mandatory; tokens may arrive later via
        # set_oauth_token()
        if not api_key or not api_secret:
            raise FlickrAPIError('Please supply an api_key and api_secret.')
        self.api_key = api_key
        self.api_secret = api_secret
        self.callback_url = callback_url
        self.api_base = 'https://api.flickr.com/services'
        self.up_api_base = 'https://up.flickr.com/services'
        self.rest_api_url = '%s/rest' % self.api_base
        self.upload_api_url = '%s/upload/' % self.up_api_base
        self.replace_api_url = '%s/replace/' % self.up_api_base
        self.request_token_url = 'https://www.flickr.com/services/oauth/request_token'
        self.access_token_url = 'https://www.flickr.com/services/oauth/access_token'
        self.authorize_url = 'https://www.flickr.com/services/oauth/authorize'
        self.headers = headers
        if self.headers is None:
            self.headers = {'User-agent': 'PythonFlickrSync'}
        self.oauth_token = None
        self.oauth_token_secret = None
        self.consumer = None
        self.token = None
        self.set_oauth_token(oauth_token, oauth_token_secret, client_args)

    def set_oauth_token(self, oauth_token, oauth_token_secret, client_args=None):
        """Install (or clear) the OAuth token pair and rebuild consumer/token."""
        self.oauth_token = oauth_token
        self.oauth_token_secret = oauth_token_secret
        self.client_args = client_args or {}
        if self.api_key is not None and self.api_secret is not None:
            self.consumer = oauth.Consumer(self.api_key, self.api_secret)
        # token is only built when BOTH halves are present
        if self.oauth_token is not None and self.oauth_token_secret is not None:
            self.token = oauth.Token(oauth_token, oauth_token_secret)

    def get_http(self):
        """Return the most-authenticated HTTP client currently possible."""
        # Filter down through the possibilities here - if they have a token, if they're first stage, etc.
        if self.consumer is not None and self.token is not None:
            return oauth.Client(self.consumer, self.token, **self.client_args)
        elif self.consumer is not None:
            return oauth.Client(self.consumer, **self.client_args)
        else:
            # If they don't do authentication, but still want to request unprotected resources, we need an opener.
            return httplib2.Http(**self.client_args)

    def get_authentication_tokens(self, perms=None):
        """ Returns an authorization url to give to your user.

            Parameters:
                perms - If None, this is ignored and uses your applications default perms. If set, will overwrite applications perms; acceptable perms (read, write, delete)
                            * read - permission to read private information
                            * write - permission to add, edit and delete photo metadata (includes 'read')
                            * delete - permission to delete photos (includes 'write' and 'read')
        """
        request_args = {}
        resp, content = self.get_http().request('%s?oauth_callback=%s' % (self.request_token_url, self.callback_url), 'GET', **request_args)
        if resp['status'] != '200':
            raise FlickrAuthError('There was a problem retrieving an authentication url.')
        request_tokens = parse_qsl(content)
        request_tokens = dict(request_tokens)
        # NOTE(review): the bytes key b'oauth_token' implies httplib2 returned
        # bytes content here (py3); confirm this also holds on py2 where keys
        # would be str.
        auth_url_params = {
            'oauth_token': request_tokens[b'oauth_token']
        }
        accepted_perms = ('read', 'write', 'delete')
        if perms and perms in accepted_perms:
            auth_url_params['perms'] = perms
        request_tokens['auth_url'] = '%s?%s' % (self.authorize_url, urlencode(auth_url_params))
        return request_tokens

    def get_auth_tokens(self, oauth_verifier):
        """ Returns 'final' tokens to store and used to make authorized calls to Flickr.

            Parameters:
                oauth_token - oauth_token returned from when the user is redirected after hitting the get_auth_url() function
                verifier - oauth_verifier returned from when the user is redirected after hitting the get_auth_url() function
        """
        params = {
            'oauth_verifier': oauth_verifier,
        }
        resp, content = self.get_http().request('%s?%s' % (self.access_token_url, urlencode(params)), 'GET')
        if resp['status'] != '200':
            raise FlickrAuthError('Getting access tokens failed: %s Response Status' % resp['status'])
        return dict(parse_qsl(content))

    def _convert_params(self, params):
        """Convert lists to strings with ',' between items.

        Also stringifies ints and UTF-8-encodes unicode values, in place.
        NOTE(review): `long` and `urllib._is_unicode` exist only on
        Python 2 — this method breaks on Python 3 as written; confirm the
        supported interpreter before relying on it.
        """
        for (key, value) in params.items():
            if isinstance(value, (int, long)):
                params[key] = str(value)
            elif urllib._is_unicode(value):
                params[key] = value.encode('UTF-8')
            elif isinstance(value, list):
                params[key] = ','.join([item for item in value])

    def api_request(self, endpoint=None, method='GET', params={}, files=None, replace=False):
        """Perform a signed Flickr API call.

        endpoint : REST method name (e.g. 'flickr.photos.search'); may be
                   None only for uploads (files given).
        method   : 'GET' or 'POST'.
        params   : API parameters (mutated by _convert_params).
        files    : dict/sequence of (field, path) to upload via multipart.
        replace  : when uploading, hit the replace endpoint instead.

        Returns the decoded JSON response as a dict; raises FlickrAPIError
        on HTTP or API-level failure.
        NOTE: params={} mutable default — only safe while callers never rely
        on it persisting.
        """
        headers = {}
        headers.update(self.headers)
        headers.update({'Content-Type': 'application/json'})
        headers.update({'Content-Length': '0'})
        if endpoint is None and files is None:
            raise FlickrAPIError('Please supply an API endpoint to hit.')
        # common query string for the JSON REST API
        qs = {
            'format': 'json',
            'nojsoncallback': 1,
            'method': endpoint,
            'api_key': self.api_key
        }
        self._convert_params(params)
        if method == 'POST':
            if files is not None:
                # To upload/replace file, we need to create a fake request
                # to sign parameters that are not multipart before we add
                # the multipart file to the parameters...
                # OAuth is not meant to sign multipart post data
                http_url = self.replace_api_url if replace else self.upload_api_url
                faux_req = oauth.Request.from_consumer_and_token(self.consumer,
                                                                 token=self.token,
                                                                 http_method="POST",
                                                                 http_url=http_url,
                                                                 parameters=params)
                faux_req.sign_request(oauth.SignatureMethod_HMAC_SHA1(),
                                      self.consumer,
                                      self.token)
                # signed OAuth fields become ordinary form fields
                fields = dict(parse_qsl(faux_req.to_postdata()))
                body, content_type = self.encode_multipart_formdata(fields, files)
                headers.update({
                    'Content-Type': content_type,
                    'Content-Length': str(len(body))
                })
                req = urlrequest(http_url, body, headers)
                try:
                    req = urlopen(req)
                except Exception as e:
                    # Making a fake resp var because urllib2.urlopen doesn't
                    # return a tuple like OAuth2 client.request does
                    resp = {'status': e.code}
                    content = e.read()
                # If no error, assume response was 200
                # NOTE(review): these two lines also execute after the except
                # branch, clobbering the error resp/content — and req.read()
                # fails when urlopen raised (req is still the Request object).
                # The error path looks effectively dead; consider an else:.
                resp = {'status': 200}
                content = req.read()
                # upload/replace endpoints answer in XML, not JSON
                content = etree.XML(content)
                stat = content.get('stat') or 'ok'
                if stat == 'fail':
                    if content.find('.//err') is not None:
                        code = content.findall('.//err[@code]')
                        msg = content.findall('.//err[@msg]')
                        if len(code) > 0:
                            if len(msg) == 0:
                                msg = 'An error occurred making your Flickr API request.'
                            else:
                                msg = msg[0].get('msg')
                            code = int(code[0].get('code'))
                            # normalize the XML error into the JSON error shape
                            content = {
                                'stat': 'fail',
                                'code': code,
                                'message': msg
                            }
                else:
                    photoid = content.find('.//photoid')
                    if photoid is not None:
                        photoid = photoid.text
                    content = {
                        'stat': 'ok',
                        'photoid': photoid
                    }
            else:
                # plain POST: everything rides on the query string
                url = self.rest_api_url + '?' + urlencode(qs) + '&' + urlencode(params)
                resp, content = self.get_http().request(url, 'POST', headers=headers)
        else:
            params.update(qs)
            url = '%s?%s' % (self.rest_api_url, urlencode(params))
            resp, content = self.get_http().request(url, 'GET', headers=headers)
        status = int(resp['status'])
        if status < 200 or status >= 300:
            raise FlickrAPIError('Flickr returned a Non-200 response.', error_code=status)
        #try except for if content is able to be decoded
        try:
            if type(content) != dict:
                content = json.loads(content)
        except ValueError:
            raise FlickrAPIError('Content is not valid JSON, unable to be decoded.')
        # API-level failures arrive with stat == 'fail' even on HTTP 200
        if content.get('stat') and content['stat'] == 'fail':
            raise FlickrAPIError('Flickr returned error code: %d. Message: %s' % \
                                 (content['code'], content['message']),
                                 error_code=content['code'])
        return dict(content)

    def get(self, endpoint=None, params=None):
        """Convenience wrapper: signed GET against the REST API."""
        params = params or {}
        return self.api_request(endpoint, method='GET', params=params)

    def post(self, endpoint=None, params=None, files=None, replace=False):
        """Convenience wrapper: signed POST (optionally multipart upload/replace)."""
        params = params or {}
        return self.api_request(endpoint, method='POST', params=params, files=files, replace=replace)

    def encode_multipart_formdata(self, fields, files, boundary=None):
        """ Encodes fields and files for uploading.

            fields is a sequence of (name, value) elements for regular form fields - or a dictionary.
            files is a sequence of (name, path) elements for files - or a dictionary.
            Return (body, content_type) ready for urllib2.Request instance
        """
        if boundary is None:
            boundary = '_' + str(random.random()) + '_' + str(random.random()) + '_'
        # text parts go through the UTF-8 StreamWriter wrapper (cbody),
        # raw file bytes are written straight to the underlying BytesIO (body)
        body = BytesIO()
        cbody = UTF8(body)
        if isinstance(fields, dict):
            fields = fields.items()
        for field, value in fields:
            cbody.write('--%s\r\n' % (boundary))
            cbody.write('Content-Disposition: form-data; name="%s"\r\n' % (field))
            cbody.write('Content-Type: text/plain\r\n\r\n')
            cbody.write(value)
            cbody.write('\r\n')
        if isinstance(files, dict):
            files = files.items()
        for (field, path) in files:
            cbody.write('--%s\r\n' % (boundary))
            cbody.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' %
                        (field, urlquote(os.path.basename(path).encode('utf8'))))
            cbody.write('Content-Type: %s\r\n\r\n' % (mimetypes.guess_type(path)[0] or 'application/octet-stream'))
            # stream the file in 1 MiB chunks to cap memory per read
            with open(path, 'rb') as f:
                while True:
                    d = f.read(1048576)
                    if not d:
                        break
                    body.write(d)
            cbody.write('\r\n')
        cbody.write('--%s--\r\n' % (boundary))
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return body.getvalue(), content_type
#------------------------------------------------------
class FAlbum:
    """Lightweight view over a Flickr photoset (album) JSON record."""

    def __init__(self, a):
        self.id = a.get('id')
        self.title = get_json_item(a.get('title'))
        self.description = get_json_item(a.get('description'))
        # total item count: photos plus videos
        photo_count = int(a.get('photos', '0'))
        video_count = int(a.get('videos', 0))
        self.items = photo_count + video_count
class FPhoto:
def __init__(self, | |
while True:
x_e_prev = x_e
# (8) b_s
num = pd.DataFrame(x_es - np.tile(x_e, (S, 1)).T).sum(axis=0) # sum over e
den = pd.DataFrame(x_es/x_es).sum(axis=0) # sum over e
b_s_new = num / den
b_s = b_s * (1.0 - REFRESH_RATE) + b_s_new * REFRESH_RATE
a_es = x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))
if use_log:
# (9') log_v_s
num = pd.DataFrame(-np.ones([E, S]) + a_es**2 / np.tile(v_s**2, (E, 1))).sum(axis=0) # sum over e
den = pd.DataFrame(-2 * a_es**2 / np.tile(v_s**2, (E, 1))).sum(axis=0) # sum over e
log_v_s_new = log_v_s - num / den
log_v_s = log_v_s * (1.0 - REFRESH_RATE) + log_v_s_new * REFRESH_RATE
v_s = np.exp(log_v_s)
else:
# (9) v_s
num = pd.DataFrame(2 * np.ones([E, S]) * np.tile(v_s**3, (E, 1)) - 4 * np.tile(v_s, (E, 1)) * a_es**2).sum(axis=0) # sum over e
den = pd.DataFrame(np.ones([E, S]) * np.tile(v_s**2, (E, 1)) - 3 * a_es**2).sum(axis=0) # sum over e
v_s_new = num / den
v_s = v_s * (1.0 - REFRESH_RATE) + v_s_new * REFRESH_RATE
# v_s = np.maximum(v_s, np.zeros(v_s.shape))
# (7) x_e
num = pd.DataFrame((x_es - np.tile(b_s, (E, 1))) / np.tile(v_s**2, (E, 1))).sum(axis=1) # sum along s
den = pd.DataFrame(x_es/x_es / np.tile(v_s**2, (E, 1))).sum(axis=1) # sum along s
x_e_new = num / den
x_e = x_e * (1.0 - REFRESH_RATE) + x_e_new * REFRESH_RATE
itr += 1
delta_x_e = linalg.norm(x_e_prev - x_e)
msg = 'Iteration {itr:4d}: change {delta_x_e}, mean x_e {x_e}, mean b_s {b_s}, mean v_s {v_s}'.\
format(itr=itr, delta_x_e=delta_x_e, x_e=np.mean(x_e), b_s=np.mean(b_s), v_s=np.mean(v_s))
sys.stdout.write(msg + '\r')
sys.stdout.flush()
# time.sleep(0.001)
if delta_x_e < DELTA_THR:
break
if itr >= MAX_ITR:
break
sys.stdout.write("\n")
result = {
'quality_scores': list(x_e),
'observer_bias': list(b_s),
'observer_inconsistency': list(v_s),
}
try:
observers = dataset_reader._get_list_observers # may not exist
result['observers'] = observers
except AssertionError:
pass
return result
class MaximumLikelihoodEstimationModel(SubjectiveModel):
"""
Generative model that considers individual subjective (or observer)'s bias
and inconsistency, as well as content's bias and ambiguity.
The observed score is modeled by:
X_e,s = x_e + B_e,s + A_e,s
where x_e is the true quality of distorted video e, and B_e,s ~ N(b_s, v_s)
is the term representing observer s's bias (b_s) and inconsistency (v_s).
A_e,s ~ N(0, a_c), where c is a function of e, or c = c(e), represents
content c's ambiguity (a_c). The model is then solved via maximum
likelihood estimation using belief propagation.
"""
# TYPE = 'Subject/Content-Aware'
TYPE = 'MLE' # maximum likelihood estimation
# VERSION = '0.1'
VERSION = '0.2' # added confidence interval for parameters
mode = 'DEFAULT'
DEFAULT_GRADIENT_METHOD = 'simplified'
@staticmethod
def loglikelihood_fcn(x_es, x_e, b_s, v_s, a_c, content_id_of_dis_videos, axis):
E, S = x_es.shape
a_c_e = np.array(map(lambda i: a_c[i], content_id_of_dis_videos))
a_es = x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))
vs2_add_ace2 = np.tile(v_s**2, (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
ret = - 1.0 / 2 * np.log(vs2_add_ace2) - 1.0 / 2 * a_es**2 / vs2_add_ace2
ret = pd.DataFrame(ret).sum(axis=axis)
return ret
@classmethod
def _run_modeling(cls, dataset_reader, **kwargs):
# mode: DEFAULT - subject and content-aware
# NO_SUBJECT - subject-unaware
# NO_CONTENT - content-unaware
if 'subject_rejection' in kwargs and kwargs['subject_rejection'] is True:
assert False, 'SubjectAndContentAwareGenerativeModel must not ' \
'and need not apply subject rejection.'
gradient_method = kwargs['gradient_method'] if 'gradient_method' in kwargs else cls.DEFAULT_GRADIENT_METHOD
assert gradient_method == 'simplified' or gradient_method == 'original' or gradient_method == 'numerical'
def sum_over_content_id(xs, cids):
assert len(xs) == len(cids)
num_c = np.max(cids) + 1
assert sorted(list(set(cids))) == range(num_c)
sums = np.zeros(num_c)
for x, cid in zip(xs, cids):
sums[cid] += x
return sums
def std_over_subject_and_content_id(x_es, cids):
assert x_es.shape[0] == len(cids)
num_c = np.max(cids) + 1
assert sorted(list(set(cids))) == range(num_c)
ls = [[] for _ in range(num_c)]
for idx_cid, cid in enumerate(cids):
ls[cid] = ls[cid] + list(x_es[idx_cid, :])
stds = []
for l in ls:
stds.append(pd.Series(l).std(ddof=0))
return np.array(stds)
x_es = cls._get_opinion_score_2darray_with_preprocessing(dataset_reader, **kwargs)
E, S = x_es.shape
C = dataset_reader.num_ref_videos
# === initialization ===
mos = np.array(MosModel(dataset_reader).run_modeling()['quality_scores'])
x_e = mos # use MOS as initial value for x_e
b_s = np.zeros(S)
r_es = x_es - np.tile(x_e, (S, 1)).T # r_es: residual at e, s
if cls.mode == 'NO_SUBJECT':
v_s = np.zeros(S)
else:
v_s = pd.DataFrame(r_es).std(axis=0, ddof=0) # along e
if cls.mode == 'NO_CONTENT':
a_c = np.zeros(C)
else:
a_c = std_over_subject_and_content_id(
r_es, dataset_reader.content_id_of_dis_videos)
x_e_std = None
b_s_std = None
v_s_std = None
a_c_std = None
# === iterations ===
MAX_ITR = 10000
REFRESH_RATE = 0.1
DELTA_THR = 1e-8
EPSILON = 1e-3
print '=== Belief Propagation ==='
itr = 0
while True:
x_e_prev = x_e
# ==== (12) b_s ====
if gradient_method == 'simplified':
a_c_e = np.array(map(lambda i: a_c[i], dataset_reader.content_id_of_dis_videos))
num_num = x_es - np.tile(x_e, (S, 1)).T
num_den = np.tile(v_s**2, (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
num = pd.DataFrame(num_num / num_den).sum(axis=0) # sum over e
den_num = x_es / x_es # 1 and nan
den_den = num_den
den = pd.DataFrame(den_num / den_den).sum(axis=0) # sum over e
b_s_new = num / den
b_s = b_s * (1.0 - REFRESH_RATE) + b_s_new * REFRESH_RATE
b_s_std = 1.0 / np.sqrt(den) # calculate std of x_e
elif gradient_method == 'original':
a_c_e = np.array(map(lambda i: a_c[i], dataset_reader.content_id_of_dis_videos))
vs2_add_ace2 = np.tile(v_s**2, (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
order1 = (x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))) / vs2_add_ace2
order1 = pd.DataFrame(order1).sum(axis=0) # sum over e
order2 = - (x_es / x_es) / vs2_add_ace2
order2 = pd.DataFrame(order2).sum(axis=0) # sum over e
b_s_new = b_s - order1 / order2
b_s = b_s * (1.0 - REFRESH_RATE) + b_s_new * REFRESH_RATE
b_s_std = 1.0 / np.sqrt(-order2) # calculate std of x_e
elif gradient_method == 'numerical':
axis = 0 # sum over e
order1 = (cls.loglikelihood_fcn(x_es, x_e, b_s + EPSILON / 2.0, v_s, a_c, dataset_reader.content_id_of_dis_videos, axis=axis) -
cls.loglikelihood_fcn(x_es, x_e, b_s - EPSILON / 2.0, v_s, a_c, dataset_reader.content_id_of_dis_videos, axis=axis)) / EPSILON
order2 = (cls.loglikelihood_fcn(x_es, x_e, b_s + EPSILON, v_s, a_c, dataset_reader.content_id_of_dis_videos, axis=axis)
- 2 * cls.loglikelihood_fcn(x_es, x_e, b_s, v_s, a_c, dataset_reader.content_id_of_dis_videos, axis=axis)
+ cls.loglikelihood_fcn(x_es, x_e, b_s - EPSILON, v_s, a_c, dataset_reader.content_id_of_dis_videos, axis=axis)) / EPSILON**2
b_s_new = b_s - order1 / order2
b_s = b_s * (1.0 - REFRESH_RATE) + b_s_new * REFRESH_RATE
b_s_std = 1.0 / np.sqrt(-order2) # calculate std of x_e
else:
assert False
if cls.mode == 'NO_SUBJECT':
b_s = np.zeros(S) # forcing zero, hence disabling
b_s_std = np.zeros(S)
# ==== (14) v_s ====
if gradient_method == 'simplified':
a_c_e = np.array(map(lambda i: a_c[i], dataset_reader.content_id_of_dis_videos))
a_es = x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))
vs2_add_ace2 = np.tile(v_s**2, (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
vs2_minus_ace2 = np.tile(v_s**2, (E, 1)) - np.tile(a_c_e**2, (S, 1)).T
num = - np.tile(v_s, (E, 1)) / vs2_add_ace2 + np.tile(v_s, (E, 1)) * a_es**2 / vs2_add_ace2**2
num = pd.DataFrame(num).sum(axis=0) # sum over e
poly_term = np.tile(a_c_e**4, (S, 1)).T \
- 3 * np.tile(v_s**4, (E, 1)) \
- 2 * np.tile(v_s**2, (E, 1)) * np.tile(a_c_e**2, (S, 1)).T
den = vs2_minus_ace2 / vs2_add_ace2**2 + a_es**2 * poly_term / vs2_add_ace2**4
den = pd.DataFrame(den).sum(axis=0) # sum over e
v_s_new = v_s - num / den
v_s = v_s * (1.0 - REFRESH_RATE) + v_s_new * REFRESH_RATE
# calculate std of v_s
lpp = pd.DataFrame(
vs2_minus_ace2 / vs2_add_ace2**2 + a_es**2 * poly_term / vs2_add_ace2**4
).sum(axis=0) # sum over e
v_s_std = 1.0 / np.sqrt(-lpp)
elif gradient_method == 'original':
a_c_e = np.array(map(lambda i: a_c[i], dataset_reader.content_id_of_dis_videos))
a_es = x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))
vs2_add_ace2 = np.tile(v_s**2, (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
vs2_minus_ace2 = np.tile(v_s**2, (E, 1)) - np.tile(a_c_e**2, (S, 1)).T
poly_term = np.tile(a_c_e**4, (S, 1)).T \
- 3 * np.tile(v_s**4, (E, 1)) \
- 2 * np.tile(v_s**2, (E, 1)) * np.tile(a_c_e**2, (S, 1)).T
order1 = - np.tile(v_s, (E, 1)) / vs2_add_ace2 + np.tile(v_s, (E, 1)) * a_es**2 / vs2_add_ace2**2
order1 = pd.DataFrame(order1).sum(axis=0) # sum over e
order2 = vs2_minus_ace2 / vs2_add_ace2**2 + a_es**2 * poly_term / vs2_add_ace2**4
order2 = pd.DataFrame(order2).sum(axis=0) # sum over e
v_s_new = v_s - order1 / order2
v_s = v_s * (1.0 - REFRESH_RATE) + v_s_new * REFRESH_RATE
v_s_std = 1.0 | |
# filename: jumeg/jumeg_volume_plotting.py  (gh_stars: 1-10 — scraper artifact preserved as a comment)
from os import path as op
import numpy as np
from matplotlib import pyplot as plt
import time as time2
from time import strftime, localtime
from nibabel.affines import apply_affine
from nilearn import plotting
from nilearn.image import index_img
from nilearn.plotting.img_plotting import _MNI152Template
MNI152TEMPLATE = _MNI152Template()
def plot_vstc(vstc, vsrc, tstep, subjects_dir, time_sample=None, coords=None,
              figure=None, axes=None, cmap='magma', symmetric_cbar=False,
              threshold='min', save=False, fname_save=None):
    """ Plot a volume source space estimation.

    Parameters
    ----------
    vstc : VolSourceEstimate
        The volume source estimate.
    vsrc : instance of SourceSpaces
        The source space of the subject equivalent to the
        subject.
    tstep : scalar
        Time step between successive samples in data.
    subjects_dir : str
        The path to the subjects directory.
    time_sample : int, float | None
        None is default for finding the time sample with the voxel with global
        maximal amplitude. If int, float the given time sample is selected and
        plotted.
    coords : arr | None
        None is default for finding the coordinates with the maximal amplitude
        for the given or automatically found time sample.
    figure : integer | matplotlib.figure | None
        Specify the figure container to plot in or its number. If None is
        given, a new figure is created.
    axes : matplotlib.figure.axes | None
        Specify the axes of the given figure to plot in. Only necessary if
        a figure is passed.
    cmap : matplotlib colormap, optional
        The colormap for specified image. The colormap *must* be
        symmetrical.
    symmetric_cbar : boolean or 'auto', optional, default 'auto'
        Specifies whether the colorbar should range from -vmax to vmax
        or from vmin to vmax. Setting to 'auto' will select the latter if
        the range of the whole image is either positive or negative.
        Note: The colormap will always be set to range from -vmax to vmax.
    threshold : a number, None, 'auto', or 'min'
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image:
        values below the threshold (in absolute value) are plotted
        as transparent. If auto is given, the threshold is determined
        magically by analysis of the image.
    save : bool | None
        Default is False. If True the plot is forced to close and written to
        disk at fname_save location.
    fname_save : string
        The path where to save the plot.

    Returns
    -------
    Figure : matplotlib.figure
        VolSourceEstimation plotted for given or 'auto' coordinates at given
        or 'auto' timepoint.
    """
    vstcdata = vstc.data
    img = vstc.as_volume(vsrc, dest='mri', mri_resolution=False)
    subject = vsrc[0]['subject_his_id']
    # NOTE(review): comparing a source estimate with 0 looks suspicious and is
    # probably never True (mirrors the TODO in get_params_for_grid_slice);
    # kept as-is for backward compatibility -- confirm the intended check.
    if vstc == 0:
        if tstep is not None:
            img = _make_image(vstc, vsrc, tstep, dest='mri', mri_resolution=False)
        else:
            print('    Please provide the tstep value !')
    # NOTE(review): get_data() is deprecated in nibabel >= 3.0; switch to
    # img.get_fdata() once the pinned nibabel version allows it.
    img_data = img.get_data()
    aff = img.affine
    if time_sample is None:
        # pick the time sample with the globally maximal summed amplitude;
        # np.argmax returns the first maximum and, unlike the previous
        # int(np.where(...)) construct, does not crash when several time
        # samples tie for the maximum
        t = int(np.argmax(np.sum(vstcdata, axis=0)))
    else:
        print('    Time slice', time_sample)
        t = time_sample
    t_in_ms = vstc.times[t] * 1e3
    print('    Found time slice: ', t_in_ms, 'ms')
    if coords is None:
        # voxel with maximal amplitude at the chosen time sample, mapped to
        # mri space via the image affine
        cut_coords = np.where(img_data == img_data[:, :, :, t].max())
        max_try = np.concatenate((np.array([cut_coords[0][0]]),
                                  np.array([cut_coords[1][0]]),
                                  np.array([cut_coords[2][0]])))
        cut_coords = apply_affine(aff, max_try)
    else:
        cut_coords = coords
    slice_x, slice_y = int(cut_coords[0]), int(cut_coords[1])
    slice_z = int(cut_coords[2])
    # fixed: the original concatenated the label string with 'X: ' and printed
    # the remaining values as a tuple; emit one formatted line instead
    print('    Coords [mri-space]: X: %d Y: %d Z: %d'
          % (slice_x, slice_y, slice_z))
    temp_t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
    if threshold == 'min':
        threshold = vstcdata.min()
    vstc_plt = plotting.plot_stat_map(index_img(img, t), temp_t1_fname,
                                      figure=figure, axes=axes,
                                      display_mode='ortho',
                                      threshold=threshold,
                                      annotate=True,
                                      title='%s | t=%.2f ms'
                                            % (subject, t_in_ms),
                                      cut_coords=(slice_x, slice_y, slice_z),
                                      cmap=cmap, symmetric_cbar=symmetric_cbar)
    if save:
        if fname_save is None:
            print('please provide a filepath to save .png')
        else:
            plt.savefig(fname_save)
        # close in either case so batch processing does not accumulate figures
        plt.close()
    return vstc_plt
def plot_vstc_sliced_grid(subjects_dir, vstc, vsrc, title, cut_coords,
                          time=None, display_mode='x', cmap='magma',
                          threshold='min', cbar_range=None, grid=None,
                          res_save=None, fn_image='plot.png',
                          overwrite=False):
    """
    Plot a grid of slices through a volume source estimate and save it.

    Parameters:
    -----------
    subjects_dir : str
        The path to the subjects directory.
    vstc : VolSourceEstimate
        The volume source estimate.
    vsrc : instance of SourceSpaces
        The source space of the subject equivalent to the
        subject.
    title : str
        Title for the plot.
    cut_coords : list
        The MNI coordinates of the points where the cuts are performed.
        For display_mode == 'x', 'y', or 'z', these are the coordinates
        of each cut in the corresponding direction.
        len(cut_coords) has to match grid[0]*grid[1].
    time : float
        Time point for which the image will be created.
    display_mode : 'x', 'y', 'z'
        Direction in which the brain is sliced.
    cmap : str
        Name of the matplotlib color map to use.
        See https://matplotlib.org/examples/color/colormaps_reference.html
    threshold : a number, None, 'auto', or 'min'
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image:
        values below the threshold (in absolute value) are plotted
        as transparent. If auto is given, the threshold is determined
        magically by analysis of the image.
    cbar_range : None, 2-tuple
        Color range of the plot.
    grid : None | 2-tuple
        Specifies how many images per row and column are to be depicted.
        If grid is None it defaults to [4, 6].
    res_save : None | 2-tuple
        Resolution of the saved image in pixel.
        If res_save is None it defaults to [1920, 1080].
    fn_image : str
        File name for the saved image.
    overwrite : bool
        Overwrite an existing image.

    Returns:
    --------
    None
    """
    if grid is None:
        grid = [4, 6]
    if res_save is None:
        res_save = [1920, 1080]
    if display_mode not in {'x', 'y', 'z'}:
        raise ValueError("display_mode must be one of 'x', 'y', or 'z'.")
    # fixed: the original wrote `... % grid[0] * grid[1]`, which formats the
    # message with grid[0] and then string-multiplies it by grid[1]; the
    # product must be parenthesized
    if len(cut_coords) != grid[0] * grid[1]:
        raise ValueError("len(cut_coords) has to match the size of the grid (length must be grid[0]*grid[1]=%d)"
                         % (grid[0] * grid[1]))
    if not op.exists(fn_image) or overwrite:
        start_time = time2.time()
        print(strftime('Start at %H:%M:%S on the %d.%m.%Y \n', localtime()))
        figure, axes = plt.subplots(grid[0], grid[1])
        axes = axes.flatten()
        # precompute everything that is identical for every slice
        params_plot_img_with_bg = get_params_for_grid_slice(vstc, vsrc, vstc.tstep, subjects_dir,
                                                            cbar_range=cbar_range)
        for i, (ax, z) in enumerate(zip(axes, cut_coords)):
            # to get a single slice in plot_vstc_grid_slice this has to be a
            # list containing a single float
            cut_coords_slice = [z]
            # only the last image of the first row carries the colorbar
            colorbar = (grid[1] - 1 == i)
            plot_vstc_grid_slice(vstc=vstc, params_plot_img_with_bg=params_plot_img_with_bg, time=time,
                                 cut_coords=cut_coords_slice, display_mode=display_mode,
                                 figure=figure, axes=ax, colorbar=colorbar, cmap=cmap,
                                 threshold=threshold)
        plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95,
                            wspace=0, hspace=0)
        if title is not None:
            plt.suptitle(title)
        dpi = figure.get_dpi()
        figure.set_size_inches(res_save[0] / float(dpi), res_save[1] / float(dpi))
        # bbox_inches='tight' not useful for images for videos, see:
        # https://github.com/matplotlib/matplotlib/issues/8543#issuecomment-400679840
        frmt = fn_image.split('.')[-1]
        print(dpi, figure.get_size_inches())
        plt.savefig(fn_image, format=frmt, dpi=dpi)
        plt.close()
        end_time = time2.time()
        print(strftime('End at %H:%M:%S on the %d.%m.%Y \n', localtime()))
        minutes = (end_time - start_time) / 60
        seconds = (end_time - start_time) % 60
        print("Calculation took %d minutes and %d seconds" % (minutes, seconds))
        print("")
    else:
        print("File %s exists." % fn_image)
def get_params_for_grid_slice(vstc, vsrc, tstep, subjects_dir, cbar_range=None, **kwargs):
"""
Makes calculations that would be executed repeatedly every time a slice is
computed and saves the results in a dictionary which is then read by
plot_vstc_grid_slice().
Parameters:
-----------
vstc : mne.VolSourceEstimate
The volume source estimate.
vsrc : mne.SourceSpaces
The source space of the subject equivalent to the
tstep : int
Time step between successive samples in data.
subjects_dir:
Path to the subject directory.
cbar_range : None, 2-tuple
Color range of the plot.
Returns:
--------
params_plot_img_with_bg : dict
Dictionary containing the parameters for plotting.
"""
img = vstc.as_volume(vsrc, dest='mri', mri_resolution=False)
# TODO: why should vstc ever be 0?
if vstc == 0:
# TODO: how would _make_image work if vstc is zero anyways?
if tstep is not None:
img = _make_image(vstc, vsrc, tstep, dest='mri', mri_resolution=False)
else:
print(' Please provide the tstep value !')
subject = vsrc[0]['subject_his_id']
temp_t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
bg_img = temp_t1_fname
dim = 'auto'
black_bg = 'auto'
vmax = None
symmetric_cbar = False
from nilearn.plotting.img_plotting import _load_anat, _get_colorbar_and_data_ranges
from nilearn._utils import check_niimg_4d
from nilearn._utils.niimg_conversions import _safe_get_data
bg_img, black_bg, bg_vmin, bg_vmax = _load_anat(bg_img, dim=dim, black_bg=black_bg)
stat_map_img = check_niimg_4d(img, dtype='auto')
cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
_safe_get_data(stat_map_img, ensure_finite=True), vmax, symmetric_cbar, kwargs)
if cbar_range is not None:
cbar_vmin = cbar_range[0]
cbar_vmax = cbar_range[1]
vmin = cbar_range[0]
vmax = cbar_range[1]
params_plot_img_with_bg = dict()
params_plot_img_with_bg['bg_img'] = bg_img
params_plot_img_with_bg['black_bg'] = black_bg
params_plot_img_with_bg['bg_vmin'] = bg_vmin
params_plot_img_with_bg['bg_vmax'] = bg_vmax
params_plot_img_with_bg['stat_map_img'] = stat_map_img
params_plot_img_with_bg['cbar_vmin'] = cbar_vmin
params_plot_img_with_bg['cbar_vmax'] = cbar_vmax
params_plot_img_with_bg['vmin'] = vmin
params_plot_img_with_bg['vmax'] = | |
# encoding: utf-8
u"""A module for coding standards tests.
These are tests that are not functional- or unit-testing any particular piece
of CKAN code, but are checking coding standards. For example: checking that
there are no errors in the Sphinx build, that there are no PEP8 problems,
etc.
"""
import importlib
import inspect
import io
import itertools
import os
import os.path
import re
import subprocess
import sys
import pytest
import six
FILESYSTEM_ENCODING = str(
sys.getfilesystemencoding() or sys.getdefaultencoding()
)
HERE = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.normpath(os.path.join(HERE, u"..", u".."))
# Directories which are ignored when checking Python source code files
IGNORED_DIRS = [u"ckan/include", u"contrib/cookiecutter"]
def walk_python_files(ext=".py"):
    u"""
    Generator that yields all CKAN Python source files.

    Yields 2-tuples containing the filename in absolute and relative (to
    the project root) form.
    """
    def _skip(rel_dir, name):
        # prune hidden directories and explicitly ignored ones
        return name.startswith(u".") or os.path.join(rel_dir, name) in IGNORED_DIRS

    for abs_dir, subdirs, files in os.walk(PROJECT_ROOT):
        rel_dir = os.path.relpath(abs_dir, PROJECT_ROOT)
        if rel_dir == u".":
            rel_dir = u""
        # mutate in place so os.walk does not descend into skipped dirs
        subdirs[:] = [name for name in subdirs if not _skip(rel_dir, name)]
        for name in files:
            if name.endswith(ext):
                yield os.path.join(abs_dir, name), os.path.join(rel_dir, name)
def output_errors(filename, errors):
    """Format one file's error list as a report headed by the underlined filename."""
    rule = "-" * len(filename)
    header = ["", rule, filename, rule]
    return "\n".join(header + list(errors))
def show_fails(msg, errors):
    """Raise an Exception listing each error report and the failing file names.

    ``errors`` maps filename -> formatted report; does nothing when empty.
    """
    if not errors:
        return
    parts = ["\n" + msg]
    parts.extend(errors[name] for name in errors)
    parts.append("\n\nFailing Files:\n==============")
    parts.extend(sorted(errors))
    raise Exception("\n".join(parts))
def show_passing(msg, errors):
    """Raise an Exception when blacklisted files unexpectedly pass (stale entries)."""
    if errors:
        listing = "\n".join(sorted(errors))
        raise Exception("\n{}\n\n{}".format(msg, listing))
class TestBadSpellings(object):
    """Check source and docs files for known bad spellings."""

    BAD_SPELLING_BLACKLIST_FILES = []

    # these are the bad spellings with the correct spelling
    # use LOWER case
    BAD_SPELLINGS = {
        # CS: bad_spelling ignore 2 lines
        "licence": "license",
        "organisation": "organization",
    }

    @pytest.fixture(scope="class")
    def results(self):
        """Scan all .py/.rst files; return (fails, passes).

        fails maps filename -> formatted error report; passes lists
        blacklisted files that no longer have errors (stale entries).
        """
        fails = {}
        passes = []
        result = (fails, passes)
        blacklist = self.BAD_SPELLING_BLACKLIST_FILES
        re_bad_spelling = re.compile(
            r"(%s)" % "|".join([x for x in self.BAD_SPELLINGS]),
            flags=re.IGNORECASE,
        )
        files = itertools.chain.from_iterable(
            [
                walk_python_files(),
                walk_python_files(ext=".rst"),
            ]
        )
        for path, filename in files:
            # fixed: use a context manager; the original leaked one open
            # file handle per scanned file
            with open(path, "r") as f:
                count = 1
                errors = []
                for line in cs_filter(f, "bad_spelling"):
                    matches = re_bad_spelling.findall(line)
                    if matches:
                        bad_words = []
                        for m in matches:
                            if m not in bad_words:
                                bad_words.append(
                                    "%s use %s" % (m, self.BAD_SPELLINGS[m.lower()])
                                )
                        bad = ", ".join(bad_words)
                        errors.append("ln:%s \t%s\n<%s>" % (count, line[:-1], bad))
                    count += 1
            if errors and filename not in blacklist:
                fails[filename] = output_errors(filename, errors)
            elif not errors and filename in blacklist:
                passes.append(filename)
        return result

    def test_good(self, results):
        msg = "The following files passed bad spellings rules"
        msg += "\nThey need removing from the test blacklist"
        show_passing(msg, results[1])

    def test_bad(self, results):
        msg = "The following files have bad spellings that need fixing"
        show_fails(msg, results[0])
def cs_filter(f, filter_, ignore_comment_lines=True):
    """filter the file removing comments if requested.

    looks for comments like

        # CS: <filter_> ignore
        # CS: <filter_> ignore x line

    and removes the requested number of lines.  Lines are removed by
    blanking so the line numbers reported will be correct.  This allows us
    to check files that have known violations of the test rules."""
    # this RegEx is of poor quality but works
    directive = re.compile(r"^\s*#\s+CS:.*%s.*ignore\D*((\d+)\s+line)*" % filter_)
    to_blank = 0
    filtered = []
    for line in f:
        if to_blank > 0:
            # a previous directive told us to blank this line
            line = ""
            to_blank -= 1
        found = directive.search(line)
        if found:
            # "ignore" alone means one line; "ignore N lines" means N
            to_blank = int(found.group(2) or 1)
        # blank comment lines themselves when requested
        if ignore_comment_lines and line.lstrip().startswith("#"):
            line = ""
        filtered.append(line)
    return filtered
class TestImportStar(object):
    """Find files using from xxx import *"""

    # Import * file exceptions
    #
    # The following files contain one or more `from ... import *` lines
    # which should not be used in ckan where possible. If the files get
    # fixed they should be removed from this list.
    #
    # import * is bad for many reasons and should be avoided.
    IMPORT_STAR_BLACKLIST_FILES = [
        "ckan/plugins/__init__.py",
    ]

    @pytest.fixture(scope="class")
    def results(self):
        """Scan all Python files; return (fails, passes).

        fails maps filename -> formatted error report; passes lists
        blacklisted files that no longer have errors (stale entries).
        """
        blacklist = self.IMPORT_STAR_BLACKLIST_FILES
        re_import_star = re.compile(r"^\s*from\s+.*\simport\s+\*")
        fails = {}
        passes = []
        for path, filename in walk_python_files():
            # fixed: use a context manager; the original leaked one open
            # file handle per scanned file
            with open(path, "r") as f:
                errors = []
                for count, line in enumerate(f, start=1):
                    if re_import_star.search(line):
                        errors.append(
                            "%s ln:%s import *\n\t%s" % (filename, count, line)
                        )
            if errors and filename not in blacklist:
                fails[filename] = output_errors(filename, errors)
            elif not errors and filename in blacklist:
                passes.append(filename)
        return fails, passes

    def test_import_good(self, results):
        msg = "The following files passed import * rules"
        msg += "\nThey need removing from the test blacklist"
        show_passing(msg, results[1])

    def test_import_bad(self, results):
        msg = (
            "The following files have import * issues that need resolving\n"
            "`from ... import *` lines which should not be used in ckan"
            " where possible."
        )
        show_fails(msg, results[0])
def test_building_the_docs():
    u"""There should be no warnings or errors when building the Sphinx docs.

    This test also fails if ``build_sphinx`` exits with non-zero status.
    """
    try:
        # stderr is merged into stdout so Sphinx warnings are captured too
        output = subprocess.check_output(
            [b"python", b"setup.py", b"build_sphinx"], stderr=subprocess.STDOUT
        )
    except subprocess.CalledProcessError as err:
        assert (
            False
        ), u"Building the docs failed with return code: {code}".format(
            code=err.returncode
        )
    output_lines = output.decode("utf8").split("\n")

    # any output line containing ERROR fails the test
    errors = [line for line in output_lines if "ERROR" in line]
    if errors:
        assert False, (
            u"Don't add any errors to the Sphinx build: \n"
            u"{errors}".format(errors="\n".join(errors))
        )

    # any output line containing WARNING fails the test
    warnings = [line for line in output_lines if "WARNING" in line]
    if warnings:
        assert False, (
            u"Don't add any new warnings to the Sphinx build: \n"
            u"{warnings}".format(warnings="\n".join(warnings))
        )
def test_source_files_specify_encoding():
    u"""
    Test that *.py files have a PEP 263 UTF-8 encoding specification.

    Empty files and files that only contain comments are ignored.
    """
    coding_re = re.compile(u"#.*?coding[:=][ \\t]*utf-?8")
    bad_utf8 = []
    missing = []
    for abs_path, rel_path in walk_python_files():
        try:
            with io.open(abs_path, encoding=u"utf-8") as f:
                for raw in f:
                    stripped = raw.strip()
                    if coding_re.match(stripped):
                        # PEP 263 declaration found
                        break
                    if stripped and not stripped.startswith(u"#"):
                        # first substantive line reached with no declaration
                        missing.append(rel_path)
                        break
        except UnicodeDecodeError:
            bad_utf8.append(rel_path)
    problems = []
    if missing:
        problems.append(
            u"The following files are missing an encoding specification: "
            u"{}".format(missing)
        )
    if bad_utf8:
        problems.append(
            u"The following files are not valid UTF-8: "
            u"{}".format(bad_utf8)
        )
    if problems:
        assert False, u"\n\n".join(problems)
class TestActionAuth(object):
"""These tests check the logic auth/action functions are compliant. The
main tests are that each action has a corresponding auth function and
that each auth function has an action. We check the function only
accepts (context, data_dict) as parameters."""
ACTION_FN_SIGNATURES_BLACKLIST = ["create: activity_create"]
ACTION_NO_AUTH_BLACKLIST = [
"create: follow_dataset",
"create: follow_group",
"create: follow_user",
"delete: unfollow_dataset",
"delete: unfollow_group",
"delete: unfollow_user",
"get: am_following_dataset",
"get: am_following_group",
"get: am_following_user",
"get: dataset_followee_count",
"get: dataset_follower_count",
"get: followee_count",
"get: group_followee_count",
"get: group_follower_count",
"get: group_package_show",
"get: member_list",
"get: organization_follower_count",
"get: recently_changed_packages_activity_list",
"get: resource_search",
"get: roles_show",
"get: status_show",
"get: tag_search",
"get: term_translation_show",
"get: user_followee_count",
"get: user_follower_count",
"update: task_status_update_many",
"update: term_translation_update_many",
]
AUTH_NO_ACTION_BLACKLIST = [
"create: file_upload",
"delete: revision_delete",
"delete: revision_undelete",
"get: activity_list",
"get: group_list_available",
"get: sysadmin",
"get: request_reset",
"get: user_reset",
"update: group_change_state",
"update: group_edit_permissions",
"update: package_change_state",
"update: revision_change_state",
]
ACTION_NO_DOC_STR_BLACKLIST = ["get: get_site_user"]
@pytest.fixture(scope="class")
def results(self):
def get_functions(module_root):
import ckan.authz as authz
fns = {}
for auth_module_name in [
"get",
"create",
"update",
"delete",
"patch",
]:
module_path = "%s.%s" % (module_root, auth_module_name)
module = importlib.import_module(module_path)
members = authz.get_local_functions(module)
for key, v in members:
name = "%s: %s" % (auth_module_name, key)
fns[name] = v
return fns
actions = get_functions("logic.action")
auths = get_functions("logic.auth")
return actions, auths
def test_actions_have_auth_fn(self, results):
actions_no_auth = set(results[0].keys()) - set(results[1].keys())
actions_no_auth -= set(self.ACTION_NO_AUTH_BLACKLIST)
assert (
not actions_no_auth
), "These actions have no auth function\n%s" % "\n".join(
sorted(list(actions_no_auth))
)
def test_actions_have_auth_fn_blacklist(self, results):
actions_no_auth = set(results[0].keys()) & set(results[1].keys())
actions_no_auth &= set(self.ACTION_NO_AUTH_BLACKLIST)
assert (
not actions_no_auth
), "These actions blacklisted but " + "shouldn't be \n%s" % "\n".join(
sorted(list(actions_no_auth))
)
def test_auths_have_action_fn(self, results):
auths_no_action = set(results[1].keys()) - set(results[0].keys())
auths_no_action -= set(self.AUTH_NO_ACTION_BLACKLIST)
assert (
not auths_no_action
), "These auth functions have no action\n%s" % "\n".join(
sorted(list(auths_no_action))
)
def test_auths_have_action_fn_blacklist(self, results):
auths_no_action = set(results[1].keys()) & set(results[0].keys())
auths_no_action &= set(self.AUTH_NO_ACTION_BLACKLIST)
assert not auths_no_action, (
"These auths functions blacklisted but"
+ " shouldn't be \n%s" % "\n".join(sorted(list(auths_no_action)))
)
def test_fn_signatures(self, results):
errors = []
for name, fn in six.iteritems(results[0]):
args_info = inspect.getargspec(fn)
if (
args_info.args != ["context", "data_dict"]
or args_info.varargs is not None
or args_info.keywords is | |
# partial implementation of the SQL layer described here:
# https://forcedotcom.github.io/phoenix/index.html
from enum import Enum
def perror(msg):
    """Print a parse error to stdout and abort.

    Fixed: ``quit()`` is injected by the ``site`` module and is absent when
    Python runs with ``-S``; raising SystemExit directly is equivalent
    (quit() raises SystemExit itself) and always available.
    """
    print("Error:", msg)
    raise SystemExit()
def tokenize_sql(text):
    """Yield SQL tokens from *text*.

    Token classes:
      * word:    a leading letter followed by alphanumerics (keywords/names)
      * number:  a digit run that does not start with '0'
      * special: operators/punctuation from ``special_tokens``
    Whitespace separates tokens and is discarded; an unrecognized character
    aborts via perror().
    """
    l = 0
    r = 0
    # order matters: two-character operators must precede their
    # one-character prefixes so that '<=' is not split into '<' '='
    special_tokens = [
        '<>', '<=', '>=', '=', '<', '>', '!=',
        '(', ')', ';', '+', '-', '*', '/', '\'',
        '.',
    ]
    while r < len(text):
        # skip whitespace
        while r < len(text) and text[r].isspace():
            r += 1
        # EOF: fall through, the outer loop terminates
        if r >= len(text):
            pass
        # word token
        elif text[r].isalpha():
            l = r
            r += 1
            while r < len(text) and text[r].isalnum():
                r += 1
            yield text[l:r]
        # number token (leading zeros are rejected by this grammar)
        elif text[r].isnumeric() and text[r] != '0':
            l = r
            r += 1
            while r < len(text) and text[r].isnumeric():
                r += 1
            yield text[l:r]
        # special token: first (longest-prefix-first) match wins
        elif any(text[r:].startswith(tok) for tok in special_tokens):
            l = r
            for tok in special_tokens:
                if text[r:].startswith(tok):
                    r = l + len(tok)
                    yield text[l:r]
                    break
        else:
            # fixed: report the offending position instead of the literal
            # 'TODO' placeholder
            perror("Invalid token at position %d: %r" % (r, text[r]))
class NodeKind(Enum):
    """Kinds of AST nodes produced by the recursive-descent SQL parser."""
    Select = 'Select'
    SelectExpression = 'SelectExpression'
    Token = 'Token'
    TableExpression = 'TableExpression'
    Name = 'Name'
    Expression = 'Expression'
    AndCondition = 'AndCondition'
    Condition = 'Condition'
    Boolean = 'Boolean'
    Int = 'Int'
    Decimal = 'Decimal'
    Number = 'Number'
    Numeric = 'Numeric'
    Value = 'Value'
    Term = 'Term'
    Factor = 'Factor'
    Summand = 'Summand'
    Operand = 'Operand'
    Long = 'Long'
    ColumnRef = 'ColumnRef'
    Compare = 'Compare'
    # fixed: parse_null() and parse_string() reference these members, which
    # were missing and raised AttributeError at runtime
    Null = 'Null'
    String = 'String'
class Node:
    """A node of the parsed SQL syntax tree.

    Attributes:
        name: token text for leaf nodes, '' for purely structural nodes.
        kind: a NodeKind classifying the node.
        children: ordered list of child nodes.
    """

    def __init__(self, name, kind, children=None):
        self.name = name
        self.kind = kind
        # create a fresh list per instance; a mutable default argument
        # would be shared between nodes
        self.children = [] if children is None else children

    def __repr__(self):
        # added for debuggability; does not affect parsing behavior
        return "Node(name=%r, kind=%r, children=%r)" % (
            self.name, self.kind, self.children)
def consume_token(tokens, idx):
    """Return (tokens[idx], idx + 1), or (None, idx) when idx is at the end."""
    if idx != len(tokens):
        return tokens[idx], idx + 1
    return None, idx
def parse_quoted_name(tokens, idx):
    """Parse a quoted identifier.

    Not implemented yet: always reports no match by returning (None, idx),
    so parse_name() falls back to unquoted names.
    """
    print('TODO implement quoted name')
    return None, idx
def parse_name(tokens, idx):
    """Parse an identifier: a quoted name or [A-Za-z_][A-Za-z0-9_]*.

    Returns (Node, new_idx) on success; (None, idx) with the token pushed
    back on failure.  Aborts via perror() when the token stream is empty.
    """
    child, idx = parse_quoted_name(tokens, idx)
    if child:
        # fixed: the original returned the bare node instead of a
        # (node, idx) tuple, which would break every caller once quoted
        # names are implemented
        return child, idx
    tok, idx = consume_token(tokens, idx)
    if not tok:
        perror("Expected token")
    elif not (tok[0].isalpha() or tok[0] == '_'):
        # not a name: push the token back
        idx -= 1
        return None, idx
    else:
        # all remaining characters must be alphanumerics or underscores
        for c in tok[1:]:
            if not (c.isalnum() or c == '_'):
                idx -= 1
                return None, idx
        node = Node(name=tok, kind=NodeKind.Name)
        return node, idx
def parse_null(tokens, idx):
    """Parse the NULL literal (case-insensitive).

    Returns (Node, new_idx) on a match, otherwise (None, idx) with any
    consumed token pushed back.
    """
    tok, idx = consume_token(tokens, idx)
    if tok and tok.upper() == 'NULL':
        return Node(name=tok, kind=NodeKind.Null), idx
    if tok:
        idx -= 1  # push the non-matching token back
    return None, idx
def parse_boolean(tokens, idx):
    """Parse TRUE/FALSE (case-insensitive).

    Returns (Node, new_idx) on a match, otherwise (None, idx) with any
    consumed token pushed back.
    """
    tok, idx = consume_token(tokens, idx)
    if not tok:
        return None, idx
    if tok.upper() in ('TRUE', 'FALSE'):
        return Node(name=tok, kind=NodeKind.Boolean), idx
    return None, idx - 1
def parse_number(tokens, idx):
    """Parse an unsigned integer token.

    Returns (Node, new_idx) on a match, otherwise (None, idx) with any
    consumed token pushed back.
    """
    tok, idx = consume_token(tokens, idx)
    if not tok:
        return None, idx
    if tok.isnumeric():
        return Node(name=tok, kind=NodeKind.Number), idx
    return None, idx - 1
def parse_decimal(tokens, idx):
    """Parse a (possibly negative) decimal literal: [-]digits[.digits].

    Returns (Node, new_idx) on success.  On failure returns (None, start_idx)
    so that NO tokens remain consumed: the original leaked a consumed '-'
    token and even decremented idx at end-of-input (re-reading the previous
    token), which desynchronised the alternation in parse_numeric().
    """
    start = idx
    node = Node(name='', kind=NodeKind.Decimal)
    tok, idx = consume_token(tokens, idx)
    if tok is None:
        # end of input: nothing to parse
        return None, start
    if tok == '-':
        # the sign is recorded as a child token; the tree carries the sign
        node.children.append(Node(name=tok, kind=NodeKind.Token))
    else:
        idx -= 1
    child, idx = parse_number(tokens, idx)
    if not child:
        return None, start  # backtrack fully, including any consumed '-'
    node.children.append(child)
    tok, idx = consume_token(tokens, idx)
    if tok == '.':
        node.children.append(Node(name=tok, kind=NodeKind.Token))
        child, idx = parse_number(tokens, idx)
        if child:
            node.children.append(child)
        else:
            perror("Expected number following . in decimal")
    elif tok is not None:
        idx -= 1  # push back the token that is not part of the decimal
    return node, idx
def parse_long(tokens, idx):
    """Parse a 64-bit signed integer literal: [-]digits within long range.

    Returns (Node, new_idx) on success.  On failure returns (None, start_idx)
    so that NO tokens remain consumed: the original leaked a consumed '-' or
    an out-of-range number and decremented idx at end-of-input, which
    desynchronised the alternation in parse_numeric().
    """
    start = idx
    node = Node(name='', kind=NodeKind.Long)
    tok, idx = consume_token(tokens, idx)
    if tok is None:
        return None, start
    if tok == '-':
        node.children.append(Node(name=tok, kind=NodeKind.Token))
        multiplier = -1
    else:
        idx -= 1
        multiplier = 1
    child, idx = parse_number(tokens, idx)
    if child and -9223372036854775808 <= multiplier * int(child.name) <= 9223372036854775807:
        node.children.append(child)
        return node, idx
    return None, start
def parse_int(tokens, idx):
    """Parse a 32-bit signed integer literal: [-]digits within int range.

    Returns (Node, new_idx) on success.  On failure returns (None, start_idx)
    so that NO tokens remain consumed: the original leaked a consumed '-' or
    an out-of-range number and decremented idx at end-of-input, which
    desynchronised the alternation in parse_numeric().
    """
    start = idx
    node = Node(name='', kind=NodeKind.Int)
    tok, idx = consume_token(tokens, idx)
    if tok is None:
        return None, start
    if tok == '-':
        node.children.append(Node(name=tok, kind=NodeKind.Token))
        multiplier = -1
    else:
        idx -= 1
        multiplier = 1
    child, idx = parse_number(tokens, idx)
    if child and -2147483648 <= multiplier * int(child.name) <= 2147483647:
        node.children.append(child)
        return node, idx
    return None, start
def parse_numeric(tokens, idx):
    """Parse a numeric literal: int, long, or decimal (tried in that order).

    Returns (Node, new_idx) wrapping the first match in a Numeric node, or
    (None, idx) when no alternative matches.
    """
    for sub_parse in (parse_int, parse_long, parse_decimal):
        child, idx = sub_parse(tokens, idx)
        if child:
            node = Node(name='', kind=NodeKind.Numeric)
            node.children.append(child)
            return node, idx
    return None, idx
def parse_string(tokens, idx):
    """Parse a single-quoted string token.

    Returns (Node, new_idx) or (None, idx).  Fixed: the original indexed
    ``tok[0]`` without checking for end-of-input, raising TypeError when
    the token stream was exhausted.

    NOTE(review): tokenize_sql() emits a bare quote as its own token, so a
    lone ``'`` also matches here (tok[0] == tok[-1] == "'") -- confirm the
    intended string tokenization.
    """
    tok, idx = consume_token(tokens, idx)
    if not tok:
        return None, idx
    if tok[0] == '\'' and tok[-1] == '\'':
        return Node(name=tok, kind=NodeKind.String), idx
    idx -= 1
    return None, idx
def parse_value(tokens, idx):
    """Parse a literal value: string, numeric, boolean, or NULL (in order).

    Returns (Node, new_idx) wrapping the first match in a Value node, or
    (None, idx).  Fixed: the original had an unreachable trailing ``return``
    after the fully-returning if/else chain; the alternation is flattened.
    """
    for sub_parse in (parse_string, parse_numeric, parse_boolean, parse_null):
        child, idx = sub_parse(tokens, idx)
        if child:
            node = Node(name='', kind=NodeKind.Value)
            node.children.append(child)
            return node, idx
    return None, idx
def parse_column_ref(tokens, idx):
    """Parse a column reference (currently just a bare name; family names TODO)."""
    child, idx = parse_name(tokens, idx)
    if not child:
        return None, idx
    # TODO
    print('TODO: family name')
    node = Node(name='', kind=NodeKind.ColumnRef)
    node.children.append(child)
    return node, idx
def parse_term(tokens, idx):
    """Parse a term: a literal value or a column reference (other forms TODO)."""
    node = Node(name='', kind=NodeKind.Term)
    child, idx = parse_value(tokens, idx)
    if child:
        node.children.append(child)
        return node, idx
    print('TODO term: there are a lot of other cases that have not been implemented')
    child, idx = parse_column_ref(tokens, idx)
    if child:
        node.children.append(child)
        return node, idx
    print('TODO term: there are a lot of other cases that have not been implemented')
    return None, idx
def parse_factor(tokens, idx):
    """Parse a factor: term (('*' | '/') term)*.

    Returns (Node, new_idx), or (None, idx) when no leading term is found.
    """
    child, idx = parse_term(tokens, idx)
    if not child:
        return None, idx
    node = Node(name='', kind=NodeKind.Factor)
    node.children.append(child)
    while True:
        tok, idx = consume_token(tokens, idx)
        if not tok:
            return node, idx
        if tok in ('*', '/'):
            node.children.append(Node(name=tok, kind=NodeKind.Token))
            child, idx = parse_term(tokens, idx)
            if child:
                node.children.append(child)
            else:
                # fixed: the message previously claimed '+ or -' although
                # this loop handles '*' and '/'
                perror('Expected term after * or / token')
        else:
            idx -= 1  # token belongs to an outer rule; push it back
            return node, idx
def parse_summand(tokens, idx):
    """Parse a summand: factor (('+' | '-') factor)*."""
    child, idx = parse_factor(tokens, idx)
    if not child:
        return None, idx
    node = Node(name='', kind=NodeKind.Summand)
    node.children.append(child)
    while True:
        tok, idx = consume_token(tokens, idx)
        if tok in ('+', '-'):
            node.children.append(Node(name=tok, kind=NodeKind.Token))
            child, idx = parse_factor(tokens, idx)
            if child:
                node.children.append(child)
            else:
                perror('Expected factor after + or - token')
        else:
            if tok is not None:
                idx -= 1  # token belongs to an outer rule; push it back
            return node, idx
def parse_operand(tokens, idx):
    """Parse an operand: summand ('||' summand)*.

    NOTE(review): tokenize_sql() has no '||' in its special-token list, so
    the concatenation branch looks unreachable with the current tokenizer --
    confirm before relying on it.
    """
    child, idx = parse_summand(tokens, idx)
    if not child:
        return None, idx
    node = Node(name='', kind=NodeKind.Operand)
    node.children.append(child)
    while True:
        tok, idx = consume_token(tokens, idx)
        if tok == '||':
            node.children.append(Node(name=tok, kind=NodeKind.Token))
            child, idx = parse_summand(tokens, idx)
            if child:
                node.children.append(child)
            else:
                perror('Expected summand after ||')
        else:
            if tok is not None:
                idx -= 1  # token belongs to an outer rule; push it back
            return node, idx
def parse_compare(tokens, idx):
    """Parse a comparison operator token.

    Returns (Node, new_idx) or (None, idx).  Fixed: the original decremented
    idx even at end-of-input, although consume_token() does not advance
    there, rewinding past a token it never consumed.
    """
    compare_tokens = ['<>', '<=', '>=', '=', '<', '>', '!=']
    tok, idx = consume_token(tokens, idx)
    if tok is None:
        return None, idx
    if tok in compare_tokens:
        return Node(name=tok, kind=NodeKind.Compare), idx
    return None, idx - 1
def parse_condition(tokens, idx):
    """Parse a condition.

    Grammar (partially implemented):
        operand [compare operand | IN ... | LIKE ... | BETWEEN ... | IS ... | NOT ...]
      | NOT expression
      | '(' expression ')'

    Returns (Node, new_idx) or (None, idx).
    """
    child, idx = parse_operand(tokens, idx)
    if child:
        node = Node(name='', kind=NodeKind.Condition)
        node.children.append(child)
        child, idx = parse_compare(tokens, idx)
        if child:
            node.children.append(child)
            child, idx = parse_operand(tokens, idx)
            if child:
                node.children.append(child)
                return node, idx
            else:
                perror('Expected operand after compare')
        else:
            tok, idx = consume_token(tokens, idx)
            if not tok:
                print('TODO!')  # TODO
            elif tok.upper() == 'IN':
                print('TODO!')  # TODO
            elif tok.upper() == 'LIKE':
                print('TODO!')  # TODO
            elif tok.upper() == 'BETWEEN':
                print('TODO!')  # TODO
            elif tok.upper() == 'IS':
                print('TODO!')  # TODO
            elif tok.upper() == 'NOT':
                print('TODO!')  # TODO
            else:
                perror('Expected one of IN, LIKE, BETWEEN, IS, or NOT after operand')
            return node, idx
    else:
        # fixed: this branch appended to `node` before it was ever created,
        # raising NameError at runtime; create the Condition node first
        node = Node(name='', kind=NodeKind.Condition)
        tok, idx = consume_token(tokens, idx)
        if not tok:
            return None, idx
        elif tok.upper() == 'NOT':
            node.children.append(Node(name='NOT', kind=NodeKind.Token))
            child, idx = parse_expression(tokens, idx)
            if child:
                node.children.append(child)
                return node, idx
            else:
                perror("Expected expression after NOT")
        elif tok == '(':
            child, idx = parse_expression(tokens, idx)
            if child:
                node.children.append(child)
                tok, idx = consume_token(tokens, idx)
                if tok == ')':
                    node.children.append(Node(name=tok, kind=NodeKind.Token))
                    return node, idx
                else:
                    perror("Expected closing paren after expression")
            else:
                perror('Expected expression after \'(\'.')
        else:
            idx -= 1
            return None, idx
def parse_and_condition(tokens, idx):
child, idx = parse_condition(tokens, idx)
if not child:
return None, idx
else:
node = Node(name='', kind=NodeKind.AndCondition)
node.children.append(child)
while True:
tok, idx = consume_token(tokens, idx)
if not tok:
return node, idx
elif tok.upper() == 'AND':
child, idx = parse_condition(tokens, idx)
if child:
node.children.append(child)
else:
| |
<reponame>qcgm1978/sympy
import random
import itertools
from typing import Sequence as tSequence, Union as tUnion, List as tList, Tuple as tTuple
from sympy import (Matrix, MatrixSymbol, S, Indexed, Basic, Tuple, Range,
Set, And, Eq, FiniteSet, ImmutableMatrix, Integer, igcd,
Lambda, Mul, Dummy, IndexedBase, Add, Interval, oo,
linsolve, eye, Or, Not, Intersection, factorial, Contains,
Union, Expr, Function, exp, cacheit, sqrt, pi, gamma,
Ge, Piecewise, Symbol, NonSquareMatrixError, EmptySet,
ceiling, MatrixBase, ConditionSet, ones, zeros, Identity,
Rational, Lt, Gt, Ne, BlockMatrix)
from sympy.core.relational import Relational
from sympy.logic.boolalg import Boolean
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import strongly_connected_components
from sympy.stats.joint_rv import JointDistribution
from sympy.stats.joint_rv_types import JointDistributionHandmade
from sympy.stats.rv import (RandomIndexedSymbol, random_symbols, RandomSymbol,
_symbol_converter, _value_check, pspace, given,
dependent, is_random, sample_iter)
from sympy.stats.stochastic_process import StochasticPSpace
from sympy.stats.symbolic_probability import Probability, Expectation
from sympy.stats.frv_types import Bernoulli, BernoulliDistribution, FiniteRV
from sympy.stats.drv_types import Poisson, PoissonDistribution
from sympy.stats.crv_types import Normal, NormalDistribution, Gamma, GammaDistribution
from sympy.core.sympify import _sympify, sympify
# Public API of this module.
__all__ = [
    'StochasticProcess',
    'DiscreteTimeStochasticProcess',
    'DiscreteMarkovChain',
    'TransitionMatrixOf',
    'StochasticStateSpaceOf',
    'GeneratorMatrixOf',
    'ContinuousMarkovChain',
    'BernoulliProcess',
    'PoissonProcess',
    'WienerProcess',
    'GammaProcess'
]
# An Indexed expression is random iff its base is random.
@is_random.register(Indexed)  # type: ignore
def _(x):
    return is_random(x.base)
# A RandomIndexedSymbol is random by construction.
@is_random.register(RandomIndexedSymbol)  # type: ignore
def _(x):
    return True
def _set_converter(itr):
    """
    Convert a list/tuple/set to a SymPy ``Set``.

    Arguments that are already SymPy ``Set`` instances are passed
    through unchanged.

    Returns
    =======

    Set
        The argument converted to Set.

    Raises
    ======

    TypeError
        If the argument is not an instance of list/tuple/set.
    """
    converted = FiniteSet(*itr) if isinstance(itr, (list, tuple, set)) else itr
    if isinstance(converted, Set):
        return converted
    raise TypeError("%s is not an instance of list/tuple/set."%(converted))
def _state_converter(itr: tSequence) -> tUnion[Tuple, Range]:
    """
    Convert list/tuple/set/Range/Tuple/FiniteSet to a Tuple or Range.

    String elements are sympified (becoming Symbols); for plain
    lists/tuples the states must additionally be unique.
    """
    def _as_tuple(states):
        return Tuple(*(sympify(s) if isinstance(s, str) else s for s in states))

    if isinstance(itr, (Tuple, set, FiniteSet)):
        return _as_tuple(itr)
    if isinstance(itr, (list, tuple)):
        # check if states are unique
        if len(set(itr)) != len(itr):
            raise ValueError('The state space must have unique elements.')
        return _as_tuple(itr)
    if isinstance(itr, Range):
        # Range is the only ordered set in sympy; try to realize it as a
        # Tuple, keeping the Range itself when it cannot be enumerated
        # (e.g. symbolic endpoints).
        try:
            return _as_tuple(itr)
        except ValueError:
            return itr
    raise TypeError("%s is not an instance of list/tuple/set/Range/Tuple/FiniteSet." % (itr))
def _sym_sympify(arg):
    """
    Convert an arbitrary expression to a type usable inside SymPy.

    Unlike plain ``_sympify``, a string argument becomes a ``Symbol``
    rather than being parsed as an expression, since strings are
    generally unwise to use inside expressions.

    Parameters
    ==========

    arg: The parameter to be converted to be used in SymPy.

    Returns
    =======

    The converted parameter.
    """
    return Symbol(arg) if isinstance(arg, str) else _sympify(arg)
def _matrix_checks(matrix):
    """Validate a transition/generator matrix and freeze mutable matrices."""
    if not isinstance(matrix, (Matrix, MatrixSymbol, ImmutableMatrix)):
        raise TypeError("Transition probabilities either should "
                        "be a Matrix or a MatrixSymbol.")
    if matrix.shape[0] != matrix.shape[1]:
        raise NonSquareMatrixError("%s is not a square matrix"%(matrix))
    # Mutable matrices are frozen so the stored args stay immutable.
    return ImmutableMatrix(matrix.tolist()) if isinstance(matrix, Matrix) else matrix
class StochasticProcess(Basic):
    """
    Base class for all the stochastic processes whether
    discrete or continuous.

    Parameters
    ==========

    sym: Symbol or str

    state_space: Set
        The state space of the stochastic process, by default S.Reals.
        For discrete sets it is zero indexed.

    See Also
    ========

    DiscreteTimeStochasticProcess
    """

    index_set = S.Reals

    def __new__(cls, sym, state_space=S.Reals, **kwargs):
        sym = _symbol_converter(sym)
        state_space = _set_converter(state_space)
        return Basic.__new__(cls, sym, state_space)

    @property
    def symbol(self):
        return self.args[0]

    @property
    def state_space(self) -> tUnion[FiniteSet, Range]:
        if not isinstance(self.args[1], (FiniteSet, Range)):
            return FiniteSet(*self.args[1])
        return self.args[1]

    @property
    def distribution(self):
        # Subclasses with a known per-index distribution override this.
        return None

    def __call__(self, time):
        """
        Overridden in ContinuousTimeStochasticProcess.
        """
        raise NotImplementedError("Use [] for indexing discrete time stochastic process.")

    def __getitem__(self, time):
        """
        Overridden in DiscreteTimeStochasticProcess.
        """
        raise NotImplementedError("Use () for indexing continuous time stochastic process.")

    def probability(self, condition):
        raise NotImplementedError()

    def joint_distribution(self, *args):
        """
        Computes the joint distribution of the random indexed variables.

        Parameters
        ==========

        args: iterable
            The finite list of random indexed variables/the key of a stochastic
            process whose joint distribution has to be computed.

        Returns
        =======

        JointDistribution
            The joint distribution of the list of random indexed variables.
            An unevaluated object is returned if it is not possible to
            compute the joint distribution.

        Raises
        ======

        ValueError: When the arguments passed are not of type RandomIndexSymbol
        or Number.
        """
        args = list(args)
        for i, arg in enumerate(args):
            # Plain numbers are turned into indexed symbols of this process.
            if S(arg).is_Number:
                if self.index_set.is_subset(S.Integers):
                    args[i] = self.__getitem__(arg)
                else:
                    args[i] = self.__call__(arg)
            elif not isinstance(arg, RandomIndexedSymbol):
                raise ValueError("Expected a RandomIndexedSymbol or "
                                 "key not %s"%(type(arg)))

        # Bug fix: use identity comparison with None (PEP 8) instead of
        # "== None", which needlessly invokes structural equality.
        if args[0].pspace.distribution is None:  # checks if there is any distribution available
            return JointDistribution(*args)

        pdf = Lambda(tuple(args),
                     expr=Mul.fromiter(arg.pspace.process.density(arg) for arg in args))
        return JointDistributionHandmade(pdf)

    def expectation(self, condition, given_condition):
        raise NotImplementedError("Abstract method for expectation queries.")

    def sample(self):
        raise NotImplementedError("Abstract method for sampling queries.")
class DiscreteTimeStochasticProcess(StochasticProcess):
    """
    Base class for all discrete stochastic processes.
    """
    def __getitem__(self, time):
        """
        For indexing discrete time stochastic processes.

        Returns
        =======

        RandomIndexedSymbol
        """
        if time not in self.index_set:
            raise IndexError("%s is not in the index set of %s"%(time, self.symbol))
        indexed = Indexed(self.symbol, time)
        return RandomIndexedSymbol(
            indexed, StochasticPSpace(self.symbol, self, self.distribution)
        )
class ContinuousTimeStochasticProcess(StochasticProcess):
    """
    Base class for all continuous time stochastic process.
    """
    def __call__(self, time):
        """
        For indexing continuous time stochastic processes.

        Returns
        =======

        RandomIndexedSymbol
        """
        if time not in self.index_set:
            raise IndexError("%s is not in the index set of %s"%(time, self.symbol))
        applied = Function(self.symbol)(time)
        return RandomIndexedSymbol(
            applied, StochasticPSpace(self.symbol, self, self.distribution)
        )
class TransitionMatrixOf(Boolean):
    """
    Assumes that the matrix is the transition matrix
    of the process.
    """
    def __new__(cls, process, matrix):
        if not isinstance(process, DiscreteMarkovChain):
            raise ValueError("Currently only DiscreteMarkovChain "
                             "support TransitionMatrixOf.")
        return Basic.__new__(cls, process, _matrix_checks(matrix))

    @property
    def process(self):
        return self.args[0]

    @property
    def matrix(self):
        return self.args[1]
class GeneratorMatrixOf(TransitionMatrixOf):
    """
    Assumes that the matrix is the generator matrix
    of the process.
    """
    def __new__(cls, process, matrix):
        if not isinstance(process, ContinuousMarkovChain):
            raise ValueError("Currently only ContinuousMarkovChain "
                             "support GeneratorMatrixOf.")
        return Basic.__new__(cls, process, _matrix_checks(matrix))
class StochasticStateSpaceOf(Boolean):
    """
    Assumes the given state space for a Markov chain; only the size of
    the state space (as a zero-based ``Range`` state index) is stored.
    """
    def __new__(cls, process, state_space):
        if not isinstance(process, (DiscreteMarkovChain, ContinuousMarkovChain)):
            raise ValueError("Currently only DiscreteMarkovChain and ContinuousMarkovChain "
                             "support StochasticStateSpaceOf.")
        state_space = _state_converter(state_space)
        # A Range will not report a (possibly symbolic) length through
        # len(), so compute the number of states directly.
        if isinstance(state_space, Range):
            ss_size = ceiling((state_space.stop - state_space.start) / state_space.step)
        else:
            ss_size = len(state_space)
        return Basic.__new__(cls, process, Range(ss_size))

    @property
    def process(self):
        return self.args[0]

    @property
    def state_index(self):
        return self.args[1]
class MarkovProcess(StochasticProcess):
"""
Contains methods that handle queries
common to Markov processes.
"""
@property
def number_of_states(self) -> tUnion[Integer, Symbol]:
    """
    The number of states in the Markov Chain.
    """
    # args[2] is the transition/generator matrix; its row count equals the
    # number of states (symbolic when the matrix is a MatrixSymbol).
    return _sympify(self.args[2].shape[0])
@property
def _state_index(self) -> Range:
    """
    Returns state index as Range.
    """
    # args[1] is the zero-based state index built by the constructor.
    return self.args[1]
@classmethod
def _sanity_checks(cls, state_space, trans_probs):
    """
    Normalize and validate state space and transition matrix.

    Fills in symbolic defaults when either argument is None and checks
    that the state-space size matches the matrix dimension. Returns the
    normalized ``(state_space, trans_probs)`` pair.
    """
    # Try to never have None as state_space or trans_probs.
    # This helps a lot if we get it done at the start.
    if (state_space is None) and (trans_probs is None):
        # Fully symbolic chain: n states and an n x n matrix symbol.
        _n = Dummy('n', integer=True, nonnegative=True)
        state_space = _state_converter(Range(_n))
        trans_probs = _matrix_checks(MatrixSymbol('_T', _n, _n))

    elif state_space is None:
        # Derive the state space size from the given matrix.
        trans_probs = _matrix_checks(trans_probs)
        state_space = _state_converter(Range(trans_probs.shape[0]))

    elif trans_probs is None:
        # Derive a symbolic matrix sized from the given state space.
        state_space = _state_converter(state_space)
        if isinstance(state_space, Range):
            _n = ceiling((state_space.stop - state_space.start) / state_space.step)
        else:
            _n = len(state_space)
        trans_probs = MatrixSymbol('_T', _n, _n)

    else:
        state_space = _state_converter(state_space)
        trans_probs = _matrix_checks(trans_probs)

    # Range object doesn't want to give a symbolic size
    # so we do it ourselves.
    if isinstance(state_space, Range):
        ss_size = ceiling((state_space.stop - state_space.start) / state_space.step)
    else:
        ss_size = len(state_space)

    if ss_size != trans_probs.shape[0]:
        raise ValueError('The size of the state space and the number of '
                         'rows of the transition matrix must be the same.')

    return state_space, trans_probs
def _extract_information(self, given_condition):
    """
    Helper function to extract information, like,
    transition matrix/generator matrix, state space, etc.
    """
    # NOTE(review): if self is neither a DiscreteMarkovChain nor a
    # ContinuousMarkovChain, trans_probs/state_index stay unbound and an
    # UnboundLocalError would follow — presumably unreachable, confirm.
    if isinstance(self, DiscreteMarkovChain):
        trans_probs = self.transition_probabilities
        state_index = self._state_index
    elif isinstance(self, ContinuousMarkovChain):
        trans_probs = self.generator_matrix
        state_index = self._state_index

    # An And may combine matrix/state-space assumptions with relational
    # conditions; extract the former, keep only the latter as condition.
    if isinstance(given_condition, And):
        gcs = given_condition.args
        given_condition = S.true
        for gc in gcs:
            if isinstance(gc, TransitionMatrixOf):
                trans_probs = gc.matrix
            if isinstance(gc, StochasticStateSpaceOf):
                state_index = gc.state_index
            if isinstance(gc, Relational):
                given_condition = given_condition & gc

    # A bare assumption carries no residual condition.
    if isinstance(given_condition, TransitionMatrixOf):
        trans_probs = given_condition.matrix
        given_condition = S.true
    if isinstance(given_condition, StochasticStateSpaceOf):
        state_index = given_condition.state_index
        given_condition = S.true

    return trans_probs, state_index, given_condition
def _check_trans_probs(self, trans_probs, row_sum=1):
"""
Helper function for checking the validity of transition
probabilities.
"""
if not isinstance(trans_probs, MatrixSymbol):
rows = trans_probs.tolist()
for row in rows:
if (sum(row) - row_sum) != 0:
raise ValueError("Values in a row must sum to %s. "
"If you are using Float or floats | |
# Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nodes related to importing modules or names.
Normally imports are mostly relatively static, but Nuitka also attempts to
cover the uses of "__import__" built-in and other import techniques, that
allow dynamic values.
If other optimizations make it possible to predict these, the compiler can go
deeper than what it normally could. The import expression node can lead to
modules being added. After optimization it will be asked about used modules.
"""
import sys
from nuitka.__past__ import long, unicode, xrange
from nuitka.codegen.Reports import onMissingTrust
from nuitka.importing.Importing import isPackageDir, locateModule
from nuitka.importing.ImportResolving import resolveModuleName
from nuitka.importing.StandardLibrary import isStandardLibraryPath
from nuitka.Options import isStandaloneMode, shallWarnUnusualCode
from nuitka.PythonVersions import (
getFutureModuleKeys,
getImportlibSubPackages,
python_version,
)
from nuitka.specs.BuiltinParameterSpecs import (
BuiltinParameterSpec,
extractBuiltinArgs,
)
from nuitka.Tracing import unusual_logger
from nuitka.utils.ModuleNames import ModuleName
from .ConstantRefNodes import (
ExpressionConstantSysVersionInfoRef,
makeConstantRefNode,
)
from .ExpressionBases import (
ExpressionBase,
ExpressionChildHavingBase,
ExpressionChildrenHavingBase,
)
from .LocalsScopes import GlobalsDictHandle
from .NodeBases import StatementChildHavingBase
from .NodeMakingHelpers import makeRaiseExceptionReplacementExpression
from .shapes.BuiltinTypeShapes import tshape_module, tshape_module_builtin
# These modules are supported in code generation to be imported the hard way.
hard_modules = frozenset(
    (
        "os",
        "sys",
        "types",
        "typing",
        "__future__",
        "site",
        "importlib",
        "_frozen_importlib",
        "_frozen_importlib_external",
        "pkgutil",
        "functools",
    )
)

# Minimum hex-encoded Python version at which a hard module exists at all.
hard_modules_version = {
    "typing": 0x350,
    "_frozen_importlib": 0x300,
    "_frozen_importlib_external": 0x350,
}

# Trust levels for attributes of hard modules.
trust_undefined = 0  # nothing is known about the attribute
trust_constant = 1  # value can be inlined as a compile time constant
trust_exist = 2  # the attribute is known to exist
trust_future = trust_exist  # __future__ feature names merely exist
trust_importable = 3  # the attribute is itself an importable module
trust_node = 4  # the attribute is replaced by a dedicated node class
trust_may_exist = 5  # the attribute may or may not exist
trust_not_exist = 6  # the attribute is known not to exist

# Maps (module_name, attribute_name) to the node class used for trust_node.
trust_node_factory = {}

module_importlib_trust = dict(
    (key, trust_importable) for key in getImportlibSubPackages()
)

module_sys_trust = {
    "version": trust_constant,
    "hexversion": trust_constant,
    "platform": trust_constant,
    "maxsize": trust_constant,
    "builtin_module_names": trust_constant,
    "stdout": trust_exist,
    "stderr": trust_exist,
}

# Version-dependent trust entries for "sys" attributes.
if python_version < 0x270:
    module_sys_trust["version_info"] = trust_constant
else:
    module_sys_trust["version_info"] = trust_node
    trust_node_factory[("sys", "version_info")] = ExpressionConstantSysVersionInfoRef

if python_version < 0x300:
    module_sys_trust["exc_type"] = trust_may_exist
    module_sys_trust["exc_value"] = trust_may_exist
    module_sys_trust["exc_traceback"] = trust_may_exist

    module_sys_trust["maxint"] = trust_constant
    module_sys_trust["subversion"] = trust_constant
else:
    module_sys_trust["exc_type"] = trust_not_exist
    module_sys_trust["exc_value"] = trust_not_exist
    module_sys_trust["exc_traceback"] = trust_not_exist

module_typing_trust = {
    "TYPE_CHECKING": trust_constant,
}

module_os_trust = {"name": trust_constant}

# Per-module trust tables for attributes of hard modules.
hard_modules_trust = {
    "os": module_os_trust,
    "sys": module_sys_trust,
    "types": {},
    "typing": module_typing_trust,
    "__future__": dict((key, trust_future) for key in getFutureModuleKeys()),
    "site": {},
    "importlib": module_importlib_trust,
    "_frozen_importlib": {},
    "_frozen_importlib_external": {},
    "pkgutil": {"get_data": trust_exist},
    "functools": {"partial": trust_exist},
}
def isHardModuleWithoutSideEffect(module_name):
    """Check if a hard module can be imported without side effects.

    All hard modules qualify except "site", whose import has side effects.
    """
    return module_name != "site" and module_name in hard_modules
class ExpressionImportModuleFixed(ExpressionBase):
    """Hard coded import names, that we know to exist.

    These are created as a result of builtin imports and
    "importlib.import_module" calls that were compile time resolved, and
    for known module names.
    """

    kind = "EXPRESSION_IMPORT_MODULE_FIXED"

    __slots__ = (
        "module_name",
        "found_module_name",
        "found_module_filename",
        "finding",
    )

    def __init__(self, module_name, source_ref):
        ExpressionBase.__init__(self, source_ref=source_ref)

        self.module_name = resolveModuleName(module_name)

        self.finding = None

        # If not found, we import the package at least
        (
            self.found_module_name,
            self.found_module_filename,
            self.finding,
        ) = self._attemptFollow()

    def _attemptFollow(self):
        """Locate the module, falling back to containing packages if absent.

        Returns the (module_name, module_filename, finding) triple of the
        innermost module or package that could be located.
        """
        found_module_name, found_module_filename, finding = locateModule(
            module_name=self.module_name,
            parent_package=None,
            level=0,
        )

        # Bug fix: this used to check "self.finding", which is still None at
        # this point, so the package fallback below never triggered; also the
        # package name must come from the located module name, not from the
        # filename.
        if finding == "not-found":
            while True:
                module_name = found_module_name.getPackageName()

                if module_name is None:
                    break

                found_module_name, found_module_filename, finding = locateModule(
                    module_name=module_name,
                    parent_package=None,
                    level=0,
                )

                if finding != "not-found":
                    break

        return found_module_name, found_module_filename, finding

    def finalize(self):
        del self.parent

    def getDetails(self):
        return {"module_name": self.module_name}

    def getModuleName(self):
        return self.module_name

    @staticmethod
    def mayHaveSideEffects():
        # TODO: For included modules, we might be able to tell, not not done now.
        return True

    @staticmethod
    def mayRaiseException(exception_type):
        # TODO: For included modules, we might be able to tell, not not done now.
        return True

    def getTypeShape(self):
        # Built-in modules have their own, more specific shape.
        if self.module_name in sys.builtin_module_names:
            return tshape_module_builtin
        else:
            return tshape_module

    def getUsedModule(self):
        return self.found_module_name, self.found_module_filename, self.finding

    def computeExpressionRaw(self, trace_collection):
        if self.mayRaiseException(BaseException):
            trace_collection.onExceptionRaiseExit(BaseException)

        # Nothing to do about it.
        return self, None, None

    def computeExpressionImportName(self, import_node, import_name, trace_collection):
        # TODO: For include modules, something might be possible here.
        return self.computeExpressionAttribute(
            lookup_node=import_node,
            attribute_name=import_name,
            trace_collection=trace_collection,
        )
class ExpressionImportHardBase(ExpressionBase):
    """Base for hard import expressions that must resolve at compile time."""

    # Base classes can be abstract, pylint: disable=abstract-method

    __slots__ = ("module_name", "finding", "module_filename")

    def __init__(self, module_name, source_ref):
        ExpressionBase.__init__(self, source_ref=source_ref)

        self.module_name = ModuleName(module_name)
        self.finding = None
        self.module_filename = None

        located_name, self.module_filename, self.finding = locateModule(
            module_name=self.module_name,
            parent_package=None,
            level=0,
        )

        # Hard imports must exist and resolve to the very same name.
        assert self.finding != "not-found", self.module_name
        assert located_name == self.module_name

    def getUsedModule(self):
        return self.module_name, self.module_filename, self.finding
class ExpressionImportModuleHard(ExpressionImportHardBase):
    """Hard coded import names, e.g. of "__future__"

    These are directly created for some Python mechanics, but also due to
    compile time optimization for imports of statically known modules.
    """

    kind = "EXPRESSION_IMPORT_MODULE_HARD"

    # The actual module object when importable without side effects, else None.
    __slots__ = ("module",)

    def __init__(self, module_name, source_ref):
        ExpressionImportHardBase.__init__(
            self, module_name=module_name, source_ref=source_ref
        )

        # Side-effect free hard modules are imported at compile time so their
        # attributes can be inspected for optimization.
        if isHardModuleWithoutSideEffect(self.module_name):
            self.module = __import__(self.module_name)
        else:
            self.module = None

    def finalize(self):
        del self.parent

    def getDetails(self):
        return {"module_name": self.module_name}

    def getModuleName(self):
        return self.module_name

    def mayHaveSideEffects(self):
        # Only modules we could not import at compile time may do anything.
        return self.module is None

    def mayRaiseException(self, exception_type):
        return self.mayHaveSideEffects()

    def getTypeShape(self):
        # Built-in modules have their own, more specific shape.
        if self.module_name in sys.builtin_module_names:
            return tshape_module_builtin
        else:
            return tshape_module

    def computeExpressionRaw(self, trace_collection):
        # NOTE(review): "_attemptFollow" is not defined on this class or its
        # base in this file, and "self.finding" is always set in __init__,
        # so this branch appears dead — confirm before relying on it.
        if self.finding is None:
            self._attemptFollow()

        if self.mayRaiseException(BaseException):
            trace_collection.onExceptionRaiseExit(BaseException)

        return self, None, None

    def computeExpressionImportName(self, import_node, import_name, trace_collection):
        # "from module import name" is treated like an attribute lookup.
        return self.computeExpressionAttribute(
            lookup_node=import_node,
            attribute_name=import_name,
            trace_collection=trace_collection,
        )

    @staticmethod
    def _getImportNameErrorString(module, module_name, name):
        # Reproduce the exact CPython error wording of the target version.
        if python_version < 0x340:
            return "cannot import name %s" % name
        if python_version < 0x370:
            return "cannot import name %r" % name
        elif isStandaloneMode():
            return "cannot import name %r from %r" % (name, module_name)
        else:
            return "cannot import name %r from %r (%s)" % (
                name,
                module_name,
                module.__file__ if hasattr(module, "__file__") else "unknown location",
            )

    def computeExpressionAttribute(self, lookup_node, attribute_name, trace_collection):
        # By default, an attribute lookup may change everything about the lookup
        # source.
        if self.module is not None:
            # Consult the per-module trust table for this attribute.
            trust = hard_modules_trust[self.module_name].get(
                attribute_name, trust_undefined
            )

            if trust is trust_importable:
                # TODO: Change this is a hard module import itself, currently these are not all trusted
                # themselves yet. We do not have to indicate exception, but it makes no sense to annotate
                # that here at this point.
                trace_collection.onExceptionRaiseExit(BaseException)
            elif trust is trust_may_exist:
                trace_collection.onExceptionRaiseExit(BaseException)
            elif (
                not hasattr(self.module, attribute_name)
                and trust is not trust_undefined
            ):
                # Trusted to be absent: replace with a raising expression.
                # TODO: Unify with below branches.
                trace_collection.onExceptionRaiseExit(ImportError)

                new_node = makeRaiseExceptionReplacementExpression(
                    expression=lookup_node,
                    exception_type="AttributeError",
                    exception_value=self._getImportNameErrorString(
                        self.module, self.module_name, attribute_name
                    ),
                )

                return (
                    new_node,
                    "new_raise",
                    "Hard module %r attribute missing %r pre-computed."
                    % (self.module_name, attribute_name),
                )
            else:
                if trust is trust_undefined:
                    # Unknown attribute: warn about the missing trust config.
                    trace_collection.onExceptionRaiseExit(ImportError)

                    onMissingTrust(
                        "Hard module %r attribute %r missing trust config for existing value.",
                        lookup_node.getSourceReference(),
                        self.module_name,
                        attribute_name,
                    )
                elif trust is trust_constant:
                    # Make sure it's actually there, and not becoming the getattr default by accident.
                    assert hasattr(self.module, attribute_name), self

                    return (
                        makeConstantRefNode(
                            constant=getattr(self.module, attribute_name),
                            source_ref=lookup_node.getSourceReference(),
                            user_provided=True,
                        ),
                        "new_constant",
                        "Hard module '%s' imported %r pre-computed to constant value."
                        % (self.module_name.asString(), attribute_name),
                    )
                elif trust is trust_node:
                    # Dedicated node class registered for this attribute.
                    result = trust_node_factory[self.module_name, attribute_name](
                        source_ref=lookup_node.source_ref
                    )

                    return (
                        result,
                        "new_expression",
                        "Attribute lookup %r of hard module %r becomes node %r."
                        % (self.module_name.asString(), attribute_name, result.kind),
                    )
                else:
                    # Merely trusted to exist: becomes a hard name import.
                    result = ExpressionImportModuleNameHard(
                        module_name=self.module_name,
                        import_name=attribute_name,
                        source_ref=lookup_node.getSourceReference(),
                    )

                    return (
                        result,
                        "new_expression",
                        "Attribute lookup %r of hard module %r becomes hard module name import."
                        % (self.module_name, attribute_name),
                    )
        else:
            # Nothing can be known, but lets not do control flow escape, that is just
            # too unlikely.
            trace_collection.onExceptionRaiseExit(BaseException)

        return lookup_node, None, None

    def hasShapeTrustedAttributes(self):
        return True
class ExpressionImportModuleNameHard(ExpressionImportHardBase):
    """Hard coded import names, e.g. of "os.path.dirname"

    These are directly created for some Python mechanics.
    """

    kind = "EXPRESSION_IMPORT_MODULE_NAME_HARD"

    # Bug fix: "finding" and "module_filename" were duplicated here although
    # already declared in ExpressionImportHardBase.__slots__; duplicate slots
    # waste memory per instance and shadow the base class descriptors.
    __slots__ = ("import_name", "trust")

    def __init__(self, module_name, import_name, source_ref):
        ExpressionImportHardBase.__init__(
            self, module_name=module_name, source_ref=source_ref
        )

        self.import_name = import_name

        # Trust level for this name, None when not configured for the module.
        self.trust = hard_modules_trust[self.module_name].get(self.import_name)

    def finalize(self):
        del self.parent

    def getDetails(self):
        return {"module_name": self.module_name, "import_name": self.import_name}

    def getModuleName(self):
        return self.module_name

    def getImportName(self):
        return self.import_name

    def computeExpressionRaw(self, trace_collection):
        # As good as it gets, will exist, otherwise we do not get created.
        if self.mayHaveSideEffects():
            trace_collection.onExceptionRaiseExit(AttributeError)

        return self, None, None

    def mayHaveSideEffects(self):
        # Without trust configuration, the lookup may do anything.
        return self.trust is None

    def mayRaiseException(self, exception_type):
        return self.trust is None
# Parameter spec of "importlib.import_module", used to match calls to it.
importlib_import_module_spec = BuiltinParameterSpec(
    "importlib.import_module", ("name", "package"), default_count=1
)
class ExpressionImportlibImportModuleRef(ExpressionImportModuleNameHard):
kind = "EXPRESSION_IMPORTLIB_IMPORT_MODULE_REF"
def __init__(self, source_ref):
    # Fixed reference to "importlib.import_module" as a hard name import.
    ExpressionImportModuleNameHard.__init__(
        self,
        module_name="importlib",
        import_name="import_module",
        source_ref=source_ref,
    )
@staticmethod
def getDetails():
    # No details beyond the fixed module/import name of the class itself.
    return {}
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = | |
<gh_stars>0
# Copyright (c) 2016 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
Toolkit App Store Descriptor.
"""
import os
import urllib
import fnmatch
import urllib2
import httplib
from tank_vendor.shotgun_api3.lib import httplib2
import cPickle as pickle
from ...util import shotgun
from ...util import UnresolvableCoreConfigurationError, ShotgunAttachmentDownloadError
from ...util.user_settings import UserSettings
from ..descriptor import Descriptor
from ..errors import TankAppStoreConnectionError
from ..errors import TankAppStoreError
from ..errors import TankDescriptorError
from ..errors import InvalidAppStoreCredentialsError
from ... import LogManager
from .. import constants
from .downloadable import IODescriptorDownloadable
from ...constants import SUPPORT_EMAIL
# use api json to cover py 2.5
from tank_vendor import shotgun_api3
# json implementation shipped with the shotgun API (covers py 2.5)
json = shotgun_api3.shotgun.json

log = LogManager.get_logger(__name__)

# file where we cache the app store metadata for an item
METADATA_FILE = ".cached_metadata.pickle"
class IODescriptorAppStore(IODescriptorDownloadable):
"""
Represents a toolkit app store item.
{type: app_store, name: tk-core, version: v12.3.4}
{type: app_store, name: NAME, version: VERSION}
"""
# cache app store connections for performance
_app_store_connections = {}

# internal app store mappings
(APP, FRAMEWORK, ENGINE, CONFIG, CORE) = range(5)

# Descriptor type -> app store entity type of the bundle itself
# (None where no such entity exists in the app store).
_APP_STORE_OBJECT = {
    Descriptor.APP: constants.TANK_APP_ENTITY_TYPE,
    Descriptor.FRAMEWORK: constants.TANK_FRAMEWORK_ENTITY_TYPE,
    Descriptor.ENGINE: constants.TANK_ENGINE_ENTITY_TYPE,
    Descriptor.CONFIG: constants.TANK_CONFIG_ENTITY_TYPE,
    Descriptor.INSTALLED_CONFIG: None,
    Descriptor.CORE: None,
}

# Descriptor type -> app store entity type of a bundle version.
_APP_STORE_VERSION = {
    Descriptor.APP: constants.TANK_APP_VERSION_ENTITY_TYPE,
    Descriptor.FRAMEWORK: constants.TANK_FRAMEWORK_VERSION_ENTITY_TYPE,
    Descriptor.ENGINE: constants.TANK_ENGINE_VERSION_ENTITY_TYPE,
    Descriptor.CONFIG: constants.TANK_CONFIG_VERSION_ENTITY_TYPE,
    Descriptor.INSTALLED_CONFIG: None,
    Descriptor.CORE: constants.TANK_CORE_VERSION_ENTITY_TYPE,
}

# Descriptor type -> field linking a version entity to its bundle entity.
_APP_STORE_LINK = {
    Descriptor.APP: "sg_tank_app",
    Descriptor.FRAMEWORK: "sg_tank_framework",
    Descriptor.ENGINE: "sg_tank_engine",
    Descriptor.CONFIG: "sg_tank_config",
    Descriptor.INSTALLED_CONFIG: None,
    Descriptor.CORE: None,
}

# Descriptor type -> event type used when logging download statistics.
_DOWNLOAD_STATS_EVENT_TYPE = {
    Descriptor.APP: "TankAppStore_App_Download",
    Descriptor.FRAMEWORK: "TankAppStore_Framework_Download",
    Descriptor.ENGINE: "TankAppStore_Engine_Download",
    Descriptor.CONFIG: "TankAppStore_Config_Download",
    Descriptor.INSTALLED_CONFIG: None,
    Descriptor.CORE: "TankAppStore_CoreApi_Download",
}

# fields to cache for version entities
_VERSION_FIELDS_TO_CACHE = [
    "id",
    "code",
    "sg_status_list",
    "description",
    "tags",
    "sg_detailed_release_notes",
    "sg_documentation",
    constants.TANK_CODE_PAYLOAD_FIELD
]

# fields to cache for bundle entities
_BUNDLE_FIELDS_TO_CACHE = [
    "id",
    "sg_system_name",
    "sg_status_list",
    "sg_deprecation_message"
]
def __init__(self, descriptor_dict, sg_connection, bundle_type):
    """
    Constructor

    :param descriptor_dict: descriptor dictionary describing the bundle
    :param sg_connection: Shotgun connection to associated site
    :param bundle_type: Either Descriptor.APP, CORE, ENGINE or FRAMEWORK or CONFIG
    :return: Descriptor instance
    """
    super(IODescriptorAppStore, self).__init__(descriptor_dict)

    # "type", "name" and "version" are required; "label" is optional.
    self._validate_descriptor(
        descriptor_dict,
        required=["type", "name", "version"],
        optional=["label"]
    )

    self._sg_connection = sg_connection
    self._type = bundle_type
    self._name = descriptor_dict.get("name")
    self._version = descriptor_dict.get("version")
    # optional label, shown in the human readable representation
    self._label = descriptor_dict.get("label")
def __str__(self):
    """
    Human readable representation
    """
    # Examples:
    #   Toolkit App Store App tk-multi-loader2 v1.2.3
    #   Toolkit App Store Framework tk-framework-shotgunutils v1.2.3
    #   Toolkit App Store Core v1.2.3
    if self._type == Descriptor.CORE:
        display_name = "Toolkit App Store Core %s" % self._version
    else:
        type_names = {
            Descriptor.APP: "App",
            Descriptor.FRAMEWORK: "Framework",
            Descriptor.ENGINE: "Engine",
            Descriptor.CONFIG: "Config",
            Descriptor.CORE: "Core",
        }
        display_name = "Toolkit App Store %s %s %s" % (
            type_names[self._type],
            self._name,
            self._version,
        )

    if self._label:
        display_name += " [label %s]" % self._label

    return display_name
def __load_cached_app_store_metadata(self, path):
    """
    Loads the metadata for a path in the app store

    :param path: path to bundle location on disk
    :return: metadata dictionary, or an empty dict if no cache was found
    """
    cache_file = os.path.join(path, METADATA_FILE)
    if os.path.exists(cache_file):
        # Idiom fix: use a context manager instead of a manual
        # open/try/finally so the handle is closed even if unpickling raises.
        with open(cache_file, "rt") as fp:
            metadata = pickle.load(fp)
    else:
        log.debug(
            "%r Could not find cached metadata file %s - "
            "will proceed with empty app store metadata." % (self, cache_file)
        )
        metadata = {}
    return metadata
@LogManager.log_timing
def __refresh_metadata(self, path, sg_bundle_data=None, sg_version_data=None):
    """
    Refreshes the metadata cache on disk. The metadata cache contains
    app store information such as deprecation status, label information
    and release note data.

    For performance, the metadata can be provided by the caller. If
    not provided, the method will retrieve it from the app store.

    If the descriptor resides in a read-only bundle cache, for example
    baked into a DCC distribution, the cache will not be updated.

    :param path: The path to the bundle where cache info should be written
    :param sg_bundle_data, sg_version_data: Shotgun data to cache
    :returns: A dictionary with keys 'sg_bundle_data' and 'sg_version_data',
              containing Shotgun metadata.
    """
    log.debug("Attempting to refresh app store metadata for %r" % self)

    cache_file = os.path.join(path, METADATA_FILE)
    log.debug("Will attempt to refresh cache in %s" % cache_file)

    if sg_version_data: # no none-check for sg_bundle_data param since this is none for tk-core
        log.debug("Will cache pre-fetched cache data.")
    else:
        log.debug("Connecting to Shotgun to retrieve metadata for %r" % self)

        # get the appropriate shotgun app store types and fields
        bundle_entity_type = self._APP_STORE_OBJECT[self._type]
        version_entity_type = self._APP_STORE_VERSION[self._type]
        link_field = self._APP_STORE_LINK[self._type]

        # connect to the app store
        (sg, _) = self.__create_sg_app_store_connection()

        if self._type == self.CORE:
            # special handling of core since it doesn't have a high-level 'bundle' entity
            sg_bundle_data = None

            sg_version_data = sg.find_one(
                constants.TANK_CORE_VERSION_ENTITY_TYPE,
                [["code", "is", self._version]],
                self._VERSION_FIELDS_TO_CACHE
            )
            if sg_version_data is None:
                raise TankDescriptorError(
                    "The App store does not have a version '%s' of Core!" % self._version
                )
        else:
            # engines, apps etc have a 'bundle level entity' in the app store,
            # e.g. something representing the app or engine.
            # then a version entity representing a particular version
            sg_bundle_data = sg.find_one(
                bundle_entity_type,
                [["sg_system_name", "is", self._name]],
                self._BUNDLE_FIELDS_TO_CACHE
            )

            if sg_bundle_data is None:
                raise TankDescriptorError(
                    "The App store does not contain an item named '%s'!" % self._name
                )

            # now get the version
            sg_version_data = sg.find_one(
                version_entity_type,
                [
                    [link_field, "is", sg_bundle_data],
                    ["code", "is", self._version]
                ],
                self._VERSION_FIELDS_TO_CACHE
            )
            if sg_version_data is None:
                raise TankDescriptorError(
                    "The App store does not have a "
                    "version '%s' of item '%s'!" % (self._version, self._name)
                )

    # create metadata
    metadata = {
        "sg_bundle_data": sg_bundle_data,
        "sg_version_data": sg_version_data
    }

    # try to write to location - but it may be located in a
    # readonly bundle cache - if the caching fails, gracefully
    # fall back and log
    try:
        fp = open(cache_file, "wt")
        try:
            pickle.dump(metadata, fp)
            log.debug("Wrote app store metadata cache '%s'" % cache_file)
        finally:
            fp.close()
    except Exception as e:
        # best effort only - the cache is an optimization, not a requirement
        log.debug("Did not update app store metadata cache '%s': %s" % (cache_file, e))

    return metadata
def _get_bundle_cache_path(self, bundle_cache_root):
"""
Given a cache root, compute a cache path suitable
for this descriptor, using the 0.18+ path format.
:param bundle_cache_root: Bundle cache root path
:return: Path to bundle cache location
"""
return os.path.join(
bundle_cache_root,
"app_store",
self.get_system_name(),
self.get_version()
)
def _get_cache_paths(self):
"""
Get a list of resolved paths, starting with the primary and
continuing with alternative locations where it may reside.
Note: This method only computes paths and does not perform any I/O ops.
:return: List of path strings
"""
# get default cache paths from base class
paths = super(IODescriptorAppStore, self)._get_cache_paths()
# for compatibility with older versions of core, prior to v0.18.x,
# add the old-style bundle cache path as a fallback. As of v0.18.x,
# the bundle cache subdirectory names were shortened and otherwise
# modified to help prevent MAX_PATH issues on windows. This call adds
# the old path as a fallback for cases where core has been upgraded
# for an existing project. NOTE: This only works because the bundle
# cache root didn't change (when use_bundle_cache is set to False).
# If the bundle cache root changes across core versions, then this will
# need to be refactored.
legacy_folder = self._get_legacy_bundle_install_folder(
"app_store",
self._bundle_cache_root,
self._type,
self.get_system_name(),
self.get_version()
)
if legacy_folder:
paths.append(legacy_folder)
return paths
###############################################################################################
# data accessors
    def get_system_name(self):
        """
        Returns a short name, suitable for use in configuration files
        and for folders on disk.

        :returns: The app store system name for this item (``self._name``).
        """
        return self._name
def get_deprecation_status(self):
"""
Returns information about deprecation.
May download the item from the app store in order
to retrieve the metadata.
:returns: Returns a tuple (is_deprecated, message) to indicate
if this item is deprecated.
"""
# make sure we have the app payload + metadata
self.ensure_local()
# grab metadata
metadata = self.__load_cached_app_store_metadata(
self.get_path()
)
sg_bundle_data = metadata.get("sg_bundle_data") or {}
if sg_bundle_data.get("sg_status_list") == "dep":
msg = sg_bundle_data.get("sg_deprecation_message", "No reason given.")
return (True, msg)
else:
return (False, "")
    def get_version(self):
        """
        Returns the version number string for this item.

        :returns: The version string (``self._version``) as stored in the app store.
        """
        return self._version
def get_changelog(self):
"""
Returns information about the changelog for this item.
May download the item from the app store in order
to retrieve the metadata.
:returns: A tuple (changelog_summary, changelog_url). Values may be None
to indicate that no changelog exists.
"""
summary = None
url = None
# make sure we have the app payload + metadata
| |
#!/usr/bin/env python
"""
EODataDownGEDIsensor.
"""
# This file is part of 'EODataDown'
# A tool for automating Earth Observation Data Downloading.
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Purpose:  Provides an implementation for the ICESAT2 sensor.
#
# Author: <NAME>
# Email: <EMAIL>
# Date: 15/04/2020
# Version: 1.0
#
# History:
# Version 1.0 - Created.
import logging
import json
import datetime
import os
import shutil
import multiprocessing
import sys
import importlib
import traceback
import eodatadown.eodatadownutils
from eodatadown.eodatadownutils import EODataDownException
from eodatadown.eodatadownutils import EODataDownResponseException
from eodatadown.eodatadownsensor import EODataDownSensor
from eodatadown.eodatadownusagedb import EODataDownUpdateUsageLogDB
from sqlalchemy.ext.declarative import declarative_base
import sqlalchemy
import sqlalchemy.dialects.postgresql
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy.sql.expression import func
import requests
# Module-level logger for this sensor implementation.
logger = logging.getLogger(__name__)

# Declarative base shared by the ICESAT2 table classes defined below.
Base = declarative_base()
class EDDICESAT2(Base):
    """
    SQLAlchemy model recording each ICESAT2 granule known to the system:
    granule identification, acquisition metadata, orbit information,
    spatial extent, and the download / ARD-processing / datacube-load
    state of the scene.
    """
    __tablename__ = "EDDICESAT2"

    # Unique record ID and identifiers for the granule.
    PID = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    Producer_ID = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    Granule_ID = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    Title = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    # Acquisition timing and the product/version the granule belongs to.
    Start_Time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False)
    End_Time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False)
    Updated_Time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False)
    Product = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    Version = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    Online = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    Original_Format = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    # Orbit metadata as reported by the data provider.
    Orb_Ascending_Crossing = sqlalchemy.Column(sqlalchemy.Float, nullable=True)
    Orb_Start_Direct = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    Orb_Start_Lat = sqlalchemy.Column(sqlalchemy.Float, nullable=True)
    Orb_End_Direct = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    Orb_End_Lat = sqlalchemy.Column(sqlalchemy.Float, nullable=True)
    Eq_Cross_Time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=True)
    Eq_Cross_Lon = sqlalchemy.Column(sqlalchemy.Float, nullable=True)
    Orbit_Number = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
    # Bounding box of the granule footprint.
    North_Lat = sqlalchemy.Column(sqlalchemy.Float, nullable=True)
    South_Lat = sqlalchemy.Column(sqlalchemy.Float, nullable=True)
    East_Lon = sqlalchemy.Column(sqlalchemy.Float, nullable=True)
    West_Lon = sqlalchemy.Column(sqlalchemy.Float, nullable=True)
    # File size/checksum and where the granule can be fetched from.
    Total_Size = sqlalchemy.Column(sqlalchemy.Float, nullable=True)
    File_MD5 = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    Remote_URL = sqlalchemy.Column(sqlalchemy.String, nullable=False)
    Query_Date = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False)
    # Download state.
    Download_Start_Date = sqlalchemy.Column(sqlalchemy.DateTime, nullable=True)
    Download_End_Date = sqlalchemy.Column(sqlalchemy.DateTime, nullable=True)
    Downloaded = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    Download_Path = sqlalchemy.Column(sqlalchemy.String, nullable=False, default="")
    Archived = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    # ARD (analysis ready data) processing state.
    ARDProduct_Start_Date = sqlalchemy.Column(sqlalchemy.DateTime, nullable=True)
    ARDProduct_End_Date = sqlalchemy.Column(sqlalchemy.DateTime, nullable=True)
    ARDProduct = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    ARDProduct_Path = sqlalchemy.Column(sqlalchemy.String, nullable=False, default="")
    # Datacube load state.
    DCLoaded_Start_Date = sqlalchemy.Column(sqlalchemy.DateTime, nullable=True)
    DCLoaded_End_Date = sqlalchemy.Column(sqlalchemy.DateTime, nullable=True)
    DCLoaded = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    Invalid = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    ExtendedInfo = sqlalchemy.Column(sqlalchemy.dialects.postgresql.JSONB, nullable=True)
    RegCheck = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
class EDDICESAT2Plugins(Base):
    """
    SQLAlchemy model recording, per scene and per plugin, whether a user
    plugin has been run against the scene and with what outcome.
    """
    __tablename__ = "EDDICESAT2Plugins"

    # Composite primary key: the scene and the plugin applied to it.
    Scene_PID = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    PlugInName = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
    # Execution timing and outcome of the plugin run.
    Start_Date = sqlalchemy.Column(sqlalchemy.DateTime, nullable=True)
    End_Date = sqlalchemy.Column(sqlalchemy.DateTime, nullable=True)
    Completed = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    Success = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    Outputs = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    Error = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    ExtendedInfo = sqlalchemy.Column(sqlalchemy.dialects.postgresql.JSONB, nullable=True)
def _download_icesat2_file(params):
    """
    Function which is used with multiprocessing pool object for downloading ICESAT2 data.

    :param params: List of parameters [PID, Producer_ID, Remote_URL, DB_Info_Obj,
                   download_path, expected_output_file, earthdata_username,
                   earthdata_password, local_data_cache_dirs]
    """
    pid = params[0]
    producer_id = params[1]
    remote_url = params[2]
    db_info_obj = params[3]
    scn_lcl_dwnld_path = params[4]
    exp_out_file = params[5]
    earth_data_user = params[6]
    earth_data_pass = params[7]
    dir_lcl_data_cache = params[8]

    success = False
    # First check whether the file is already available in one of the local
    # data cache directories - a local copy is much cheaper than a download.
    lcl_file = None
    if dir_lcl_data_cache is not None:
        file_name = os.path.basename(exp_out_file)
        for lcl_dir in dir_lcl_data_cache:
            if os.path.exists(lcl_dir) and os.path.isdir(lcl_dir):
                cand_file = os.path.join(lcl_dir, file_name)
                if os.path.exists(cand_file):
                    lcl_file = cand_file
                    break

    start_date = datetime.datetime.now()
    if lcl_file is not None:
        shutil.copy(lcl_file, scn_lcl_dwnld_path)
        success = True
    else:
        eodd_wget_downloader = eodatadown.eodatadownutils.EODDWGetDownload()
        try:
            # BUG FIX: the password keyword previously contained a redacted
            # placeholder; pass the EarthData password supplied in params.
            success = eodd_wget_downloader.downloadFile(remote_url, scn_lcl_dwnld_path,
                                                        username=earth_data_user,
                                                        password=earth_data_pass,
                                                        try_number="10", time_out="60")
        except Exception as e:
            logger.error("An error has occurred while downloading from ICESAT2: '{}'".format(e))
    end_date = datetime.datetime.now()

    if success and os.path.exists(exp_out_file) and os.path.isfile(exp_out_file):
        logger.debug("Set up database connection and update record.")
        db_engine = sqlalchemy.create_engine(db_info_obj.dbConn)
        session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)
        ses = session_sqlalc()
        try:
            query_result = ses.query(EDDICESAT2).filter(EDDICESAT2.PID == pid).one_or_none()
            if query_result is None:
                logger.error("Could not find the scene within local database: {}".format(producer_id))
            else:
                fileHashUtils = eodatadown.eodatadownutils.EDDCheckFileHash()
                file_md5 = fileHashUtils.calcMD5Checksum(exp_out_file)
                query_result.Downloaded = True
                query_result.Download_Start_Date = start_date
                query_result.Download_End_Date = end_date
                query_result.Download_Path = scn_lcl_dwnld_path
                query_result.File_MD5 = file_md5
                ses.commit()
                logger.info("Finished download and updated database: {}".format(scn_lcl_dwnld_path))
        finally:
            # BUG FIX: the session was previously leaked when the scene record
            # could not be found; always close it.
            ses.close()
    else:
        logger.error("Download did not complete, re-run and it should try again: {}".format(scn_lcl_dwnld_path))
class EODataDownICESAT2Sensor (EODataDownSensor):
"""
An abstract class which represents a sensor and defines the functions a sensor must have.
"""
def __init__(self, db_info_obj):
EODataDownSensor.__init__(self, db_info_obj)
self.sensor_name = "ICESAT2"
self.db_tab_name = "EDDICESAT2"
self.ard_vec_format = "GPKG"
    def parse_sensor_config(self, config_file: str, first_parse: bool = False):
        """
        Parse the JSON configuration file. If first_parse=True then a signature file will be created
        which will be checked each time the system runs to ensure changes are not made to the
        configuration file. If the signature does not match the input file then an exception will be
        thrown. To update the configuration (e.g., extent date range or spatial area) run with first_parse=True.

        :param config_file: string with the path to the JSON file.
        :param first_parse: boolean as to whether the file has been previously parsed.
        """
        edd_file_checker = eodatadown.eodatadownutils.EDDCheckFileHash()
        # If it is the first time the config_file is parsed then create the signature file.
        if first_parse:
            edd_file_checker.createFileSig(config_file)
            logger.debug("Created signature file for config file.")
        if not edd_file_checker.checkFileSig(config_file):
            raise EODataDownException("Input config did not match the file signature.")

        with open(config_file) as f:
            config_data = json.load(f)
            json_parse_helper = eodatadown.eodatadownutils.EDDJSONParseHelper()
            eodd_utils = eodatadown.eodatadownutils.EODataDownUtils()

            logger.debug("Testing config file is for 'ICESAT2'")
            json_parse_helper.getStrValue(config_data, ["eodatadown", "sensor", "name"], [self.sensor_name])
            logger.debug("Have the correct config file for 'ICESAT2'")

            logger.debug("Find ARD processing params from config file")
            if json_parse_helper.doesPathExist(config_data, ["eodatadown", "sensor", "ardparams"]):
                # Optional override of the vector output format for ARD products.
                if json_parse_helper.doesPathExist(config_data, ["eodatadown", "sensor", "ardparams", "vecformat"]):
                    self.ard_vec_format = json_parse_helper.getStrValue(config_data, ["eodatadown", "sensor",
                                                                                     "ardparams", "vecformat"])
                # Optional output projection definition for ARD products.
                self.ardProjDefined = False
                self.projabbv = ""
                self.projEPSG = None
                if json_parse_helper.doesPathExist(config_data, ["eodatadown", "sensor", "ardparams", "proj"]):
                    self.ardProjDefined = True
                    self.projabbv = json_parse_helper.getStrValue(config_data,
                                                                  ["eodatadown", "sensor", "ardparams", "proj",
                                                                   "projabbv"])
                    self.projEPSG = int(json_parse_helper.getNumericValue(config_data,
                                                                          ["eodatadown", "sensor", "ardparams",
                                                                           "proj",
                                                                           "epsg"], 0, 1000000000))
            logger.debug("Found ARD processing params from config file")

            logger.debug("Find paths from config file")
            if json_parse_helper.doesPathExist(config_data, ["eodatadown", "sensor", "paths"]):
                self.parse_output_paths_config(config_data["eodatadown"]["sensor"]["paths"])
            logger.debug("Found paths from config file")

            logger.debug("Find search params from config file")
            self.startDate = json_parse_helper.getDateValue(config_data,
                                                            ["eodatadown", "sensor", "download", "startdate"],
                                                            "%Y-%m-%d")
            # At least one ICESAT2 product (with version) must be listed for download.
            if not json_parse_helper.doesPathExist(config_data, ["eodatadown", "sensor", "download", "products"]):
                raise EODataDownException("You must provide at least one product you want to be downloaded.")
            products_lst = json_parse_helper.getListValue(config_data,
                                                          ["eodatadown", "sensor", "download", "products"])
            self.productsLst = []
            for product in products_lst:
                prod_id = json_parse_helper.getStrValue(product, ["product"],
                                                        ["ATL02", "ATL03", "ATL04", "ATL06", "ATL07", "ATL08",
                                                         "ATL09", "ATL10", "ATL12", "ATL13"])
                prod_version = json_parse_helper.getStrValue(product, ["version"],
                                                             ["001", "002", "003", "004", "005", "006", "007",
                                                              "008", "009", "010"])
                self.productsLst.append({"product":prod_id, "version":prod_version})

            # One or more geographic bounding boxes restricting the search area.
            geo_bounds_lst = json_parse_helper.getListValue(config_data,
                                                            ["eodatadown", "sensor", "download", "geobounds"])
            if not len(geo_bounds_lst) > 0:
                raise EODataDownException("There must be at least 1 geographic boundary given.")
            self.geoBounds = list()
            for geo_bound_json in geo_bounds_lst:
                edd_bbox = eodatadown.eodatadownutils.EDDGeoBBox()
                edd_bbox.setNorthLat(json_parse_helper.getNumericValue(geo_bound_json, ["north_lat"], -90, 90))
                edd_bbox.setSouthLat(json_parse_helper.getNumericValue(geo_bound_json, ["south_lat"], -90, 90))
                edd_bbox.setWestLon(json_parse_helper.getNumericValue(geo_bound_json, ["west_lon"], -180, 180))
                edd_bbox.setEastLon(json_parse_helper.getNumericValue(geo_bound_json, ["east_lon"], -180, 180))
                self.geoBounds.append(edd_bbox)

            # Optional list of local directories checked for files before downloading.
            if json_parse_helper.doesPathExist(config_data, ["eodatadown", "sensor", "download", "lcl_data_cache"]):
                self.dir_lcl_data_cache = json_parse_helper.getListValue(config_data, ["eodatadown", "sensor",
                                                                                       "download", "lcl_data_cache"])
            else:
                self.dir_lcl_data_cache = None
            logger.debug("Found search params from config file")

            # Optional vector layer used to test whether scenes intersect a region of interest.
            self.scn_intersect = False
            if json_parse_helper.doesPathExist(config_data, ["eodatadown", "sensor", "validity"]):
                logger.debug("Find scene validity params from config file")
                if json_parse_helper.doesPathExist(config_data, ["eodatadown", "sensor", "validity", "scn_intersect"]):
                    self.scn_intersect_vec_file = json_parse_helper.getStrValue(config_data,
                                                                                ["eodatadown", "sensor", "validity",
                                                                                 "scn_intersect", "vec_file"])
                    self.scn_intersect_vec_lyr = json_parse_helper.getStrValue(config_data,
                                                                               ["eodatadown", "sensor", "validity",
                                                                                "scn_intersect", "vec_lyr"])
                    self.scn_intersect = True
                logger.debug("Found scene validity params from config file")

            logger.debug("Find EarthData Account params from config file")
            # Credentials can come either from a username/password file or inline in the config.
            edd_pass_encoder = eodatadown.eodatadownutils.EDDPasswordTools()
            if json_parse_helper.doesPathExist(config_data, ["eodatadown", "sensor", "earthdata", "usrpassfile"]):
                usr_pass_file = json_parse_helper.getStrValue(config_data, ["eodatadown", "sensor", "earthdata", "usrpassfile"])
                if os.path.exists(usr_pass_file):
                    usr_pass_info = eodd_utils.readTextFile2List(usr_pass_file)
                    self.earthDataUser = usr_pass_info[0]
                    self.earthDataPass = edd_pass_encoder.unencodePassword(usr_pass_info[1])
                else:
                    raise EODataDownException("The username/password file specified does not exist on the system.")
            else:
                self.earthDataUser = json_parse_helper.getStrValue(config_data, ["eodatadown", "sensor", "earthdata", "user"])
                self.earthDataPass = edd_pass_encoder.unencodePassword(json_parse_helper.getStrValue(config_data, ["eodatadown", "sensor", "earthdata", "pass"]))
            logger.debug("Found EarthData Account params from config file")

            logger.debug("Find the plugins params")
            if json_parse_helper.doesPathExist(config_data, ["eodatadown", "sensor", "plugins"]):
                self.parse_plugins_config(config_data["eodatadown"]["sensor"]["plugins"])
            logger.debug("Found the plugins params")
def init_sensor_db(self, drop_tables=True):
"""
A function which initialises the database use the db_info_obj passed to __init__.
Be careful as running this function drops the table if it already exists and therefore
any data would be lost.
"""
logger.debug("Creating Database Engine.")
db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)
if drop_tables:
logger.debug("Drop system table if within the existing database.")
Base.metadata.drop_all(db_engine)
logger.debug("Creating EDDICESAT2 Database.")
Base.metadata.bind = db_engine
Base.metadata.create_all()
def check_http_response_auth(self, response, url):
"""
Check the HTTP response and raise an exception with appropriate error message
if request was not successful.
:param response: the http response object.
:param url: the URL called.
:return: boolean as to whether status is successful or otherwise.
"""
try:
response.raise_for_status()
success = True
except (requests.HTTPError, ValueError):
success = False
excpt_msg = "Invalid API response."
try:
excpt_msgs = response.json()["errors"]
excpt_msg = ""
n = 1
for msg in | |
# w1_unittest.py - Natural Language Processing with Attention Models,
# Week 1 (Neural Machine Translation) unit tests.
import numpy as np
import trax
from trax import layers as tl
from trax.fastmath import numpy as fastnp
from trax.supervised import training
# Subword vocabulary file and the directory it is stored in.
VOCAB_FILE = 'ende_32k.subword'
VOCAB_DIR = 'data/'
def jaccard_similarity(candidate, reference):
    """Returns the Jaccard similarity between two token lists

    Args:
        candidate (list of int): tokenized version of the candidate translation
        reference (list of int): tokenized version of the reference translation

    Returns:
        float: overlap between the two token lists (1.0 when both are empty)
    """
    # convert the lists to a set to get the unique tokens
    can_unigram_set, ref_unigram_set = set(candidate), set(reference)
    # get the set of tokens common to both candidate and reference
    joint_elems = can_unigram_set.intersection(ref_unigram_set)
    # get the set of all tokens found in either candidate or reference
    all_elems = can_unigram_set.union(ref_unigram_set)
    # BUG FIX: two empty token lists used to raise ZeroDivisionError; two
    # empty sequences are identical, so treat their similarity as 1.0.
    if not all_elems:
        return 1.0
    # divide the number of joint elements by the number of all elements
    overlap = len(joint_elems) / len(all_elems)
    return overlap
def weighted_avg_overlap(similarity_fn, samples, log_probs):
    """Returns the weighted mean overlap of each candidate sentence in the samples

    Args:
        similarity_fn (function): returns the overlap between two token lists
        samples (list of lists): tokenized version of the translated sentences
        log_probs (list of float): log probability of the translated sentences

    Returns:
        dict: scores of each sample
            key: index of the sample
            value: score of the sample
    """
    # initialize dictionary
    scores = {}
    # run a for loop for each sample
    for index_candidate, candidate in enumerate(samples):
        # initialize overlap and weighted sum
        overlap, weight_sum = 0.0, 0.0
        # compare the candidate against every *other* sample
        for index_sample, (sample, logp) in enumerate(zip(samples, log_probs)):
            # skip if the candidate index is the same as the sample index
            if index_candidate == index_sample:
                continue
            # convert log probability to linear scale
            sample_p = float(np.exp(logp))
            # update the weighted sum
            weight_sum += sample_p
            # get the unigram overlap between candidate and sample
            sample_overlap = similarity_fn(candidate, sample)
            # update the overlap
            overlap += sample_p * sample_overlap
        # BUG FIX: with a single sample there are no other samples to compare
        # against, so weight_sum stays 0.0 and the division used to raise
        # ZeroDivisionError. Score such candidates as 0.0.
        score = overlap / weight_sum if weight_sum > 0.0 else 0.0
        # save the score in the dictionary. use index as the key.
        scores[index_candidate] = score
    return scores
# UNIT TEST for UNQ_C1
def test_input_encoder_fn(input_encoder_fn):
    """Unit test for UNQ_C1 (input encoder).

    Builds a small encoder and checks its layer sequence, its type and its
    number of sublayers, printing a pass/fail summary.

    Args:
        input_encoder_fn (function): the learner's encoder constructor.
    """
    target = input_encoder_fn
    success = 0
    fails = 0

    input_vocab_size = 10
    d_model = 2
    n_encoder_layers = 6

    encoder = target(input_vocab_size, d_model, n_encoder_layers)

    lstms = "\n".join([f' LSTM_{d_model}'] * n_encoder_layers)
    expected = f"Serial[\n Embedding_{input_vocab_size}_{d_model}\n{lstms}\n]"
    proposed = str(encoder)

    # Test all layers are in the expected sequence
    try:
        assert(proposed.replace(" ", "") == expected.replace(" ", ""))
        success += 1
    # BUG FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
    # catch Exception instead (here and below).
    except Exception:
        fails += 1
        print("Wrong model. \nProposed:\n%s" %proposed, "\nExpected:\n%s" %expected)

    # Test the output type
    try:
        assert(isinstance(encoder, trax.layers.combinators.Serial))
        success += 1
        # Test the number of layers
        try:
            # Test
            assert len(encoder.sublayers) == (n_encoder_layers + 1)
            success += 1
        except Exception:
            fails += 1
            print('The number of sublayers does not match %s <>' %len(encoder.sublayers), " %s" %(n_encoder_layers + 1))
    except Exception:
        fails += 1
        print("The enconder is not an object of ", trax.layers.combinators.Serial)

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success," Tests passed")
        print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C2
def test_pre_attention_decoder_fn(pre_attention_decoder_fn):
    """Unit test for UNQ_C2 (pre-attention decoder).

    Builds a small decoder and checks its layer sequence, its type and its
    number of sublayers, printing a pass/fail summary.

    Args:
        pre_attention_decoder_fn (function): the learner's decoder constructor.
    """
    target = pre_attention_decoder_fn
    success = 0
    fails = 0

    mode = 'train'
    target_vocab_size = 10
    d_model = 2

    decoder = target(mode, target_vocab_size, d_model)

    expected = f"Serial[\n ShiftRight(1)\n Embedding_{target_vocab_size}_{d_model}\n LSTM_{d_model}\n]"
    proposed = str(decoder)

    # Test all layers are in the expected sequence
    try:
        assert(proposed.replace(" ", "") == expected.replace(" ", ""))
        success += 1
    # BUG FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
    # catch Exception instead (here and below).
    except Exception:
        fails += 1
        print("Wrong model. \nProposed:\n%s" %proposed, "\nExpected:\n%s" %expected)

    # Test the output type
    try:
        assert(isinstance(decoder, trax.layers.combinators.Serial))
        success += 1
        # Test the number of layers
        try:
            # Test
            assert len(decoder.sublayers) == 3
            success += 1
        except Exception:
            fails += 1
            print('The number of sublayers does not match %s <>' %len(decoder.sublayers), " %s" %3)
    except Exception:
        fails += 1
        print("The enconder is not an object of ", trax.layers.combinators.Serial)

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success," Tests passed")
        print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C3
def test_prepare_attention_input(prepare_attention_input):
    """Unit test for UNQ_C3 (prepare_attention_input).

    Uses a batch of 2 sentences with 3 tokens and embedding size 4, then
    checks the returned queries, keys, values, mask and their types.

    Args:
        prepare_attention_input (function): the learner's implementation.
    """
    target = prepare_attention_input
    success = 0
    fails = 0

    # This unit test considers a batch size = 2, number_of_tokens = 3 and embedding_size = 4
    enc_act = fastnp.array([[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]],
                            [[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 0]]])
    dec_act = fastnp.array([[[2, 0, 0, 0], [0, 2, 0, 0], [0, 0, 2, 0]],
                            [[2, 0, 2, 0], [0, 2, 0, 2], [0, 0, 0, 0]]])
    inputs = fastnp.array([[1, 2, 3], [1, 4, 0]])

    exp_mask = fastnp.array([[[[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]],
                             [[[1., 1., 0.], [1., 1., 0.], [1., 1., 0.]]]])
    exp_type = type(enc_act)

    queries, keys, values, mask = target(enc_act, dec_act, inputs)

    try:
        assert(fastnp.allclose(queries, dec_act))
        success += 1
    # BUG FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
    # catch Exception instead (here and below).
    except Exception:
        fails += 1
        print("Queries does not match the decoder activations")
    try:
        assert(fastnp.allclose(keys, enc_act))
        success += 1
    except Exception:
        fails += 1
        print("Keys does not match the encoder activations")
    try:
        assert(fastnp.allclose(values, enc_act))
        success += 1
    except Exception:
        fails += 1
        print("Values does not match the encoder activations")
    try:
        assert(fastnp.allclose(mask, exp_mask))
        success += 1
    except Exception:
        fails += 1
        print("Mask does not match expected tensor. \nExpected:\n%s" %exp_mask, "\nOutput:\n%s" %mask)

    # Test the output type
    try:
        assert(isinstance(queries, exp_type))
        assert(isinstance(keys, exp_type))
        assert(isinstance(values, exp_type))
        assert(isinstance(mask, exp_type))
        success += 1
    except Exception:
        fails += 1
        # BUG FIX: this message referenced jax.interpreters.xla.DeviceArray,
        # but 'jax' is never imported so reporting the failure raised a
        # NameError; report the expected type instead.
        print("One of the output object are not of type ", exp_type)

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success," Tests passed")
        print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C4
def test_NMTAttn(NMTAttn):
    """Unit test for UNQ_C4 (NMTAttn model).

    Checks the full repr of the model, its number of top-level layers and
    its two Select layers, printing a pass/fail summary.

    Args:
        NMTAttn (function): the learner's model constructor.
    """
    test_cases = [
        {
            "name":"simple_test_check",
            "expected":"Serial_in2_out2[\n Select[0,1,0,1]_in2_out4\n Parallel_in2_out2[\n Serial[\n Embedding_33300_1024\n LSTM_1024\n LSTM_1024\n ]\n Serial[\n ShiftRight(1)\n Embedding_33300_1024\n LSTM_1024\n ]\n ]\n PrepareAttentionInput_in3_out4\n Serial_in4_out2[\n Branch_in4_out3[\n None\n Serial_in4_out2[\n Parallel_in3_out3[\n Dense_1024\n Dense_1024\n Dense_1024\n ]\n PureAttention_in4_out2\n Dense_1024\n ]\n ]\n Add_in2\n ]\n Select[0,2]_in3_out2\n LSTM_1024\n LSTM_1024\n Dense_33300\n LogSoftmax\n]",
            "error":"The NMTAttn is not defined properly."
        },
        {
            "name":"layer_len_check",
            "expected":9,
            "error":"We found {} layers in your model. It should be 9.\nCheck the LSTM stack before the dense layer"
        },
        {
            "name":"selection_layer_check",
            "expected":["Select[0,1,0,1]_in2_out4", "Select[0,2]_in3_out2"],
            "error":"Look at your selection layers."
        }
    ]

    success = 0
    fails = 0

    for test_case in test_cases:
        try:
            if test_case['name'] == "simple_test_check":
                assert test_case["expected"] == str(NMTAttn())
                success += 1
            if test_case['name'] == "layer_len_check":
                if test_case["expected"] == len(NMTAttn().sublayers):
                    success += 1
                else:
                    print(test_case["error"].format(len(NMTAttn().sublayers)))
                    fails += 1
            if test_case['name'] == "selection_layer_check":
                model = NMTAttn()
                output = [str(model.sublayers[0]),str(model.sublayers[4])]
                check_count = 0
                for i in range(2):
                    if test_case["expected"][i] != output[i]:
                        print(test_case["error"])
                        fails += 1
                        break
                    else:
                        check_count += 1
                if check_count == 2:
                    success += 1
        # BUG FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
        # catch Exception instead.
        except Exception:
            print(test_case['error'])
            fails += 1

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success," Tests passed")
        print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C5
def test_train_task(train_task):
    """Unit test for UNQ_C5 (training task).

    Checks the labeled data generator, the loss layer, the optimizer, the
    learning-rate schedule and the checkpoint frequency.

    Args:
        train_task: the learner's trax training task.
    """
    target = train_task
    success = 0
    fails = 0

    # Test the labeled data parameter
    try:
        strlabel = str(target._labeled_data)
        assert(strlabel.find("generator") and strlabel.find('add_loss_weights'))
        success += 1
    # BUG FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
    # catch Exception instead (here and below).
    except Exception:
        fails += 1
        print("Wrong labeled data parameter")

    # Test the cross entropy loss data parameter
    try:
        strlabel = str(target._loss_layer)
        assert(strlabel == "CrossEntropyLoss_in3")
        success += 1
    except Exception:
        fails += 1
        print("Wrong loss functions. CrossEntropyLoss_in3 was expected")

    # Test the optimizer parameter
    try:
        assert(isinstance(target.optimizer, trax.optimizers.adam.Adam))
        success += 1
    except Exception:
        fails += 1
        print("Wrong optimizer")

    # Test the schedule parameter
    try:
        assert(isinstance(target._lr_schedule,trax.supervised.lr_schedules._BodyAndTail))
        success += 1
    except Exception:
        fails += 1
        print("Wrong learning rate schedule type")

    # Test the _n_steps_per_checkpoint parameter
    try:
        assert(target._n_steps_per_checkpoint==10)
        success += 1
    except Exception:
        fails += 1
        print("Wrong checkpoint step frequency")

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success," Tests passed")
        print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C6
def test_next_symbol(next_symbol, model):
target = next_symbol
the_model = model
success = 0
fails = 0
tokens_en = np.array([[17332, 140, 172, 207, 1]])
# Test the type and size of output
try:
next_de_tokens = target(the_model, tokens_en, [], 0.0)
assert(isinstance(next_de_tokens, tuple))
| |
threatLevels.index(self._maxThreat):
self._textProduct.debug_print("updating max threat to = %s" % (threatLevel), 1)
self._maxThreat = threatLevel
def _calculateHourOffset(self, targetTime):
self._textProduct.debug_print("Calculating hours from issuance time for %s"
% (self._textProduct._pp.pformat(targetTime)), 1)
self._textProduct.debug_print("target unix time = %s"
% (self._textProduct._pp.pformat(targetTime.unixTime())), 1)
self._textProduct.debug_print("issuance unix time = %s"
% (self._textProduct._pp.pformat(self._textProduct._issueTime_secs)), 1)
seconds = targetTime.unixTime() - self._textProduct._issueTime_secs
hour = int(round(seconds/60.0/60.0))
self._textProduct.debug_print("hour offset = %s" % (hour), 1)
if hour < 0:
hour = 0
self._textProduct.debug_print("final hour offset = %s" % (hour), 1)
return hour
class WindSectionStats(SectionCommonStats):
    def __init__(self, textProduct, segment, statList, timeRangeList):
        """Gather wind statistics for a single segment.

        Stats are only computed when the wind speed probability grids are
        available; otherwise all fields below remain None.
        """
        SectionCommonStats.__init__(self, textProduct, segment)
        # The maximum wind speed that occurs during the entire advisory.
        self._maxWind = None
        # The maximum wind gust speed that occurs during the entire advisory.
        self._maxGust = None
        # The number of hours since the issuance time when the wind first becomes >= 34 kts.
        self._onset34Hour = None
        # The number of hours since the issuance time when the wind drops below 34 kts.
        self._end34Hour = None
        # Text describing when tropical storm force winds (>= 34 kts) start and end.
        self._windowTS = None
        # Text describing when hurricane force winds (>= 64 kts) start and end.
        self._windowHU = None

        # Only gather stats if we have the wind speed probability grids available
        if self._textProduct._WSPGridsAvailable:
            self._textProduct.debug_print("#"*90)
            self._textProduct.debug_print("Setting wind stats for %s" % (segment), 1)
            self._setStats(statList, timeRangeList)
            self._textProduct.debug_print("#"*90)
# pws34int and pws64int grids give you the probability of 34/64 kt winds
# occurring during the grid time range. The grids are 6 hours long so they
# give you a more specific starting or ending time which allows for better
# descriptions of when events start.
    class PwsXXintStats():
        """Statistics derived from the 6-hour pws34int/pws64int interval grids."""
        def __init__(self):
            # The maximum value in pws34/64int grids across the entire advisory.
            self.max = None
            # The number of hours since the issuance time when this maximum value first occurs.
            self.onsetHour = None
# pwsD34, pwsN34, pwsD64 and pwsN64 grids give you the probability of 34/64
# kt winds occurring during the grid time range. They are 12 hour long day
# and night grids that match ZPF periods. They give you a ball park idea of
# when an event will start or end and if it's day or night time and then
# the pwsXXint grids can be used to narrow down the time frame.
    class PwsTXXStats():
        """Tracking state for the 12-hour day/night pwsD34/pwsN34/pwsD64/pwsN64 grids."""
        def __init__(self):
            # Depending on when the issuance time is, there may be a day or night
            # grid that we need to drop at the beginning so that we start with the
            # grid that occurs during our issuance time so that our windows are
            # accurate.

            # We need to do special logic the first time around so record if this
            # is the first run through the loop or not.
            self.firstRun = True
            # Indicates if we need to possibly drop the first grid or not.
            self.dropFirstGridType = None
            # Indicates if we actually did drop the first grid. Sometimes we will
            # determine that we need to drop the grid if it exists but it doesn't
            # end up existing so we don't actually drop anything in some cases.
            self.droppedFirstGrid = False
            # Indicate the period (actually a 0-based index into a list of periods)
            # that contains the first correct grid.
            self.periodWithFirstCorrectGrid = None
            # The AbsTime of when the grids first met or exceeded the threshold.
            self.onsetTime = None
            # The AbsTime of when the grids last met or exceeded the threshold.
            self.endTime = None
# Start and end hour information from the Wind grids.
    class WindStats():
        """Start and end hour information derived from the Wind grids."""
        def __init__(self):
            # The number of hours since issuance time when the wind first gets >= 34/64 knots.
            self.onsetHour = None
            # The number of hours since issuance time when the wind is last >= 34/64 knots.
            self.endHour = None
# Information needed for creating the wind window text.
class WindowInfo():
    """Everything needed to build the wind window text for one event type."""

    def __init__(self, eventType):
        # Name of the event this window describes ("Tropical Storm" or
        # "Hurricane"), as passed in by the caller.
        self.eventType = eventType
        # Hours after issuance time when the event starts.
        self.onsetHour = None
        # Hours after issuance time when the event ends.
        self.endHour = None
        # Time resolution used when wording the window's end time.
        self.endTimeResolution = None
        # Whether window text should be produced at all (did the wind
        # exceed the threshold?).
        self.shouldCreateWindowText = True
        # The finished window text, once built.
        self.windowText = None
def _setStats(self, statList, timeRangeList):
    """Accumulate wind statistics over all sampled time ranges and build the
    tropical storm and hurricane wind window text.

    statList and timeRangeList are parallel: statList[i] is the stat
    dictionary sampled over timeRangeList[i][0].  Populates
    self._onset34Hour, self._end34Hour, self._windowTS and self._windowHU,
    and records the max wind threat/forecast in self._currentAdvisory.
    """
    # Running stats for each grid family, one instance per threshold
    # (34 kt = tropical storm, 64 kt = hurricane).
    pws34intStats = self.PwsXXintStats()
    pws64intStats = self.PwsXXintStats()
    pwsT34Stats = self.PwsTXXStats()
    pwsT64Stats = self.PwsTXXStats()
    wind34timeInfo = self.WindStats()
    wind64timeInfo = self.WindStats()
    prob34Onset = None
    for index in range(len(statList)):
        tr, _ = timeRangeList[index]
        statDict = statList[index]
        self._textProduct.debug_print("="*90, 1)
        self._textProduct.debug_print("\n\ntr = %s" % (tr), 1)
        self._textProduct.debug_print("*"*90, 1)
        # Index of the forecast period containing this time range's start;
        # needed by the pwsD/N (day/night) grid bookkeeping below.
        currentPeriod = self._determineCurrentPeriod(tr)
        self._textProduct.debug_print("*"*90, 1)
        self._updateStatsForPwsXXint(tr, statDict, "pws34int", pws34intStats)
        self._textProduct.debug_print("-"*45, 1)
        self._updateStatsForPwsXXint(tr, statDict, "pws64int", pws64intStats)
        self._textProduct.debug_print("*"*90, 1)
        self._updateStatsForPwsTXX(tr, statDict, "pwsD34", "pwsN34", pwsT34Stats, currentPeriod)
        self._textProduct.debug_print("-"*45, 1)
        self._updateStatsForPwsTXX(tr, statDict, "pwsD64", "pwsN64", pwsT64Stats, currentPeriod)
        # Calculate an additional probabilistic onset hour for scenarios where we weren't
        # able to calculate the onset the usual way. This is only done for tropical
        # storms to help determine the correct TR (check plans, etc.)
        if prob34Onset is None and pwsT34Stats.onsetTime is not None:
            self._textProduct.debug_print("*"*90, 1)
            self._textProduct.debug_print("Found pwsD/N34 onset time, calculating prob34Onset", 1)
            prob34Onset = self._calculateProbOnset(timeRangeList, statList, index, "pws34int")
        self._textProduct.debug_print("*"*90, 1)
        self._updateStatsForWind(tr, statDict, wind34timeInfo, speed=34)
        self._textProduct.debug_print("-"*45, 1)
        self._updateStatsForWind(tr, statDict, wind64timeInfo, speed=64)
        self._textProduct.debug_print("*"*90, 1)
        self._updateMaxWindGust(statDict)
        self._textProduct.debug_print("*"*90, 1)
        self._updateThreatStats(tr, statDict, "WindThreat")
        self._textProduct.debug_print("="*90, 1)
    # Tropical Storm window (34 kt)
    self._textProduct.debug_print("Tropical Storm Window:", 1)
    tropicalStormWindow = self.WindowInfo("Tropical Storm")
    tropicalStormWindow = self._computeWindOnsetAndEnd(tropicalStormWindow,
                                                      wind34timeInfo,
                                                      pws34intStats,
                                                      pwsT34Stats,
                                                      prob34Onset)
    tropicalStormWindow = self._createWindowText(tropicalStormWindow)
    # The tropical storm onset and end hours will be used for calculating threat statements
    self._onset34Hour = tropicalStormWindow.onsetHour
    self._end34Hour = tropicalStormWindow.endHour
    self._windowTS = tropicalStormWindow.windowText
    # Hurricane window (64 kt); note no prob34Onset equivalent is passed here.
    self._textProduct.debug_print("-"*45, 1)
    self._textProduct.debug_print("Hurricane Window:", 1)
    hurricaneWindow = self.WindowInfo("Hurricane")
    hurricaneWindow = self._computeWindOnsetAndEnd(hurricaneWindow,
                                                   wind64timeInfo,
                                                   pws64intStats,
                                                   pwsT64Stats)
    # Make sure the hurricane window end time resolution is the same
    # resolution used for tropical storms so that hurricanes don't appear
    # to end after tropical storms
    hurricaneWindow.endTimeResolution = tropicalStormWindow.endTimeResolution
    hurricaneWindow = self._createWindowText(hurricaneWindow)
    self._windowHU = hurricaneWindow.windowText
    self._textProduct.debug_print("-"*45, 1)
    self._currentAdvisory["WindThreat"] = self._maxThreat
    self._currentAdvisory["WindForecast"] = self._maxWind
    # Debug dump of everything just computed.
    self._textProduct.debug_print("+"*60, 1)
    self._textProduct.debug_print("In WindSectionStats._setStats", 1)
    self._textProduct.debug_print("pws34intStats.max = %s" % (pws34intStats.max), 1)
    self._textProduct.debug_print("pws64intStats.max = %s" % (pws64intStats.max), 1)
    self._textProduct.debug_print("pwsT34Stats.periodWithFirstCorrectGrid = %s" % (pwsT34Stats.periodWithFirstCorrectGrid), 1)
    self._textProduct.debug_print("pwsT34Stats.endTime = '%s'" % (pwsT34Stats.endTime), 1)
    self._textProduct.debug_print("pwsT64Stats.periodWithFirstCorrectGrid = %s" % (pwsT64Stats.periodWithFirstCorrectGrid), 1)
    self._textProduct.debug_print("pwsT64Stats.endTime = '%s'" % (pwsT64Stats.endTime), 1)
    self._textProduct.debug_print("self._maxWind = %s" % (self._maxWind), 1)
    self._textProduct.debug_print("self._maxGust = %s" % (self._maxGust), 1)
    self._textProduct.debug_print("self._maxThreat = %s" % (self._maxThreat), 1)
def _determineCurrentPeriod(self, tr):
    """Return the 0-based index of the forecast period that tr's start
    falls into.  A start time before the first period maps to the first
    period; a start at/after the end of the last period maps to the last.
    """
    periodList = self._textProduct._periodList
    lastIndex = len(periodList) - 1
    trStart = tr.startTime()
    currentPeriod = None
    for index, candidateTr in enumerate(periodList):
        self._textProduct.debug_print("\n\nperiodIndex = %d periodList tr = %s"
                                      % (index, repr(candidateTr)), 1)
        beforeFirst = (index == 0 and
                       trStart.unixTime() < candidateTr.startTime().unixTime())
        afterLast = (index == lastIndex and
                     trStart.unixTime() >= candidateTr.endTime().unixTime())
        if beforeFirst or afterLast or candidateTr.contains(trStart):
            currentPeriod = index
            break
    self._textProduct.debug_print("\n\ncurrentPeriod index = %s" % (currentPeriod), 1)
    self._textProduct.debug_print("\n\ncurrentPeriod tr = %s"
                                  % (self._textProduct._periodList[currentPeriod]), 1)
    return currentPeriod
def _updateStatsForPwsXXint(self, tr, statDict, gridName, pwsXXintStats):
    """Fold this time range's pwsXXint grid max into the running stats,
    recording a new overall maximum and its onset hour when found."""
    gridMax = self._textProduct._getStatValue(statDict, gridName, "Max")
    self._textProduct.debug_print("Wind Window Debug: pwsXXintStats gridName = %s" % (gridName), 1)
    self._textProduct.debug_print("Wind Window Debug: pwsXXintStats pwsXXint = %s" % (gridMax), 1)
    if gridMax is None:
        return
    if pwsXXintStats.max is None or gridMax > pwsXXintStats.max:
        pwsXXintStats.max = gridMax
        pwsXXintStats.onsetHour = self._calculateHourOffset(tr.startTime())
        self._textProduct.debug_print("Wind Window Debug: pwsXXintStats Found a new max value!", 1)
        self._textProduct.debug_print("Wind Window Debug: pwsXXintStats onsetHour = %s" % (pwsXXintStats.onsetHour), 1)
def _updateStatsForPwsTXX(self, tr, statDict, dayGridName, | |
as e:
raise DBError(stringArray[1])
#Attach entityID1 at the mount point specified in stringArray[2]
if (stringArray[2] != '**DIRECT**'):
mountPoints = api.getLinkCounterpartsByType(entityID0, stringArray[2], 0)
for mountPoint in mountPoints:
api.addEntityLink(mountPoint, entityID1)
else:
#If we have a **DIRECT** mount, then attach entity 1 to entity 0
api.addEntityLink(entityID0, entityID1)
backTrackCorrect = False
linkType = None
#see if we can get from entityID0 to entityID1 via stringArray[3]
addLocationCorrect = False
addLocationList = api.getLinkCounterpartsByType(entityID0, stringArray[3], linkType)
if len(addLocationList) > 0:
addLocationCorrect = True
#see if we can get from entityID1 to entityID0 via stringArray[4]
backTrackCorrect = False
backTrackLocationList = api.getLinkCounterpartsByType(entityID1, stringArray[4], linkType)
if len(backTrackLocationList) > 0:
backTrackCorrect = True
if (backTrackCorrect == True) and (addLocationCorrect == True):
testResult = True
except DBError as e:
errorMsg = ('Database Error! Check to see if the Database has been started and that meme %s is in the appropriate table.' % (e) )
errata.append(errorMsg)
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[2])
allTrueResult = str(testResult)
expectedResult = stringArray[5]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testCondition(filename):
    """Exercise api.evaluateEntity() against the condition testcases in *filename*.

    Each whitespace-delimited line of the test file holds:
        memeType, argValue1, argName1 [, argValue2, argName2
        [, argValue3, argName3]], ..., expectedResult (index 7)
    Shorter lines simply omit the optional argument pairs; the placeholder
    name 'XXX' marks "no argument here" and is stripped from the map.

    Returns a list of [n, testcase, actualResult, expectedResult, errata] rows.
    """
    method = moduleName + '.' + 'testCondition'
    Graph.logQ.put([logType, logLevel.DEBUG, method, "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, filename)
    # Bug fix: the original referenced readLoc.close without calling it,
    # leaking the file handle.  try/finally guarantees the close now.
    readLoc = codecs.open(testFileName, "r", "utf-8")
    try:
        allLines = readLoc.readlines()
    finally:
        readLoc.close()
    n = 0
    for eachReadLine in allLines:
        errata = []
        n = n + 1
        stringArray = str.split(str(eachReadLine))
        # Build the argument map once per line.  (The original rebuilt the
        # identical map once per matching entity, which was redundant.)
        # IndexError just means the optional pairs are absent on this line.
        testArgumentMap = {stringArray[2]: stringArray[1]}
        try:
            testArgumentMap[stringArray[4]] = stringArray[3]
        except IndexError:
            pass
        try:
            testArgumentMap[stringArray[6]] = stringArray[5]
        except IndexError:
            pass
        # Strip the 'XXX' placeholder if the test file used one.
        testArgumentMap.pop('XXX', None)
        testResult = False
        try:
            for entityID in api.getEntitiesByMemeType(stringArray[0]):
                testResult = api.evaluateEntity(entityID, testArgumentMap)
        except Exception as e:
            errata.append('Error! Traceback = %s' % (e))
        resultSet.append([n, str(stringArray[0]), str(testResult), stringArray[7], errata])
        Graph.logQ.put([logType, logLevel.INFO, method, "Finished testcase %s" % (n)])
    Graph.logQ.put([logType, logLevel.DEBUG, method, "exiting"])
    return resultSet
def testAACondition(filename):
    """Exercise api.evaluateEntity() with explicit subject/object entities.

    Each whitespace-delimited line of the test file holds:
        conditionMemeType, subjectMeme [, objectMeme], ...,
        expectedResult (index 3)
    When no object meme is given (or its creation fails), the subject
    entity doubles as the object.

    Returns a list of [n, testcase, actualResult, expectedResult, errata] rows.
    """
    method = moduleName + '.' + 'testAACondition'
    Graph.logQ.put([logType, logLevel.DEBUG, method, "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, filename)
    # Bug fix: the original referenced readLoc.close without calling it,
    # leaking the file handle.  try/finally guarantees the close now.
    readLoc = codecs.open(testFileName, "r", "utf-8")
    try:
        allLines = readLoc.readlines()
    finally:
        readLoc.close()
    n = 0
    for eachReadLine in allLines:
        errata = []
        n = n + 1
        stringArray = str.split(str(eachReadLine))
        # No named arguments are used by these testcases.
        testArgumentMap = {}
        subjectID = api.createEntityFromMeme(stringArray[1])
        objectID = None
        try:
            objectID = Graph.api.createEntityFromMeme(stringArray[2])
        except Exception:
            # Line has no object meme (or creation failed); fall through to
            # reusing the subject as the object below.
            pass
        if objectID is None:
            objectID = subjectID
        testResult = False
        try:
            for cEntityID in api.getEntitiesByMemeType(stringArray[0]):
                testResult = api.evaluateEntity(cEntityID, testArgumentMap, None, subjectID, objectID)
        except Exception as e:
            errata.append('Error! Traceback = %s' % (e))
        resultSet.append([n, str(stringArray[0]), str(testResult), stringArray[3], errata])
        Graph.logQ.put([logType, logLevel.INFO, method, "Finished testcase %s" % (n)])
    Graph.logQ.put([logType, logLevel.DEBUG, method, "exiting"])
    return resultSet
def testSourceCreateMeme(filename):
    """Exercise api.sourceMemeCreate() in three phases.

    Phase 1 reads explicit (metamemePath, modulePath, memeName, expected)
    rows from *filename*.  Phase 2 creates a meme using the default
    metameme and module; phase 3 uses the default metameme with a custom
    module.  Phases 2 and 3 always expect success ("True").

    Returns a list of [n, testcase, actualResult, expectedResult, errata] rows.
    """
    method = moduleName + '.' + 'testSourceCreateMeme'
    Graph.logQ.put([logType, logLevel.DEBUG, method, "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, filename)
    # Bug fix: the original referenced readLoc.close without calling it,
    # leaking the file handle.  try/finally guarantees the close now.
    readLoc = codecs.open(testFileName, "r", "utf-8")
    try:
        allLines = readLoc.readlines()
    finally:
        readLoc.close()
    n = 0
    # Phase 1 - explicit Metameme and Meme declaration
    for eachReadLine in allLines:
        errata = []
        n = n + 1
        stringArray = str.split(str(eachReadLine))
        metamemePath = stringArray[0]
        modulePath = stringArray[1]
        memeName = stringArray[2]
        try:
            operationResult = api.sourceMemeCreate(memeName, modulePath, metamemePath)
        except Exception as e:
            errorMsg = 'Error! Traceback = %s' % (e)
            # Synthesize a failed result so reporting below stays uniform.
            operationResult = {"memeID": "%s.%s" % (modulePath, memeName),
                               "ValidationResults": [False, errorMsg]}
            errata.append(errorMsg)
        testcase = str(operationResult["memeID"])
        validation = operationResult["ValidationResults"]
        testResult = validation[0] == True
        if not testResult:
            errata = validation[1]
        resultSet.append([n, testcase, str(testResult), stringArray[3], errata])
        Graph.logQ.put([logType, logLevel.INFO, method, "Finished testcase %s" % (n)])
    # Phase 2 - Default Metameme, default module
    # Bug fix: errata is reset for phases 2 and 3.  The original reused
    # (and appended to) the list left over from phase 1's last iteration,
    # and raised NameError for an empty test file.
    memeName = "DefaultMetamemeMeme"
    errata = []
    try:
        operationResult = api.sourceMemeCreate(memeName)
    except Exception as e:
        errorMsg = 'Error! Traceback = %s' % (e)
        operationResult = {"memeID": "%s.%s" % ("Graphyne", memeName),
                           "ValidationResults": [False, errorMsg]}
        errata.append(errorMsg)
    testcase = str(operationResult["memeID"])
    validation = operationResult["ValidationResults"]
    testResult = validation[0] == True
    if not testResult:
        errata = validation[1]
    resultSet.append([n, testcase, str(testResult), "True", errata])
    # Phase 3 - Default Metameme, custom module
    errata = []
    try:
        operationResult = api.sourceMemeCreate(memeName, "CustomModule")
    except Exception as e:
        errorMsg = 'Error! Traceback = %s' % (e)
        operationResult = {"memeID": "%s.%s" % ("Graphyne", memeName),
                           "ValidationResults": [False, errorMsg]}
        errata.append(errorMsg)
    testcase = str(operationResult["memeID"])
    validation = operationResult["ValidationResults"]
    testResult = validation[0] == True
    if not testResult:
        errata = validation[1]
    resultSet.append([n, testcase, str(testResult), "True", errata])
    Graph.logQ.put([logType, logLevel.DEBUG, method, "exiting"])
    return resultSet
def testSourceProperty(filename):
    """Exercise api.sourceMemePropertySet() against the testcases in *filename*.

    Each whitespace-delimited line holds:
        metamemePath, modulePath, memeName, propName, propValue, expected

    Returns a list of [n, testcase, actualResult, expectedResult, errata] rows.
    """
    method = moduleName + '.' + 'testSourceProperty'
    Graph.logQ.put([logType, logLevel.DEBUG, method, "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, filename)
    # Bug fix: the original referenced readLoc.close without calling it,
    # leaking the file handle.  try/finally guarantees the close now.
    readLoc = codecs.open(testFileName, "r", "utf-8")
    try:
        allLines = readLoc.readlines()
    finally:
        readLoc.close()
    n = 0
    for eachReadLine in allLines:
        errata = []
        n = n + 1
        stringArray = str.split(str(eachReadLine))
        metamemePath = stringArray[0]
        modulePath = stringArray[1]
        memeName = stringArray[2]
        propName = stringArray[3]
        propValueStr = stringArray[4]
        try:
            sourceMeme = api.sourceMemeCreate(memeName, modulePath, metamemePath)
            operationResult = api.sourceMemePropertySet(sourceMeme["memeID"], propName, propValueStr)
        except Exception as e:
            errorMsg = 'Error! Traceback = %s' % (e)
            # Synthesize a failed result so reporting below stays uniform.
            operationResult = {"memeID": "%s.%s" % (modulePath, memeName),
                               "ValidationResults": [False, errorMsg]}
            errata.append(errorMsg)
        # Bug fix: the original formatted testResult[0] into the label, which
        # was always "F" (the first character of the string "False").
        # Report the meme ID, as testSourcePropertyRemove does.
        testcase = "%s with property %s, %s" % (operationResult["memeID"], propName, propValueStr)
        validation = operationResult["ValidationResults"]
        if validation[0] == True:
            testResult = str(True)
        else:
            testResult = str(False)
            errata = validation[1]
        resultSet.append([n, testcase, testResult, stringArray[5], errata])
        Graph.logQ.put([logType, logLevel.INFO, method, "Finished testcase %s" % (n)])
    Graph.logQ.put([logType, logLevel.DEBUG, method, "exiting"])
    return resultSet
def testSourcePropertyRemove(filename):
    """Exercise api.sourceMemePropertyRemove(): create a meme, set a
    property on it, then remove that property.

    Each whitespace-delimited line holds:
        metamemePath, modulePath, memeName, propName, propValue, expected
    (the module path gets a "_remove" suffix to keep these memes separate
    from testSourceProperty's).

    Returns a list of [n, testcase, actualResult, expectedResult, errata] rows.
    """
    method = moduleName + '.' + 'testSourcePropertyRemove'
    Graph.logQ.put([logType, logLevel.DEBUG, method, "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, filename)
    # Bug fix: the original referenced readLoc.close without calling it,
    # leaking the file handle.  try/finally guarantees the close now.
    readLoc = codecs.open(testFileName, "r", "utf-8")
    try:
        allLines = readLoc.readlines()
    finally:
        readLoc.close()
    n = 0
    for eachReadLine in allLines:
        errata = []
        n = n + 1
        stringArray = str.split(str(eachReadLine))
        metamemePath = stringArray[0]
        modulePath = "%s_remove" % stringArray[1]
        memeName = stringArray[2]
        propName = stringArray[3]
        propValueStr = stringArray[4]
        testResult = str(False)
        try:
            sourceMeme = api.sourceMemeCreate(memeName, modulePath, metamemePath)
            api.sourceMemePropertySet(sourceMeme["memeID"], propName, propValueStr)
            operationResult = api.sourceMemePropertyRemove(sourceMeme["memeID"], propName)
            # operationResult is e.g. {'memeID': 'SourceProperty1_remove.L',
            #                          'ValidationResults': [True, []]}
            validation = operationResult["ValidationResults"]
            if validation[0] == True:
                testResult = str(True)
            else:
                testResult = str(False)
                errata = validation[1]
        except Exception as e:
            errorMsg = 'Error! Traceback = %s' % (e)
            # Synthesize a failed result so reporting below stays uniform.
            operationResult = {"memeID": "%s.%s" % (modulePath, memeName),
                               "ValidationResults": [False, errorMsg]}
            errata.append(errorMsg)
        # Bug fix: the original read sourceMeme["memeID"] here, which raises
        # TypeError (sourceMeme is still the initial []) whenever
        # sourceMemeCreate failed; operationResult["memeID"] is always set.
        testcase = "%s with property %s, %s removed" % (operationResult["memeID"], propName, propValueStr)
        resultSet.append([n, testcase, testResult, stringArray[5], errata])
        Graph.logQ.put([logType, logLevel.INFO, method, "Finished testcase %s" % (n)])
    Graph.logQ.put([logType, logLevel.DEBUG, method, "exiting"])
    return resultSet
def testSourceMember(filename):
method = moduleName + '.' + 'testSourceMember'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
readLoc.close
n = 0
for eachReadLine in allLines:
#e.g. (Examples.M, SourceMember3, M, Examples.L, SourceMember3, L, 2, False)
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
metamemePath = stringArray[0]
modulePath | |
(%s)'%\
(TimeList[-1][0],TimeList[-1][1])
debug_msg += '\n Smallest fraction of time %.3f %% (%s)'%\
(TimeList[0][0],TimeList[0][1])
# =============================
# == log file eror detection ==
# =============================
# Find the number of potential errors found in all log files
# This re is a simple match on a case-insensitve 'error' but there is
# also some veto added for excluding the sentence
# "See Section 6 of paper for error calculation."
# which appear in the header of lhapdf in the logs.
err_finder = re.compile(\
r"(?<!of\spaper\sfor\s)\bERROR\b(?!\scalculation\.)",re.IGNORECASE)
for log in all_log_files:
logfile=open(log,'r')
nErrors = len(re.findall(err_finder, logfile.read()))
logfile.close()
if nErrors != 0:
stats['Errors'].append((str(log),nErrors))
nErrors = sum([err[1] for err in stats['Errors']],0)
if nErrors != 0:
debug_msg += '\n WARNING:: A total of %d error%s ha%s been '\
%(nErrors,'s' if nErrors>1 else '','ve' if nErrors>1 else 's')+\
'found in the following log file%s:'%('s' if \
len(stats['Errors'])>1 else '')
for error in stats['Errors'][:3]:
log_name = '/'.join(error[0].split('/')[-5:])
debug_msg += '\n > %d error%s in %s'%\
(error[1],'s' if error[1]>1 else '',log_name)
if len(stats['Errors'])>3:
nRemainingErrors = sum([err[1] for err in stats['Errors']][3:],0)
nRemainingLogs = len(stats['Errors'])-3
debug_msg += '\n And another %d error%s in %d other log file%s'%\
(nRemainingErrors, 's' if nRemainingErrors>1 else '',
nRemainingLogs, 's ' if nRemainingLogs>1 else '')
return message, debug_msg
def reweight_and_collect_events(self, options, mode, nevents, event_norm):
    """Run the reweighting routines (when the run card requests them) and
    collect the partial event files into a single gzipped LHE file under
    Events/<run_name>.  Returns the path of the event file created, without
    the trailing '.gz'.
    """
    scale_pdf_info=[]
    # Reweighting is only needed when scale/PDF variations or multiple
    # dynamical scale choices / PDF sets were requested in the run card.
    if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \
       len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1:
        scale_pdf_info = self.run_reweight(options['reweightonly'])
    self.update_status('Collecting events', level='parton', update_results=True)
    misc.compile(['collect_events'],
                cwd=pjoin(self.me_dir, 'SubProcesses'), nocompile=options['nocompile'])
    # collect_events reads the normalisation choice from stdin:
    # 1 for 'sum', 3 for 'unity', 0 for 'bias', 2 otherwise (the default).
    p = misc.Popen(['./collect_events'], cwd=pjoin(self.me_dir, 'SubProcesses'),
            stdin=subprocess.PIPE,
            stdout=open(pjoin(self.me_dir, 'collect_events.log'), 'w'))
    if event_norm.lower() == 'sum':
        p.communicate(input = '1\n')
    elif event_norm.lower() == 'unity':
        p.communicate(input = '3\n')
    elif event_norm.lower() == 'bias':
        p.communicate(input = '0\n')
    else:
        p.communicate(input = '2\n')
    #get filename from collect events
    # The last whitespace-separated token in the log is the name of the
    # file collect_events wrote.
    filename = open(pjoin(self.me_dir, 'collect_events.log')).read().split()[-1]
    if not os.path.exists(pjoin(self.me_dir, 'SubProcesses', filename)):
        raise aMCatNLOError('An error occurred during event generation. ' + \
                'The event file has not been created. Check collect_events.log')
    evt_file = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')
    # gzip the collected file straight into the Events/<run_name> directory.
    misc.gzip(pjoin(self.me_dir, 'SubProcesses', filename), stdout=evt_file)
    if not options['reweightonly']:
        self.print_summary(options, 2, mode, scale_pdf_info)
    # Move the per-channel result files alongside the event file.
    res_files = misc.glob('res*.txt', pjoin(self.me_dir, 'SubProcesses'))
    for res_file in res_files:
        files.mv(res_file,pjoin(self.me_dir, 'Events', self.run_name))
    logger.info('The %s file has been generated.\n' % (evt_file))
    self.results.add_detail('nb_event', nevents)
    self.update_status('Events generated', level='parton', update_results=True)
    # Callers expect the uncompressed name, so strip the '.gz' suffix.
    return evt_file[:-3]
def run_mcatnlo(self, evt_file, options):
"""runs mcatnlo on the generated event file, to produce showered-events
"""
logger.info('Preparing MCatNLO run')
try:
misc.gunzip(evt_file)
except Exception:
pass
self.banner = banner_mod.Banner(evt_file)
shower = self.banner.get_detail('run_card', 'parton_shower').upper()
#check that the number of split event files divides the number of
# events, otherwise set it to 1
if int(self.banner.get_detail('run_card', 'nevents') / \
self.shower_card['nsplit_jobs']) * self.shower_card['nsplit_jobs'] \
!= self.banner.get_detail('run_card', 'nevents'):
logger.warning(\
'nsplit_jobs in the shower card is not a divisor of the number of events.\n' + \
'Setting it to 1.')
self.shower_card['nsplit_jobs'] = 1
# don't split jobs if the user asks to shower only a part of the events
if self.shower_card['nevents'] > 0 and \
self.shower_card['nevents'] < self.banner.get_detail('run_card', 'nevents') and \
self.shower_card['nsplit_jobs'] != 1:
logger.warning(\
'Only a part of the events will be showered.\n' + \
'Setting nsplit_jobs in the shower_card to 1.')
self.shower_card['nsplit_jobs'] = 1
self.banner_to_mcatnlo(evt_file)
# if fastjet has to be linked (in extralibs) then
# add lib /include dirs for fastjet if fastjet-config is present on the
# system, otherwise add fjcore to the files to combine
if 'fastjet' in self.shower_card['extralibs']:
#first, check that stdc++ is also linked
if not 'stdc++' in self.shower_card['extralibs']:
logger.warning('Linking FastJet: adding stdc++ to EXTRALIBS')
self.shower_card['extralibs'] += ' stdc++'
# then check if options[fastjet] corresponds to a valid fj installation
try:
#this is for a complete fj installation
p = subprocess.Popen([self.options['fastjet'], '--prefix'], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
#remove the line break from output (last character)
output = output[:-1]
# add lib/include paths
if not pjoin(output, 'lib') in self.shower_card['extrapaths']:
logger.warning('Linking FastJet: updating EXTRAPATHS')
self.shower_card['extrapaths'] += ' ' + pjoin(output, 'lib')
if not pjoin(output, 'include') in self.shower_card['includepaths']:
logger.warning('Linking FastJet: updating INCLUDEPATHS')
self.shower_card['includepaths'] += ' ' + pjoin(output, 'include')
# to be changed in the fortran wrapper
include_line = '#include "fastjet/ClusterSequence.hh"//INCLUDE_FJ'
namespace_line = 'namespace fj = fastjet;//NAMESPACE_FJ'
except Exception:
logger.warning('Linking FastJet: using fjcore')
# this is for FJcore, so no FJ library has to be linked
self.shower_card['extralibs'] = self.shower_card['extralibs'].replace('fastjet', '')
if not 'fjcore.o' in self.shower_card['analyse']:
self.shower_card['analyse'] += ' fjcore.o'
# to be changed in the fortran wrapper
include_line = '#include "fjcore.hh"//INCLUDE_FJ'
namespace_line = 'namespace fj = fjcore;//NAMESPACE_FJ'
# change the fortran wrapper with the correct namespaces/include
fjwrapper_lines = open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc')).read().split('\n')
for line in fjwrapper_lines:
if '//INCLUDE_FJ' in line:
fjwrapper_lines[fjwrapper_lines.index(line)] = include_line
if '//NAMESPACE_FJ' in line:
fjwrapper_lines[fjwrapper_lines.index(line)] = namespace_line
with open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc'), 'w') as fsock:
fsock.write('\n'.join(fjwrapper_lines) + '\n')
extrapaths = self.shower_card['extrapaths'].split()
# check that the path needed by HW++ and PY8 are set if one uses these shower
if shower in ['HERWIGPP', 'PYTHIA8']:
path_dict = {'HERWIGPP': ['hepmc_path',
'thepeg_path',
'hwpp_path'],
'PYTHIA8': ['pythia8_path']}
if not all([self.options[ppath] and os.path.exists(self.options[ppath]) for ppath in path_dict[shower]]):
raise aMCatNLOError('Some paths are missing or invalid in the configuration file.\n' + \
('Please make sure you have set these variables: %s' % ', '.join(path_dict[shower])))
if shower == 'HERWIGPP':
extrapaths.append(pjoin(self.options['hepmc_path'], 'lib'))
self.shower_card['extrapaths'] += ' %s' % pjoin(self.options['hepmc_path'], 'lib')
# add the HEPMC path of the pythia8 installation
if shower == 'PYTHIA8':
hepmc = subprocess.Popen([pjoin(self.options['pythia8_path'], 'bin', 'pythia8-config'), '--hepmc2'],
stdout = subprocess.PIPE).stdout.read().strip()
#this gives all the flags, i.e.
#-I/Path/to/HepMC/include -L/Path/to/HepMC/lib -lHepMC
# we just need the path to the HepMC libraries
extrapaths.append(hepmc.split()[1].replace('-L', ''))
if shower == 'PYTHIA8' and not os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')):
extrapaths.append(pjoin(self.options['pythia8_path'], 'lib'))
# set the PATH for the dynamic libraries
if sys.platform == 'darwin':
ld_library_path = 'DYLD_LIBRARY_PATH'
else:
ld_library_path = 'LD_LIBRARY_PATH'
if ld_library_path in os.environ.keys():
paths = os.environ[ld_library_path]
else:
paths = ''
paths += ':' + ':'.join(extrapaths)
os.putenv(ld_library_path, paths)
shower_card_path = pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat')
self.shower_card.write_card(shower, shower_card_path)
# overwrite if shower_card_set.dat exists in MCatNLO
if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat')):
files.mv(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat'),
pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat'))
mcatnlo_log = pjoin(self.me_dir, 'mcatnlo.log')
self.update_status('Compiling MCatNLO for %s...' % shower, level='shower')
# libdl may be needded for pythia 82xx
#if shower == 'PYTHIA8' and not \
# os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')) and \
# 'dl' not in self.shower_card['extralibs'].split():
# # 'dl' has to be linked with the extralibs
# self.shower_card['extralibs'] += ' dl'
# logger.warning("'dl' was added to extralibs from the shower_card.dat.\n" + \
# "It is needed for the correct running of PY8.2xx.\n" + \
# "If this library cannot be found on your system, a crash will occur.")
misc.call(['./MCatNLO_MadFKS.inputs'], stdout=open(mcatnlo_log, 'w'),
stderr=open(mcatnlo_log, 'w'),
cwd=pjoin(self.me_dir, 'MCatNLO'),
close_fds=True)
exe = 'MCATNLO_%s_EXE' % shower
if not os.path.exists(pjoin(self.me_dir, 'MCatNLO', exe)) and \
not os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe')):
print open(mcatnlo_log).read()
raise aMCatNLOError('Compilation failed, check %s for details' % mcatnlo_log)
logger.info(' ... done')
# create an empty dir where to run
count = 1
while os.path.isdir(pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \
(shower, count))):
count += 1
rundir = pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \
(shower, count))
os.mkdir(rundir)
files.cp(shower_card_path, rundir)
#look for the event files (don't resplit if one asks for the
# same number of event files as in the previous run)
event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name))
if max(len(event_files), 1) != self.shower_card['nsplit_jobs']:
logger.info('Cleaning old files and splitting the event file...')
#clean the old files
files.rm([f for f in event_files if 'events.lhe' not in f])
if self.shower_card['nsplit_jobs'] > 1:
misc.compile(['split_events'], cwd = pjoin(self.me_dir, 'Utilities'), nocompile=options['nocompile'])
p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'split_events')],
stdin=subprocess.PIPE,
stdout=open(pjoin(self.me_dir, 'Events', self.run_name, 'split_events.log'), 'w'),
cwd=pjoin(self.me_dir, 'Events', self.run_name))
p.communicate(input = 'events.lhe\n%d\n' % self.shower_card['nsplit_jobs'])
logger.info('Splitting done.')
event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name))
event_files.sort()
self.update_status('Showering events...', level='shower')
logger.info('(Running in %s)' % rundir)
if shower != 'PYTHIA8':
files.mv(pjoin(self.me_dir, 'MCatNLO', exe), rundir)
files.mv(pjoin(self.me_dir, 'MCatNLO', 'MCATNLO_%s_input' % shower), rundir)
else:
# special treatment for pythia8
files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.cmd'), rundir)
files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe'), rundir)
if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): # this is PY8.1xxx
files.ln(pjoin(self.options['pythia8_path'], 'examples', 'config.sh'), rundir)
files.ln(pjoin(self.options['pythia8_path'], 'xmldoc'), rundir)
else: # this is PY8.2xxx
files.ln(pjoin(self.options['pythia8_path'], | |
<reponame>PiRSquared17/r-orange
# Author: <NAME> (<EMAIL>) modified by <NAME>
# Description:
# signal dialog, canvas options dialog
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import OWGUI,redRQTCore, sys, os
import RSession
import redREnviron, re, redRStyle, redRObjects
import redRLog
import redRi18n
# def _(a):
# return a
_ = redRi18n.Coreget_()
class MetaDialog(QDialog):
    """Dialog shown (to developers) when a widget's meta-data file is missing.

    Presents an editable text area pre-filled with the blank widget
    meta-data XML template so the meta data for *filename* can be written
    on the spot.  Offers OK / Cancel / "Not Now" buttons; "Not Now"
    rejects the dialog but sets self.notNow so the caller can tell a
    deferral apart from a plain Cancel.
    """
    def __init__(self, filename):
        QDialog.__init__(self)
        ## GUI
        self.setLayout(QVBoxLayout())
        self.layout().setMargin(2)
        self.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum)
        self.setMinimumSize(QSize(500, 400))
        topWidgetPart = redRQTCore.widgetBox(self, orientation="vertical", margin=0)
        topWidgetPart.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum)
        self.layout().addWidget(topWidgetPart)
        self.controlArea = redRQTCore.widgetBox(topWidgetPart, orientation="vertical", margin=4)
        label = redRQTCore.widgetLabel(self.controlArea, label = _('Meta data for %s not found you can add it now since you appear to be a developer.') % filename)
        # Editable area for the meta data, pre-loaded with the blank template.
        self.text = redRQTCore.textEdit(self.controlArea, label = _('Widget Meta Data'))
        with open(os.path.join(redREnviron.directoryNames['widgetDir'], 'blank', 'meta', 'widgets', 'widgetTemplate.xml'), 'r') as f:
            g = f.read()
        self.text.insertPlainText(g)
        # Distinguishes "Not Now" from a plain Cancel after the dialog closes.
        self.notNow = False
        buttonBox = redRQTCore.widgetBox(self.controlArea, orientation = 'horizontal')
        acceptButton = redRQTCore.button(buttonBox, label = _('OK'), callback = self.accept)
        rejectButton = redRQTCore.button(buttonBox, label = _('Cancel'), callback = self.reject)
        doneButton = redRQTCore.button(buttonBox, label = _('Not Now'), callback = self.notNowCallback)
    def notNowCallback(self):
        # Reject the dialog, but flag that the user deferred rather than
        # cancelled.
        self.notNow = True
        self.reject()
class ColorIcon(QToolButton):
    """Small tool button showing a colour swatch; clicking it opens a
    QColorDialog to pick a new colour."""

    def __init__(self, parent, color):
        QToolButton.__init__(self, parent)
        self.color = color
        self.setMaximumSize(20,20)
        self.connect(self, SIGNAL("clicked()"), self.showColorDialog)
        self.updateColor()

    def updateColor(self):
        # Paint a 16x16 swatch of the current colour and install it as the
        # button's icon.
        swatch = QPixmap(16,16)
        brush = QPainter()
        brush.begin(swatch)
        brush.setPen(QPen(self.color))
        brush.setBrush(QBrush(self.color))
        brush.drawRect(0, 0, 16, 16)
        brush.end()
        self.setIcon(QIcon(swatch))
        self.setIconSize(QSize(16,16))

    def drawButtonLabel(self, painter):
        # Fill the button face (minus a 3px border) with the colour.
        painter.setBrush(QBrush(self.color))
        painter.setPen(QPen(self.color))
        painter.drawRect(3, 3, self.width()-6, self.height()-6)

    def showColorDialog(self):
        chosen = QColorDialog.getColor(self.color, self)
        if not chosen.isValid():
            return
        self.color = chosen
        self.updateColor()
        self.repaint()
# canvas dialog
class CanvasOptionsDlg(QDialog):
    def __init__(self, canvasDlg):
        """Build the canvas Options/Preferences dialog.

        All edits operate on ``self.settings`` — a *copy* of
        ``redREnviron.settings`` — so nothing sticks unless the dialog is
        accepted.  Tabs created here: General (also carries the
        look-and-feel options), Under the Hood, Exceptions & Logging,
        and R Settings.
        """
        QDialog.__init__(self,canvasDlg)
        self.canvasDlg = canvasDlg
        self.settings = dict(redREnviron.settings) # create a copy of the settings dict. in case we accept the dialog, we update the redREnviron.settings with this dict
        if sys.platform == "darwin":
            self.setWindowTitle(_("Preferences"))
        else:
            self.setWindowTitle(_("Canvas Options"))
        self.topLayout = QVBoxLayout(self)
        self.topLayout.setSpacing(10)
        self.resize(450,300)
        self.toAdd = []
        self.toRemove = []
        # One widgetBox per tab page; alignment keeps controls pinned to the top.
        self.tabs = QTabWidget(self)
        GeneralTab = OWGUI.widgetBox(self.tabs, margin = 4)
        GeneralTab.layout().setAlignment(Qt.AlignTop)
        # lookandFeel = OWGUI.widgetBox(self.tabs, margin = 4)
        # lookandFeel.layout().setAlignment(Qt.AlignTop)
        UnderHood = OWGUI.widgetBox(self.tabs, margin = 4)
        UnderHood.layout().setAlignment(Qt.AlignTop)
        ExceptionsTab = OWGUI.widgetBox(self.tabs, margin = 4)
        ExceptionsTab.layout().setAlignment(Qt.AlignTop)
        RSettings = OWGUI.widgetBox(self.tabs, margin = 4)
        RSettings.layout().setAlignment(Qt.AlignTop)
        self.tabs.addTab(GeneralTab, "General")
        # self.tabs.addTab(lookandFeel, "Look and Feel")
        self.tabs.addTab(UnderHood, "Under the Hood")
        self.tabs.addTab(ExceptionsTab, "Exceptions & Logging")
        self.tabs.addTab(RSettings, _('R Settings'))
        QObject.connect(self.tabs, SIGNAL('currentChanged(int)'), self.onTabChange)
        #GeneralTab.layout().addStretch(1)
        # #################################################################
        # GENERAL TAB
        generalBox = OWGUI.widgetBox(GeneralTab, _('General Options'))
        self.emailEdit = OWGUI.lineEdit(generalBox, self.settings, "email", _("Email Address:"), orientation = 'horizontal')
        # self.helpModeSelection = OWGUI.checkBox(generalBox,self.settings,'helpMode',
        # _('Show help icons'))
        self.checkForUpdates = redRQTCore.checkBox(generalBox, label = 'checkForUpdates', displayLabel = 0, buttons = [('checkForUpdates',_("Periodically Check For Updates"))])
        if redREnviron.settings['checkForUpdates']:
            self.checkForUpdates.setChecked('checkForUpdates')
        self.checkForPackageUpdates = redRQTCore.checkBox(generalBox, label = 'checkForPackageUpdates', displayLabel = 0, buttons = [('checkForPackageUpdates',_("Periodically Check For Package Updates"))])
        if redREnviron.settings['checkForPackageUpdates']:
            self.checkForPackageUpdates.setChecked('checkForPackageUpdates')
        self.dontAskBeforeCloseCB= OWGUI.checkBox(generalBox, self.settings, "dontAskBeforeClose",
                                    _("Don't ask to save schema before closing"), debuggingEnabled = 0)
        # NOTE(review): widget key is 'askbeforedelete' but the setting checked
        # below is 'askBeforeWidgetDelete' — verify both refer to the same option.
        self.dontAskBeforeDeleting = redRQTCore.checkBox(generalBox, label = 'askbeforedelete', displayLabel = 0, buttons = [('ask',_("Ask Before Deleting Widget"))])
        if redREnviron.settings['askBeforeWidgetDelete']:
            self.dontAskBeforeDeleting.setChecked('ask')
        # #################################################################
        # LOOK AND FEEL TAB
        # validator = QIntValidator(self)
        # validator.setRange(0,10000)
        lookFeelBox = OWGUI.widgetBox(GeneralTab, _("Look and Feel Options"))
        self.snapToGridCB = OWGUI.checkBox(lookFeelBox, self.settings, "snapToGrid",
                                    _("Snap widgets to grid"), debuggingEnabled = 0)
        self.showSignalNamesCB = OWGUI.checkBox(lookFeelBox, self.settings, "showSignalNames",
                                    _("Show signal names between widgets"), debuggingEnabled = 0)
        self.saveWidgetsPositionCB = OWGUI.checkBox(lookFeelBox, self.settings, "saveWidgetsPosition",
                                    _("Save size and position of widgets"), debuggingEnabled = 0)
        items = ["%d x %d" % (v,v) for v in redRStyle.iconSizeList]
        # val = min(len(items)-1, self.settings['schemeIconSize'])
        self.schemeIconSizeCombo = OWGUI.comboBoxWithCaption(lookFeelBox, self.settings, 'schemeIconSize',
                                    _("Scheme icon size:"), items = items, tooltip = _("Set the size of the widget icons on the scheme"),
                                    debuggingEnabled = 0)
        # redREnviron.settings["toolbarIconSize"] = min(len(items)-1, redREnviron.settings["toolbarIconSize"])
        self.toolbarIconSizeCombo = OWGUI.comboBoxWithCaption(lookFeelBox, self.settings, "toolbarIconSize",
                                    _("Widget Tree Icon size:"), items = items,
                                    tooltip = _("Set the size of the widget icons in the toolbar, tool box, and tree view area"),
                                    debuggingEnabled = 0)
        # hbox1 = OWGUI.widgetBox(GeneralTab, orientation = "horizontal")
        # canvasDlgSettings = OWGUI.widgetBox(hbox1, "Canvas Dialog Settings")
        # schemeSettings = OWGUI.widgetBox(hbox1, "Scheme Settings")
        # self.widthSlider = OWGUI.qwtHSlider(canvasDlgSettings, self.settings, "canvasWidth",
        # minValue = 300, maxValue = 1200, label = "Canvas width: ", step = 50, precision = " %.0f px", debuggingEnabled = 0)
        # self.heightSlider = OWGUI.qwtHSlider(canvasDlgSettings, self.settings, "canvasHeight",
        # minValue = 300, maxValue = 1200, label = "Canvas height: ", step = 50, precision = " %.0f px", debuggingEnabled = 0)
        # OWGUI.separator(canvasDlgSettings)
        OWGUI.comboBox(lookFeelBox, self.settings, "style", label = _("Window style:"), orientation = "horizontal",
                                    items = redRStyle.QtStyles, sendSelectedValue = 1, debuggingEnabled = 0)
        #OWGUI.checkBox(lookFeelBox, self.settings, "useDefaultPalette", _("Use style's standard palette"), debuggingEnabled = 0)
        self.language = redRQTCore.listBox(lookFeelBox, label = _('Language'), items = self.settings['language'], enableDragDrop = 1)
        # selectedWidgetBox = OWGUI.widgetBox(schemeSettings, orientation = "horizontal")
        # self.selectedWidgetIcon = ColorIcon(selectedWidgetBox, redRStyle.widgetSelectedColor)
        # selectedWidgetBox.layout().addWidget(self.selectedWidgetIcon)
        # selectedWidgetLabel = OWGUI.widgetLabel(selectedWidgetBox, " Selected widget")
        # activeWidgetBox = OWGUI.widgetBox(schemeSettings, orientation = "horizontal")
        # self.activeWidgetIcon = ColorIcon(activeWidgetBox, redRStyle.widgetActiveColor)
        # activeWidgetBox.layout().addWidget(self.activeWidgetIcon)
        # selectedWidgetLabel = OWGUI.widgetLabel(activeWidgetBox, " Active widget")
        # activeLineBox = OWGUI.widgetBox(schemeSettings, orientation = "horizontal")
        # self.activeLineIcon = ColorIcon(activeLineBox, redRStyle.lineColor)
        # activeLineBox.layout().addWidget(self.activeLineIcon)
        # selectedWidgetLabel = OWGUI.widgetLabel(activeLineBox, " Active Lines")
        # inactiveLineBox = OWGUI.widgetBox(schemeSettings, orientation = "horizontal")
        # self.inactiveLineIcon = ColorIcon(inactiveLineBox, redRStyle.lineColor)
        # inactiveLineBox.layout().addWidget(self.inactiveLineIcon)
        # selectedWidgetLabel = OWGUI.widgetLabel(inactiveLineBox, " Inactive Lines")
        # #################################################################
        # UNDER THE HOOD TAB
        templates = OWGUI.widgetBox(UnderHood, _("Templates Add On Dirs"))
        self.templateDirsListBox = redRQTCore.listBox(templates, label = _("Template Directories"), items = redREnviron.settings['templateDirectories'])
        templateButtons = redRQTCore.widgetBox(templates, orientation = 'horizontal')
        redRQTCore.button(templateButtons, label = _('Add Directory'), callback = self.addTemplateDirectory)
        redRQTCore.button(templateButtons, label = _('Remove Selected'), callback = self.removeTemplateDirectory)
        # lambda defaults bind the regression-test level at definition time.
        redRQTCore.button(UnderHood, label = _('Regression Test (Core Developers Only)'), callback = lambda val = 1:self.regressionTest(val))
        redRQTCore.button(UnderHood, label = _('Test Packages (Core Developers Only)'), callback = lambda val = 2:self.regressionTest(val))
        redRQTCore.button(UnderHood, label = _('Create help index.'), callback = self.createHelpIndex)
        #redRQTCore.button(UnderHood, label = _('Create Red-R Documentation'), callback = self.createPackageDocs)
        # #################################################################
        # EXCEPTION TAB
        debug = OWGUI.widgetBox(ExceptionsTab, _("Debug"))
        # self.setDebugModeCheckBox = OWGUI.checkBox(debug, self.settings, "debugMode", "Set to debug mode") # sets the debug mode of the canvas.
        self.verbosityCombo = OWGUI.comboBox(debug, self.settings, "outputVerbosity", label = _("Set level of widget output: "),
                                    orientation='horizontal', items=redRLog.logLevelsName)
        self.displayTraceback = OWGUI.checkBox(debug, self.settings, "displayTraceback", _('Display Traceback'))
        # self.exceptionLevel = redRQTCore.spinBox(debug, label = 'Exception Print Level:', toolTip = 'Select the level of exception that will be printed to the Red-R general output', min = 0, max = 9, value = redREnviron.settings['exceptionLevel'])
        # self.otherLevel = redRQTCore.spinBox(debug, label = 'General Print Level:', toolTip = _('Select the level of general logging that will be output to the general output'), min = 0, max = 9, value = redREnviron.settings['minSeverity'])
        exceptions = OWGUI.widgetBox(ExceptionsTab, _("Exceptions"))
        #self.catchExceptionCB = QCheckBox(_('Catch exceptions'), exceptions)
        self.focusOnCatchExceptionCB = OWGUI.checkBox(exceptions, self.settings, "focusOnCatchException", _('Show output window on exception'))
        # self.printExceptionInStatusBarCB = OWGUI.checkBox(exceptions, self.settings, "printExceptionInStatusBar", _('Print last exception in status bar'))
        # NOTE(review): the next two lines reuse the same attribute name, so only
        # the 'askToUploadError' checkbox remains referenced after __init__.
        self.printExceptionInStatusBarCB = OWGUI.checkBox(exceptions, self.settings, "uploadError", _('Submit Error Report'))
        self.printExceptionInStatusBarCB = OWGUI.checkBox(exceptions, self.settings, "askToUploadError", _('Always ask before submitting error report'))
        output = OWGUI.widgetBox(ExceptionsTab, _("Log File"))
        #self.catchOutputCB = QCheckBox(_('Catch system output'), output)
        self.writeLogFileCB = OWGUI.checkBox(output, self.settings, "writeLogFile",
                                    _("Save content of the Output window to a log file"))
        hbox = OWGUI.widgetBox(output, orientation = "horizontal")
        self.logFile = redRQTCore.lineEdit(hbox, label= _("Log Dir:"), orientation = 'horizontal',
                                    text=self.settings['logsDir'])
        # NOTE(review): self.okButton is reassigned at the bottom of __init__ for
        # the dialog OK button; this Browse-button reference is overwritten there.
        self.okButton = OWGUI.button(hbox, self, _("Browse"), callback = self.browseLogFile)
        #self.showOutputLog = redRQTCore.button(output, label = _('Show Log File'), callback = self.showLogFile)
        self.numberOfDays = redRQTCore.spinBox(output, label = 'Keep Log Files for X days:', min = -1, value = self.settings['keepForXDays'], callback = self.numberOfDaysChanged)
        # self.focusOnCatchOutputCB = OWGUI.checkBox(output, self.settings, "focusOnCatchOutput", _('Focus output window on system output'))
        # self.printOutputInStatusBarCB = OWGUI.checkBox(output, self.settings, "printOutputInStatusBar", _('Print last system output in status bar'))
        ExceptionsTab.layout().addStretch(1)
        #####################################
        # R Settings Tab
        self.rlibrariesBox = OWGUI.widgetBox(RSettings, _('R Libraries'))
        redRQTCore.button(RSettings, label = _('Update R Libraries'), callback = self.updatePackages)
        self.libInfo = redRQTCore.widgetLabel(self.rlibrariesBox, label='Repository URL:\n '+ self.settings['CRANrepos'])
        self.libListBox = redRQTCore.listBox(self.rlibrariesBox, label = _('Mirrors'),
                                    callback = self.setMirror)
        ################################ Global buttons ######################
        # OK, Cancel buttons
        hbox = OWGUI.widgetBox(self, orientation = "horizontal", sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed))
        hbox.layout().addStretch(1)
        self.okButton = OWGUI.button(hbox, self, _("OK"), callback = self.accept)
        self.cancelButton = OWGUI.button(hbox, self, _("Cancel"), callback = self.reject)
        #self.connect(self.tabOrderList, SIGNAL("currentRowChanged(int)"), self.enableDisableButtons)
        self.topLayout.addWidget(self.tabs)
        self.topLayout.addWidget(hbox)
# def createPackageDocs(self):
# import doc.createDoc as createDoc
# createDoc.makeDoc(redREnviron.directoryNames['redRDir'])
def createHelpIndex(self):
import docSearcher
docSearcher.createIndex(redREnviron.directoryNames['redRDir'])
def regressionTest(self, val):
import redRRegressionTest
redRRegressionTest.test(val)
def addTemplateDirectory(self):
"""This function is | |
# 学习制作网易云音乐客户端。
# 此文件实现登陆查询等一系列功能。
__author__ = 'weiy'
"""
4.10日。
"""
import urllib.parse
import requests
import hashlib
import json
def shotlist(lst):
    """Return the unique elements of *lst* as a new, ascending-sorted list.

    Note: original element order is NOT preserved (the result is sorted),
    matching the original implementation's behaviour.

    Args:
        lst: any iterable of mutually comparable, hashable items.

    Returns:
        A new sorted list with duplicates removed.
    """
    # sorted() accepts any iterable, so the intermediate list() was redundant.
    return sorted(set(lst))
class WebApi:
    """Assorted NetEase Cloud Music web-API helpers (login and playlist queries)."""
    # Default per-request timeout in seconds, used by httpRequest.
    default_timeout = 10
    # Browser-like request headers; Host/Referer mimic the web player so
    # music.163.com accepts the requests.
    headers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip,deflate,sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Proxy-Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Host': 'music.163.com',
        'Referer': 'http://music.163.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.101 Safari/537.36'
    }
def __init__(self):
self.cookies = {
'appver': '1.6.1.82809',
'os': 'pc'
}
def httpRequest(self, action, method="GET", add=None, data=None, headers=headers, cookies='',\
timeout=default_timeout, urlencode='utf-8'):
"""
默认以get方式请求,
GET方式附加内容用add参数,POST方式提交内容用data参数。
编码用urlencode参数,默认utf-8。
GET方式返回json形式请求的内容。
POST方式返回cookies和json形式的内容。(0,1)
默认cookies为空。
"""
if method.upper() == 'GET':
if add:
html = requests.get(action, params=add, headers=headers, cookies=cookies, timeout=timeout)
else:
html = requests.get(action, headers=headers, cookies=cookies, timeout=timeout)
html.encoding = urlencode
return json.loads(html.text)
elif method.upper() == 'POST':
if data:
html = requests.post(action, data=data, headers=headers, cookies=cookies, timeout=timeout)
else:
html = requests.post(action, headers=headers, cookies=cookies, timeout=timeout)
html.encoding = urlencode
return html.cookies, json.loads(html.text)
def login(self, username, password):
"""
以网易账号登陆,其他的登陆待写。返回cookies和json形式内容。
"""
data = {
'username': username,
'password': hashlib.md5(password.encode('utf-8')).hexdigest(),
'remeberLogin': 'true'
}
cki = self.httpRequest('http://music.163.com/api/login', method="POST", data=data)
cki[0].set('appver', self.cookies['appver'], domain='music.163.com')
cki[0].set('os', self.cookies['os'], domain='music.163.com')
return cki[0], cki[1]
def user_playlist(self, uid, offset=0):
"""
个人歌单。
"""
url = 'http://music.163.com/api/user/playlist/?offset=%s&limit=1000&uid=%s' % (offset, uid)
html = self.httpRequest(url, method='GET', cookies=self.cookies)
return html['playlist']
def all_playlist(self, cat='全部歌单', types='all', offset=0, index=1):
"""
全部歌单。列表字典形式。
"""
url = 'http://music.163.com/api/playlist/list?cat=%s&type=%s&order=%s&offset=%d&total=true&limit=30&index=%d)'\
% (urllib.parse.quote(cat), types, types, offset, index)
html = self.httpRequest(url, method='GET', cookies=self.cookies)
return html['playlists']
def details_playlist(self,id):
return '''{
"result": {
"subscribers": [],
"subscribed": false,
"creator": {
"defaultAvatar": false,
"province": 110000,
"authStatus": 1,
"followed": false,
"avatarUrl": "http://p1.music.126.net/QWMV-Ru_6149AKe0mCBXKg==/1420569024374784.jpg",
"accountStatus": 0,
"gender": 1,
"city": 110101,
"birthday": -2209017600000,
"userId": 1,
"userType": 2,
"nickname": "网易云音乐",
"signature": "欢迎使用网易云音乐,有任何问题可以联系@云音乐客服, 我们会尽快答复。有关独立音乐人和独立厂牌请站内私信@原创君。",
"description": "网易云音乐官方账号",
"detailDescription": "网易云音乐官方账号",
"avatarImgId": 1420569024374784,
"backgroundImgId": 2002210674180202,
"backgroundUrl": "http://p1.music.126.net/pmHS4fcQtcNEGewNb5HRhg==/2002210674180202.jpg",
"authority": 3,
"mutual": false,
"expertTags": null,
"experts": null,
"djStatus": 10,
"vipType": 11,
"remarkName": null,
"avatarImgIdStr": "1420569024374784",
"backgroundImgIdStr": "2002210674180202"
},
"artists": null,
"tracks": [{
"name": "火焰小溪",
"id": 1297750771,
"position": 1,
"alias": ["\"声音,你好\"公益活动主题曲"],
"status": 0,
"fee": 8,
"copyrightId": 677020,
"disc": "",
"no": 1,
"artists": [{
"name": "林宥嘉",
"id": 3685,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"album": {
"name": "火焰小溪",
"id": 72071867,
"type": "EP/Single",
"size": 2,
"picId": 109951163440625910,
"blurPicUrl": "http://p2.music.126.net/HeSyftZftDogVH1VkFqN1A==/109951163440625910.jpg",
"companyId": 0,
"pic": 109951163440625910,
"picUrl": "http://p2.music.126.net/HeSyftZftDogVH1VkFqN1A==/109951163440625910.jpg",
"publishTime": 1533398400007,
"description": "",
"tags": "",
"company": "华研",
"briefDesc": "",
"artist": {
"name": "",
"id": 0,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
},
"songs": [],
"alias": ["\"声音,你好\"公益活动主题曲"],
"status": 1,
"copyrightId": 677020,
"commentThreadId": "R_AL_3_72071867",
"artists": [{
"name": "林宥嘉",
"id": 3685,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"subType": "录音室版",
"transName": null,
"picId_str": "109951163440625910"
},
"starred": false,
"popularity": 100.0,
"score": 100,
"starredNum": 0,
"duration": 274997,
"playedNum": 0,
"dayPlays": 0,
"hearTime": 0,
"ringtone": null,
"crbt": null,
"audition": null,
"copyFrom": "",
"commentThreadId": "R_SO_4_1297750771",
"rtUrl": null,
"ftype": 0,
"rtUrls": [],
"copyright": 0,
"transName": null,
"sign": null,
"hMusic": {
"name": "",
"id": 3419924588,
"size": 11002819,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 320000,
"playTime": 274997,
"volumeDelta": -2.0
},
"mMusic": {
"name": "",
"id": 3419924589,
"size": 6601709,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 192000,
"playTime": 274997,
"volumeDelta": -2.0
},
"lMusic": {
"name": "",
"id": 3419924590,
"size": 4401154,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 274997,
"volumeDelta": -1.0
},
"bMusic": {
"name": "",
"id": 3419924590,
"size": 4401154,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 274997,
"volumeDelta": -1.0
},
"mp3Url": null,
"rtype": 0,
"rurl": null,
"mvid": 5965315
}, {
"name": "说谎",
"id": 108390,
"position": 6,
"alias": [],
"status": 0,
"fee": 8,
"copyrightId": 677020,
"disc": "1",
"no": 6,
"artists": [{
"name": "林宥嘉",
"id": 3685,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"album": {
"name": "感官/世界",
"id": 10764,
"type": "专辑",
"size": 11,
"picId": 109951163187404137,
"blurPicUrl": "http://p2.music.126.net/mMZNB-jhYsw29K61QtopJA==/109951163187404137.jpg",
"companyId": 0,
"pic": 109951163187404137,
"picUrl": "http://p2.music.126.net/mMZNB-jhYsw29K61QtopJA==/109951163187404137.jpg",
"publishTime": 1256832000000,
"description": "",
"tags": "",
"company": "华研国际",
"briefDesc": "",
"artist": {
"name": "",
"id": 0,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
},
"songs": [],
"alias": [],
"status": 40,
"copyrightId": 1004,
"commentThreadId": "R_AL_3_10764",
"artists": [{
"name": "林宥嘉",
"id": 3685,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"subType": "录音室版",
"transName": null,
"picId_str": "109951163187404137"
},
"starred": false,
"popularity": 100.0,
"score": 100,
"starredNum": 0,
"duration": 264160,
"playedNum": 0,
"dayPlays": 0,
"hearTime": 0,
"ringtone": "600902000009129439",
"crbt": null,
"audition": null,
"copyFrom": "",
"commentThreadId": "R_SO_4_108390",
"rtUrl": null,
"ftype": 0,
"rtUrls": [],
"copyright": 2,
"transName": null,
"sign": null,
"hMusic": {
"name": "",
"id": 1426514912,
"size": 10569187,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 320000,
"playTime": 264160,
"volumeDelta": 0.0
},
"mMusic": {
"name": "",
"id": 1426514913,
"size": 6341529,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 192000,
"playTime": 264160,
"volumeDelta": 0.0
},
"lMusic": {
"name": "",
"id": 1426514914,
"size": 4227701,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 264160,
"volumeDelta": 0.0
},
"bMusic": {
"name": "",
"id": 1426514914,
"size": 4227701,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 264160,
"volumeDelta": 0.0
},
"mp3Url": null,
"rtype": 0,
"rurl": null,
"mvid": 5842732
}, {
"name": "Perfect",
"id": 460043703,
"position": 5,
"alias": [],
"status": 0,
"fee": 8,
"copyrightId": 7002,
"disc": "1",
"no": 5,
"artists": [{
"name": "<NAME>",
"id": 33184,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"album": {
"name": "÷ (Deluxe)",
"id": 35150843,
"type": "专辑",
"size": 16,
"picId": 18810444929762432,
"blurPicUrl": "http://p2.music.126.net/ARJwzJcDmmd0PYArKnmGCg==/18810444929762432.jpg",
"companyId": 0,
"pic": 18810444929762432,
"picUrl": "http://p2.music.126.net/ARJwzJcDmmd0PYArKnmGCg==/18810444929762432.jpg",
"publishTime": 1488470400007,
"description": "",
"tags": "",
"company": "华纳唱片",
"briefDesc": "",
"artist": {
"name": "",
"id": 0,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
},
"songs": [],
"alias": [],
"status": 3,
"copyrightId": 7002,
"commentThreadId": "R_AL_3_35150843",
"artists": [{
"name": "<NAME>",
"id": 33184,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"subType": "录音室版",
"transName": null,
"picId_str": "18810444929762432"
},
"starred": false,
"popularity": 100.0,
"score": 100,
"starredNum": 0,
"duration": 263400,
"playedNum": 0,
"dayPlays": 0,
"hearTime": 0,
"ringtone": null,
"crbt": null,
"audition": null,
"copyFrom": "",
"commentThreadId": "R_SO_4_460043703",
"rtUrl": null,
"ftype": 0,
"rtUrls": [],
"copyright": 2,
"transName": null,
"sign": null,
"hMusic": {
"name": null,
"id": 1286918421,
"size": 10538885,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 320000,
"playTime": 263400,
"volumeDelta": -1.2,
"dfsId_str": null
},
"mMusic": {
"name": null,
"id": 1286918422,
"size": 5269465,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 160000,
"playTime": 263400,
"volumeDelta": -0.76,
"dfsId_str": null
},
"lMusic": {
"name": null,
"id": 1286918423,
"size": 3161697,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 96000,
"playTime": 263400,
"volumeDelta": -0.79,
"dfsId_str": null
},
"bMusic": {
"name": null,
"id": 1286918423,
"size": 3161697,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 96000,
"playTime": 263400,
"volumeDelta": -0.79,
"dfsId_str": null
},
"mp3Url": null,
"rtype": 0,
"rurl": null,
"mvid": 5725016
}, {
"name": "River",
"id": 523250559,
"position": 3,
"alias": [],
"status": 0,
"fee": 4,
"copyrightId": 7003,
"disc": "1",
"no": 5,
"artists": [{
"name": "Eminem",
"id": 32665,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}, {
"name": "<NAME>",
"id": 33184,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"album": {
"name": "Revival",
"id": 36952205,
"type": "专辑",
"size": 19,
"picId": 18448705602723085,
"blurPicUrl": "http://p2.music.126.net/v-c-6B2aS4sZ_G-i97uiUg==/18448705602723085.jpg",
"companyId": 0,
"pic": 18448705602723085,
"picUrl": "http://p2.music.126.net/v-c-6B2aS4sZ_G-i97uiUg==/18448705602723085.jpg",
"publishTime": 1513296000000,
"description": "",
"tags": "",
"company": "环球唱片",
"briefDesc": "",
"artist": {
"name": "",
"id": 0,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
},
"songs": [],
"alias": [],
"status": -4,
"copyrightId": 7003,
"commentThreadId": "R_AL_3_36952205",
"artists": [{
"name": "Eminem",
"id": 32665,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"subType": "录音室版",
"transName": null,
"picId_str": "18448705602723085"
},
"starred": false,
"popularity": 100.0,
"score": 100,
"starredNum": 0,
"duration": 221013,
"playedNum": 0,
"dayPlays": 0,
"hearTime": 0,
"ringtone": null,
"crbt": null,
"audition": null,
"copyFrom": "",
"commentThreadId": "R_SO_4_523250559",
"rtUrl": null,
"ftype": 0,
"rtUrls": [],
"copyright": 2,
"transName": null,
"sign": null,
"hMusic": {
"name": "",
"id": 1398129440,
"size": 8843015,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 320000,
"playTime": 221013,
"volumeDelta": -1.0
},
"mMusic": {
"name": "",
"id": 1398129441,
"size": 5305826,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 192000,
"playTime": 221013,
"volumeDelta": -1.0
},
"lMusic": {
"name": "",
"id": 1398129442,
"size": 3537232,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 221013,
"volumeDelta": -1.0
},
"bMusic": {
"name": "",
"id": 1398129442,
"size": 3537232,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 221013,
"volumeDelta": -1.0
},
"mp3Url": null,
"rtype": 0,
"rurl": null,
"mvid": 5841160
}, {
"name": "Best of 2017 Medley",
"id": 526652668,
"position": 1,
"alias": [],
"status": 0,
"fee": 0,
"copyrightId": 0,
"disc": "1",
"no": 1,
"artists": [{
"name": "<NAME>",
"id": 86862,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"album": {
"name": "Best of 2017 Medley",
"id": 37099076,
"type": "EP/Single",
"size": 1,
"picId": 109951163095074756,
"blurPicUrl": "http://p2.music.126.net/0dLExQOaZRizEGO4XQ45eA==/109951163095074756.jpg",
"companyId": 0,
"pic": 109951163095074756,
"picUrl": "http://p2.music.126.net/0dLExQOaZRizEGO4XQ45eA==/109951163095074756.jpg",
"publishTime": 1513872000000,
"description": "",
"tags": "",
"company": "Wavy Records",
"briefDesc": "",
"artist": {
"name": "",
"id": 0,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
},
"songs": [],
"alias": [],
"status": 0,
"copyrightId": 0,
"commentThreadId": "R_AL_3_37099076",
"artists": [{
"name": "<NAME>",
"id": 86862,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"subType": "录音室版",
"transName": null,
"picId_str": "109951163095074756"
},
"starred": false,
"popularity": 100.0,
"score": 100,
"starredNum": 0,
"duration": 145998,
"playedNum": 0,
"dayPlays": 0,
"hearTime": 0,
"ringtone": null,
"crbt": null,
"audition": null,
"copyFrom": "",
"commentThreadId": "R_SO_4_526652668",
"rtUrl": null,
"ftype": 0,
"rtUrls": [],
"copyright": 0,
"transName": null,
"sign": null,
"hMusic": null,
"mMusic": {
"name": "",
"id": 1402340446,
"size": 3504631,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 192000,
"playTime": 145998,
"volumeDelta": 0.0
},
"lMusic": {
"name": "",
"id": 1402340447,
"size": 2336435,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 145998,
"volumeDelta": 0.0
},
"bMusic": {
"name": "",
"id": 1402340447,
"size": 2336435,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 145998,
"volumeDelta": 0.0
},
"mp3Url": null,
"rtype": 0,
"rurl": null,
"mvid": 5796031
}, {
"name": "极美",
"id": 516657215,
"position": 10,
"alias": [],
"status": 0,
"fee": 4,
"copyrightId": 7003,
"disc": "1",
"no": 10,
"artists": [{
"name": "孙燕姿",
"id": 9272,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"album": {
"name": "孙燕姿No. 13作品:跳舞的梵谷",
"id": 36714070,
"type": "专辑",
"size": 10,
"picId": 18357446138140955,
"blurPicUrl": "http://p2.music.126.net/_VjuIgInJqwxdyoy4FF3IA==/18357446138140955.jpg",
"companyId": 0,
"pic": 18357446138140955,
"picUrl": "http://p2.music.126.net/_VjuIgInJqwxdyoy4FF3IA==/18357446138140955.jpg",
"publishTime": 1510185600000,
"description": "",
"tags": "",
"company": "环球唱片",
"briefDesc": "",
"artist": {
"name": "",
"id": 0,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
},
"songs": [],
"alias": [],
"status": -4,
"copyrightId": 7003,
"commentThreadId": "R_AL_3_36714070",
"artists": [{
"name": "孙燕姿",
"id": 9272,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"subType": "录音室版",
"transName": null,
"picId_str": "18357446138140955"
},
"starred": false,
"popularity": 95.0,
"score": 95,
"starredNum": 0,
"duration": 211613,
"playedNum": 0,
"dayPlays": 0,
"hearTime": 0,
"ringtone": null,
"crbt": null,
"audition": null,
"copyFrom": "",
"commentThreadId": "R_SO_4_516657215",
"rtUrl": null,
"ftype": 0,
"rtUrls": [],
"copyright": 2,
"transName": null,
"sign": null,
"hMusic": {
"name": "",
"id": 1402501222,
"size": 8466852,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 320000,
"playTime": 211613,
"volumeDelta": 0.0
},
"mMusic": {
"name": "",
"id": 1402501223,
"size": 5080129,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 192000,
"playTime": 211613,
"volumeDelta": 0.0
},
"lMusic": {
"name": "",
"id": 1402501224,
"size": 3386767,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 211613,
"volumeDelta": 0.0
},
"bMusic": {
"name": "",
"id": 1402501224,
"size": 3386767,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 211613,
"volumeDelta": 0.0
},
"mp3Url": null,
"rtype": 0,
"rurl": null,
"mvid": 5810580
}, {
"name": "连名带姓",
"id": 522352195,
"position": 3,
"alias": [],
"status": 0,
"fee": 4,
"copyrightId": 7003,
"disc": "",
"no": 6,
"artists": [{
"name": "张惠妹",
"id": 10559,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"album": {
"name": "偷故事的人 ",
"id": 36941045,
"type": "专辑",
"size": 10,
"picId": 109951163079119875,
"blurPicUrl": "http://p2.music.126.net/SbJdGLDz9V1_sLffpjMU8g==/109951163079119875.jpg",
"companyId": 0,
"pic": 109951163079119875,
"picUrl": "http://p2.music.126.net/SbJdGLDz9V1_sLffpjMU8g==/109951163079119875.jpg",
"publishTime": 1513008000007,
"description": "",
"tags": "",
"company": "环球唱片",
"briefDesc": "",
"artist": {
"name": "",
"id": 0,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
},
"songs": [],
"alias": [],
"status": -4,
"copyrightId": 7003,
"commentThreadId": "R_AL_3_36941045",
"artists": [{
"name": "张惠妹",
"id": 10559,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"subType": "录音室版",
"transName": null,
"picId_str": "109951163079119875"
},
"starred": false,
"popularity": 100.0,
"score": 100,
"starredNum": 0,
"duration": 333549,
"playedNum": 0,
"dayPlays": 0,
"hearTime": 0,
"ringtone": null,
"crbt": null,
"audition": null,
"copyFrom": "",
"commentThreadId": "R_SO_4_522352195",
"rtUrl": null,
"ftype": 0,
"rtUrls": [],
"copyright": 2,
"transName": null,
"sign": null,
"hMusic": {
"name": "",
"id": 1404214114,
"size": 13344436,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 320000,
"playTime": 333549,
"volumeDelta": 0.0
},
"mMusic": {
"name": "",
"id": 1404214115,
"size": 8006679,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 192000,
"playTime": 333549,
"volumeDelta": 0.0
},
"lMusic": {
"name": "",
"id": 1404214116,
"size": 5337800,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 333549,
"volumeDelta": 0.0
},
"bMusic": {
"name": "",
"id": 1404214116,
"size": 5337800,
"extension": "mp3",
"sr": 44100,
"dfsId": 0,
"bitrate": 128000,
"playTime": 333549,
"volumeDelta": 0.0
},
"mp3Url": null,
"rtype": 0,
"rurl": null,
"mvid": 5764014
}, {
"name": "Look What You Made Me Do",
"id": 501133611,
"position": 3,
"alias": [],
"status": 0,
"fee": 4,
"copyrightId": 7003,
"disc": "1",
"no": 6,
"artists": [{
"name": "<NAME>",
"id": 44266,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"album": {
"name": "reputation",
"id": 36709029,
"type": "专辑",
"size": 15,
"picId": 109951163054654501,
"blurPicUrl": "http://p2.music.126.net/fdh0myRe6FD87QNJtvGe_A==/109951163054654501.jpg",
"companyId": 0,
"pic": 109951163054654501,
"picUrl": "http://p2.music.126.net/fdh0myRe6FD87QNJtvGe_A==/109951163054654501.jpg",
"publishTime": 1510243200007,
"description": "",
"tags": "",
"company": "环球唱片",
"briefDesc": "",
"artist": {
"name": "",
"id": 0,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
},
"songs": [],
"alias": [],
"status": -4,
"copyrightId": 7003,
"commentThreadId": "R_AL_3_36709029",
"artists": [{
"name": "<NAME>",
"id": 44266,
"picId": 0,
"img1v1Id": 0,
"briefDesc": "",
"picUrl": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"img1v1Url": "http://p2.music.126.net/6y-UleORITEDbvrOLV0Q8A==/5639395138885805.jpg",
"albumSize": 0,
"alias": [],
"trans": "",
"musicSize": 0
}],
"subType": "录音室版",
"transName": null,
"picId_str": "109951163054654501"
},
"starred": false,
"popularity": 100.0,
"score": 100,
"starredNum": 0,
"duration": 211859,
"playedNum": 0,
"dayPlays": 0,
"hearTime": 0,
"ringtone": null,
"crbt": null,
"audition": null,
"copyFrom": "",
"commentThreadId": "R_SO_4_501133611",
"rtUrl": null,
"ftype": 0,
"rtUrls": [],
"copyright": 2,
"transName": null,
"sign": null,
"hMusic": {
"name": "",
"id": 1350204242,
"size": | |
^ (ch_id in bn_nega_idx) \
else int(math.ceil(th_per_ch))
for c in range(ch):
threshold_table[c, -1] = 1 \
if np.all(threshold_table[c, 1:-1] > threshold_table[c, :-2], axis=0) else -1
if np.all(threshold_table[c, 1:-1] == threshold_table[c, :-2], axis=0):
threshold_table[c, -1] = 1
threshold_table[c, 0:-1] = max_th_value
bits_per_word = 32
rem = (bits_per_word - ch % bits_per_word) % bits_per_word
pad = np.ones((rem, n + 1), dtype=np.int32)
threshold_table = np.vstack((threshold_table, pad))
# Put the thresholds into list
conv_node.thresholds = threshold_table.flatten().tolist()
# get nodes to be removed after being disconnected
get_nodes_in_branch(activation_quantizer_node, conv_node, to_be_removed)
# Disconnect the outputs of the quantizer
out_ops = activation_quantizer_node.output_ops['output']
for output_node in out_ops:
for input_name, input_node in output_node.input_ops.items():
if input_node == activation_quantizer_node:
output_node.add_input(input_name, conv_node)
# Disconnect the outputs of the conv
conv_node.remove_output('Y')
conv_node.add_outputs({'Y': out_ops})
for op in to_be_removed:
graph.remove_op(op)
def pass_pack_weights(graph: Graph) -> None:
    """Given a Quantized convolution node C, it will pack the weights of C into 32 bit words.
    If the node Q that apply quantization to the weights of C quantizes, for example, into 1 bit values
    then one 32 bit word will contain 32 weights.
    Args:
        graph (Graph): The input graph. It will be modified in-place.
    """
    # Only convolution nodes are candidates for weight packing.
    exec_list = [n for n in sort_graph(graph) if n.op_type == 'Conv']
    quantization_types = [
        'QTZ_binary_mean_scaling',
        'QTZ_linear_mid_tread_half',
        'QTZ_binary_channel_wise_mean_scaling'
    ]
    word_size = 32
    weight_bitwidth = 1
    packer = Packer(weight_bitwidth, word_size)
    to_be_removed = []
    # b is the tiling/padding granularity; it equals word_size (32) here.
    b = 32
    for m in exec_list:
        conv_node = m
        # check if this is a quantized convolution
        if not conv_node.quantizer or not conv_node.a_quantizer:
            continue
        # Check if we support this kind of quantizer
        weight_quantizer = conv_node.quantizer
        if weight_quantizer.op_type not in quantization_types:
            continue
        # Quantize the weights
        weight_quantizer.run_forward()
        # Helper: returns a zero tensor that pads `axis` up to the next
        # multiple of b, or None when no padding is needed.
        # NOTE(review): re-defined on every loop iteration; harmless but could
        # be hoisted out of the loop.
        def pad_to_multiple_of_b(tensor, axis, b):
            shape = list(tensor.shape)
            pad = (((shape[axis] + b - 1) // b) * b) - shape[axis]
            shape[axis] = pad
            return np.zeros(shape) if pad else None
        padded_data = np.copy(weight_quantizer.data)
        # Pad axes 0 and 3 (presumably output-channel and kernel-depth of an
        # OHWI layout — TODO confirm) to multiples of b.
        for axis in [0, 3]:
            pad_tensor = pad_to_multiple_of_b(padded_data, axis, b)
            if pad_tensor is not None:
                padded_data = np.append(padded_data, pad_tensor, axis=axis)
        tca_output = np.copy(padded_data)
        oc, kh, kw, kd = padded_data.shape[:]
        padded_data = padded_data.flatten()
        tca_output = tca_output.flatten()
        # Reorder the flattened weights into the b-by-b tiled layout used for
        # the "transposed" (tca) representation; the index expression maps the
        # source OHWI offset for each (tile-group, depth-tile, h, w, o, d).
        out_index = 0
        for g in range(oc // b):
            for p in range(kd // b):
                for h in range(kh):
                    for w in range(kw):
                        for o in range(b):
                            for d in range(b):
                                idx = g * (kw * kh * kd * b) + p * b + h * (kw * kd) + w * kd + o * (kw * kh * kd) + d
                                tca_output[out_index] = padded_data[idx]
                                out_index += 1
        # Second reordering: HWOI order for the kn2row representation.
        kn2row_output = np.zeros(oc * kh * kw * kd)
        out_index = 0
        for h in range(kh):
            for w in range(kw):
                for o in range(oc):
                    for i in range(kd):
                        idx = o * kh * kw * kd + h * kw * kd + w * kd + i
                        kn2row_output[out_index] = padded_data[idx]
                        out_index += 1
        # Binarize each layout, then pack 32 one-bit weights per 32-bit word.
        op_data = weight_quantizer.binarizer(padded_data)
        data = packer.run(op_data.astype(np.float32), weight_quantizer.dimension)
        tca_binarized_data = weight_quantizer.binarizer(tca_output)
        tca_packed_data = packer.run(tca_binarized_data.astype(np.float32), weight_quantizer.dimension)
        kn2row_binarized_data = weight_quantizer.binarizer(kn2row_output)
        kn2row_data = packer.run(kn2row_binarized_data.astype(np.float32), weight_quantizer.dimension)
        shape = [oc, kh, kw, kd]
        tca_shape = [oc // b, kd // b, kh, kw, b, b]
        kn2row_shape = [kh, kw, oc, kd]
        # Create the new constant with the quantized weights.
        # The (~k) & 0xFFFFFFFF expression stores the bitwise complement of
        # each packed word, restricted to 32 bits.
        quantized_constant = Constant(
            weight_quantizer.name + '_new',
            PackedUint32(),
            data=np.vectorize(lambda k: (~k) & ((0x1 << 32) - 1))(data),
            dimension_format="OHWI",
            transposed_dimension_format="OhIhHWOlIl",
            packed=True,
            actual_shape=shape,
            transposed_shape=tca_shape,
            transposed_data=[(~k) & ((0x1 << 32) - 1) for k in tca_packed_data.flatten()],
            kn2row_data=[k for k in kn2row_data.flatten()],
            kn2row_shape=kn2row_shape,
            kn2row_dimension_format="HWOI"
        )
        # get nodes to be removed after being disconnected
        get_nodes_in_branch(weight_quantizer, None, to_be_removed)
        # Add the constant to the graph and connect the new constant
        # in place of the weight quantizer for every consumer.
        graph.add_op(quantized_constant)
        quantized_constant.add_outputs(weight_quantizer.output_ops)
        for output_name, consumer_list in weight_quantizer.output_ops.items():
            for consumer_node in consumer_list:
                for input_name, input_node in consumer_node.input_ops.items():
                    if input_node == weight_quantizer:
                        consumer_node.add_input(input_name, quantized_constant)
                        break
    # Remove the now-disconnected quantizer branches from the graph.
    for op in to_be_removed:
        graph.remove_op(op)
def pass_quantize_convolutions(graph: Graph) -> None:
    """Given a convolution node C, if C has proper quantization details, it will mark C as quantized and it will
    assign the correct output data types to the node C and its quantizers. Note that the expected output data type
    on the runtime is defined as QUANTIZED_NOT_PACKED.
    Args:
        graph (Graph): The input graph. It will be modified in-place.
    """
    word_bits = 32
    for conv_node in (n for n in sort_graph(graph) if n.op_type == 'Conv'):
        # Skip convolutions that lack either a weight or activation quantizer.
        if not (conv_node.quantizer and conv_node.a_quantizer):
            continue

        # Mark as quantized convolution.
        conv_node.is_quantized = True

        # When thresholds are available the convolution emits packed data, so
        # switch its dtype and reshape to the packed ChHWBCl layout.
        if conv_node.has_thresholds:
            conv_node.dtype = QUANTIZED_PACKED()
            ch_upper = (conv_node.channel + word_bits - 1) // word_bits
            conv_node.update_shape(
                [ch_upper, conv_node.height, conv_node.width, 2, word_bits],
                "ChHWBCl")

        # The weight quantizer now produces packed 32-bit words.
        conv_node.quantizer.dtype = PackedUint32()

        # Activation quantizers (except Lookup nodes) also switch to the
        # packed dtype and the HWChBCl layout.
        for act_qtz in conv_node.a_quantizer:
            if isinstance(act_qtz, Lookup):
                continue
            act_qtz.dtype = QUANTIZED_PACKED()
            ch_upper = (act_qtz.channel + word_bits - 1) // word_bits
            act_qtz.update_shape(
                [act_qtz.height, act_qtz.width, ch_upper, 2, word_bits],
                "HWChBCl")
def pass_propagate_datatypes(graph) -> None:
    """Further propagate output data types.
    Args:
        graph (Graph): The input graph. It will be modified in-place.
    """
    # Non-Conv nodes that preserve quantization inherit the dtype of their
    # first input node.
    for node in sort_graph(graph):
        if node.op_type == 'Conv' or not node.preserve_quantization:
            continue
        node.dtype = node.input_nodes[0].dtype
def pass_propagate_format(graph) -> None:
    """Further propagate output data types.
    Args:
        graph (Graph): The input graph. It will be modified in-place.
    """
    # Non-Conv nodes that preserve quantization inherit a packed dimension
    # format (ChHWBCl or HWChBCl) from their first input node, with the
    # channel axis rounded up to a multiple of the 32-bit word size.
    for node in sort_graph(graph):
        if node.op_type == 'Conv' or not node.preserve_quantization:
            continue
        fmt = node.input_nodes[0].dimension
        if fmt == 'ChHWBCl':
            word_bits = 32
            packed_shape = [(node.channel + word_bits - 1) // word_bits,
                            node.height, node.width, 2, word_bits]
            node.update_shape(packed_shape, fmt)
        elif fmt == 'HWChBCl':
            word_bits = 32
            packed_shape = [node.height, node.width,
                            (node.channel + word_bits - 1) // word_bits,
                            2, word_bits]
            node.update_shape(packed_shape, fmt)
def pass_propagate_output_type_backward(graph: Graph) -> None:
    """It is assumed that the output data type of a Graph is float.
    We should propagate this assumption backwards from the output node of the graph to the
    latest quantized convolution available.
    There could be cases where the latest convolution node Q is a quantized convolution and we also apply
    thresholds to its outputs. In this cases, the quantized convolution output data type should be float
    even if thresholds are applied.
    Args:
        graph (Graph): The input graph. It will be modified in-place.
    """
    def _push_dtype_back(node, target_dtype):
        # Walk each input branch backwards; the first quantized Conv found on
        # a branch gets its shape restored and its dtype overwritten, and
        # that branch's traversal stops there.
        for parent in node.input_nodes:
            if parent.op_type == 'Conv' and parent.is_quantized:
                parent.restore_shape()
                parent.dtype = target_dtype
                return
            _push_dtype_back(parent, target_dtype)

    # Propagate the graph output's data type back to the last quantized
    # convolution(s).
    last_node = sort_graph(graph)[-1]
    _push_dtype_back(last_node, last_node.dtype)
def pass_lookup(graph: Graph) -> None:
    """Fuse an Input -> Gather -> Gather -> Reshape -> Reshape -> quantizer
    chain into a single packed Lookup node backed by two 256-entry tables.

    The pattern is recognized only when the gather parameters form a
    (256, C) table (a per-byte lookup). The quantized table values are
    packed into 32-bit words, split into LSB/MSB constants, and a Lookup
    node replaces the whole chain.

    Args:
        graph (Graph): The input graph. It will be modified in-place.
    """
    quantization_types = [
        'QTZ_binary_mean_scaling',
        'QTZ_linear_mid_tread_half',
        'QTZ_binary_channel_wise_mean_scaling'
    ]
    to_be_removed = []
    exec_list = [n for n in sort_graph(graph) if n.op_type in quantization_types]
    # BUGFIX: this previously used `n.op_type in 'Input'`, which is substring
    # membership (it also matches 'In', 'put', '' ...). Exact match intended.
    placeholder = [n for n in sort_graph(graph) if n.op_type == 'Input']
    for m in exec_list:
        quantizer = m
        # Match the exact chain quantizer <- Reshape <- Reshape <- Gather <- Gather.
        p1 = quantizer.input_nodes[0]
        if p1.op_type != 'Reshape':
            continue
        p2 = p1.input_nodes[0]
        if p2.op_type != 'Reshape':
            continue
        p3 = p2.input_nodes[0]
        if p3.op_type != 'Gather':
            continue
        p4 = p3.input_nodes[0]
        if p4.op_type != 'Gather':
            continue
        gather_params = p4.input_nodes[0]
        # Only a 2-D, 256-row table qualifies as a byte-indexed lookup.
        if gather_params.rank != 2 or gather_params.shape[0] != 256:
            continue
        # Run the quantizer over the raw table to get quantized entries.
        params = gather_params.data
        data = {'data': params}
        qtz_data = quantizer.run(**data)['data']
        word_size = 32
        lu_bitwidth = quantizer.nbit
        packer = Packer(lu_bitwidth, word_size)
        # Pack each of the 256 table rows into two 32-bit words (LSB/MSB).
        lsb = np.zeros((256,), np.uint32)
        msb = np.zeros((256,), np.uint32)
        idx = 0
        for p in qtz_data:
            data = packer.run(p.astype(np.float32), p.shape).flatten()
            lsb[idx] = data[0]
            msb[idx] = data[1]
            idx += 1
        pe_lsb = Constant('pe_lsb_new', QUANTIZED_PACKED_KERNEL(), lsb,
                          dimension_format='TC', packed=True, actual_shape=[256, word_size])
        pe_msb = Constant('pe_msb_new', QUANTIZED_PACKED_KERNEL(), msb,
                          dimension_format='TC', packed=True, actual_shape=[256, word_size])
        # The Lookup node output follows the quantizer's spatial shape in the
        # packed ChHWBCl layout.
        n, h, w, c = quantizer.shape
        shape = [1, h, w, 2, word_size]
        pe = Lookup('Lookup', shape, QUANTIZED_PACKED(),
                    {'input': placeholder[0], 'lsb': pe_lsb, 'msb': pe_msb}, dimension_format='ChHWBCl')
        # Collect the now-bypassed chain for removal, then rewire:
        # Input -> Lookup -> (former consumers of the quantizer).
        get_nodes_in_branch(quantizer, placeholder[0], to_be_removed)
        placeholder[0].remove_output('output')
        placeholder[0].add_output('output', pe)
        pe.add_outputs(quantizer.output_ops)
        output_op = quantizer.output_op_list[0]
        # Find which of the consumer's input slots held the quantizer;
        # fall back to 'X' if none matches.
        target_input_name = 'X'
        for input_name in output_op._input_names:
            if quantizer.equals(output_op._input_ops[input_name]):
                target_input_name = input_name
                break
        output_op.add_input(target_input_name, pe)
        graph.add_op(pe_lsb)
        graph.add_op(pe_msb)
        graph.add_op(pe)
    for op in to_be_removed:
        graph.remove_op(op)
def pass_simplify_batchnorm(graph: Graph) -> None:
"""Simplify BarchNorm operator.
"""
exec_list = | |
'66:02:52',
'66:02:53',
'66:02:54',
'66:02:55',
'66:02:56',
'66:02:57',
'66:02:58',
'66:02:59',
'66:02:60',
'66:02:61',
'66:02:62',
'66:02:63',
'66:02:64',
'66:02:65',
'66:02:66',
'66:02:67',
'66:02:68',
'66:02:69',
'66:02:70',
'66:02:71',
'66:02:72',
'66:02:73',
'66:02:74',
'66:02:75',
'66:02:76',
'66:02:77',
'66:02:78',
'66:02:79',
'66:02:80',
'66:02:81',
'66:02:82',
'66:02:83',
'66:02:84',
'66:02:85',
'66:02:86',
'66:02:87',
'66:02:88',
'66:02:89',
'66:02:90',
'66:02:91',
'66:02:92',
'66:02:93',
'66:02:94',
'66:02:95',
'66:02:96',
'66:02:97',
'66:02:98',
'66:02:99',
'66:03:00',
'66:03:01',
'66:03:02',
'66:03:03',
'66:03:04',
'66:03:05',
'66:03:06',
'66:03:07',
'66:03:08',
'66:03:09',
'66:03:10',
'66:03:11',
'66:03:12',
'66:03:13',
'66:03:14',
'66:03:15',
'66:03:16',
'66:03:17',
'66:03:18',
'66:03:19',
'66:03:20',
'66:03:21',
'66:03:22',
'66:03:23',
'66:03:24',
'66:03:25',
'66:03:26',
'66:03:27',
'66:03:28',
'66:03:29',
'66:03:30',
'66:03:31',
'66:03:32',
'66:03:33',
'66:03:34',
'66:03:35',
'66:03:36',
'66:03:37',
'66:03:38',
'66:03:39',
'66:03:40',
'66:03:41',
'66:03:42',
'66:03:43',
'66:03:44',
'66:03:45',
'66:03:46',
'66:03:47',
'66:03:48',
'66:03:49',
'66:03:50',
'66:03:51',
'66:03:52',
'66:03:53',
'66:03:54',
'66:03:55',
'66:03:56',
'66:03:57',
'66:03:58',
'66:03:59',
'66:03:60',
'66:03:61',
'66:03:62',
'66:03:63',
'66:03:64',
'66:03:65',
'66:03:66',
'66:03:67',
'66:03:68',
'66:03:69',
'66:03:70',
'66:03:71',
'66:03:72',
'66:03:73',
'66:03:74',
'66:03:75',
'66:03:76',
'66:03:77',
'66:03:78',
'66:03:79',
'66:03:80',
'66:03:81',
'66:03:82',
'66:03:83',
'66:03:84',
'66:03:85',
'66:03:86',
'66:03:87',
'66:03:88',
'66:03:89',
'66:03:90',
'66:03:91',
'66:03:92',
'66:03:93',
'66:03:94',
'66:03:95',
'66:03:96',
'66:03:97',
'66:03:98',
'66:03:99',
'66:04:00',
'66:04:01',
'66:04:02',
'66:04:03',
'66:04:04',
'66:04:05',
'66:04:06',
'66:04:07',
'66:04:08',
'66:04:09',
'66:04:10',
'66:04:11',
'66:04:12',
'66:04:13',
'66:04:14',
'66:04:15',
'66:04:16',
'66:04:17',
'66:04:18',
'66:04:19',
'66:04:20',
'66:04:21',
'66:04:22',
'66:04:23',
'66:04:24',
'66:04:25',
'66:04:26',
'66:04:27',
'66:04:28',
'66:04:29',
'66:04:30',
'66:04:31',
'66:04:32',
'66:04:33',
'66:04:34',
'66:04:35',
'66:04:36',
'66:04:37',
'66:04:38',
'66:04:39',
'66:04:40',
'66:04:41',
'66:04:42',
'66:04:43',
'66:04:44',
'66:04:45',
'66:04:46',
'66:04:47',
'66:04:48',
'66:04:49',
'66:04:50',
'66:04:51',
'66:04:52',
'66:04:53',
'66:04:54',
'66:04:55',
'66:04:56',
'66:04:57',
'66:04:58',
'66:04:59',
'66:04:60',
'66:04:61',
'66:04:62',
'66:04:63',
'66:04:64',
'66:04:65',
'66:04:66',
'66:04:67',
'66:04:68',
'66:04:69',
'66:04:70',
'66:04:71',
'66:04:72',
'66:04:73',
'66:04:74',
'66:04:75',
'66:04:76',
'66:04:77',
'66:04:78',
'66:04:79',
'66:04:80',
'66:04:81',
'66:04:82',
'66:04:83',
'66:04:84',
'66:04:85',
'66:04:86',
'66:04:87',
'66:04:88',
'66:04:89',
'66:04:90',
'66:04:91',
'66:04:92',
'66:04:93',
'66:04:94',
'66:04:95',
'66:04:96',
'66:04:97',
'66:04:98',
'66:04:99',
'66:05:00',
'66:05:01',
'66:05:02',
'66:05:03',
'66:05:04',
'66:05:05',
'66:05:06',
'66:05:07',
'66:05:08',
'66:05:09',
'66:05:10',
'66:05:11',
'66:05:12',
'66:05:13',
'66:05:14',
'66:05:15',
'66:05:16',
'66:05:17',
'66:05:18',
'66:05:19',
'66:05:20',
'66:05:21',
'66:05:22',
'66:05:23',
'66:05:24',
'66:05:25',
'66:05:26',
'66:05:27',
'66:05:28',
'66:05:29',
'66:05:30',
'66:05:31',
'66:05:32',
'66:05:33',
'66:05:34',
'66:05:35',
'66:05:36',
'66:05:37',
'66:05:38',
'66:05:39',
'66:05:40',
'66:05:41',
'66:05:42',
'66:05:43',
'66:05:44',
'66:05:45',
'66:05:46',
'66:05:47',
'66:05:48',
'66:05:49',
'66:05:50',
'66:05:51',
'66:05:52',
'66:05:53',
'66:05:54',
'66:05:55',
'66:05:56',
'66:05:57',
'66:05:58',
'66:05:59',
'66:05:60',
'66:05:61',
'66:05:62',
'66:05:63',
'66:05:64',
'66:05:65',
'66:05:66',
'66:05:67',
'66:05:68',
'66:05:69',
'66:05:70',
'66:05:71',
'66:05:72',
'66:05:73',
'66:05:74',
'66:05:75',
'66:05:76',
'66:05:77',
'66:05:78',
'66:05:79',
'66:05:80',
'66:05:81',
'66:05:82',
'66:05:83',
'66:05:84',
'66:05:85',
'66:05:86',
'66:05:87',
'66:05:88',
'66:05:89',
'66:05:90',
'66:05:91',
'66:05:92',
'66:05:93',
'66:05:94',
'66:05:95',
'66:05:96',
'66:05:97',
'66:05:98',
'66:05:99',
'66:06:00',
'66:06:01',
'66:06:02',
'66:06:03',
'66:06:04',
'66:06:05',
'66:06:06',
'66:06:07',
'66:06:08',
'66:06:09',
'66:06:10',
'66:06:11',
'66:06:12',
'66:06:13',
'66:06:14',
'66:06:15',
'66:06:16',
'66:06:17',
'66:06:18',
'66:06:19',
'66:06:20',
'66:06:21',
'66:06:22',
'66:06:23',
'66:06:24',
'66:06:25',
'66:06:26',
'66:06:27',
'66:06:28',
'66:06:29',
'66:06:30',
'66:06:31',
'66:06:32',
'66:06:33',
'66:06:34',
'66:06:35',
'66:06:36',
'66:06:37',
'66:06:38',
'66:06:39',
'66:06:40',
'66:06:41',
'66:06:42',
'66:06:43',
'66:06:44',
'66:06:45',
'66:06:46',
'66:06:47',
'66:06:48',
'66:06:49',
'66:06:50',
'66:06:51',
'66:06:52',
'66:06:53',
'66:06:54',
'66:06:55',
'66:06:56',
'66:06:57',
'66:06:58',
'66:06:59',
'66:06:60',
'66:06:61',
'66:06:62',
'66:06:63',
'66:06:64',
'66:06:65',
'66:06:66',
'66:06:67',
'66:06:68',
'66:06:69',
'66:06:70',
'66:06:71',
'66:06:72',
'66:06:73',
'66:06:74',
'66:06:75',
'66:06:76',
'66:06:77',
'66:06:78',
'66:06:79',
'66:06:80',
'66:06:81',
'66:06:82',
'66:06:83',
'66:06:84',
'66:06:85',
'66:06:86',
'66:06:87',
'66:06:88',
'66:06:89',
'66:06:90',
'66:06:91',
'66:06:92',
'66:06:93',
'66:06:94',
'66:06:95',
'66:06:96',
'66:06:97',
'66:06:98',
'66:06:99',
'66:07:00',
'66:07:01',
'66:07:02',
'66:07:03',
'66:07:04',
'66:07:05',
'66:07:06',
'66:07:07',
'66:07:08',
'66:07:09',
'66:07:10',
'66:07:11',
'66:07:12',
'66:07:13',
'66:07:14',
'66:07:15',
'66:07:16',
'66:07:17',
'66:07:18',
'66:07:19',
'66:07:20',
'66:07:21',
'66:07:22',
'66:07:23',
'66:07:24',
'66:07:25',
'66:07:26',
'66:07:27',
'66:07:28',
'66:07:29',
'66:07:30',
'66:07:31',
'66:07:32',
'66:07:33',
'66:07:34',
'66:07:35',
'66:07:36',
'66:07:37',
'66:07:38',
'66:07:39',
'66:07:40',
'66:07:41',
'66:07:42',
'66:07:43',
'66:07:44',
'66:07:45',
'66:07:46',
'66:07:47',
'66:07:48',
'66:07:49',
'66:07:50',
'66:07:51',
'66:07:52',
'66:07:53',
'66:07:54',
'66:07:55',
'66:07:56',
'66:07:57',
'66:07:58',
'66:07:59',
'66:07:60',
'66:07:61',
'66:07:62',
'66:07:63',
'66:07:64',
'66:07:65',
'66:07:66',
'66:07:67',
'66:07:68',
'66:07:69',
'66:07:70',
'66:07:71',
'66:07:72',
'66:07:73',
'66:07:74',
'66:07:75',
'66:07:76',
'66:07:77',
'66:07:78',
'66:07:79',
'66:07:80',
'66:07:81',
'66:07:82',
'66:07:83',
'66:07:84',
'66:07:85',
'66:07:86',
'66:07:87',
'66:07:88',
'66:07:89',
'66:07:90',
'66:07:91',
'66:07:92',
'66:07:93',
'66:07:94',
'66:07:95',
'66:07:96',
'66:07:97',
'66:07:98',
'66:07:99',
'66:08:00',
'66:08:01',
'66:08:02',
'66:08:03',
'66:08:04',
'66:08:05',
'66:08:06',
'66:08:07',
'66:08:08',
'66:08:09',
'66:08:10',
'66:08:11',
'66:08:12',
'66:08:13',
'66:08:14',
'66:08:15',
'66:08:16',
'66:08:17',
'66:08:18',
'66:08:19',
'66:08:20',
'66:08:21',
'66:08:22',
'66:08:23',
'66:08:24',
'66:08:25',
'66:08:26',
'66:08:27',
'66:08:28',
'66:08:29',
'66:08:30',
'66:08:31',
'66:08:32',
'66:08:33',
'66:08:34',
'66:08:35',
'66:08:36',
'66:08:37',
'66:08:38',
'66:08:39',
'66:08:40',
'66:08:41',
'66:08:42',
'66:08:43',
'66:08:44',
'66:08:45',
'66:08:46',
'66:08:47',
'66:08:48',
'66:08:49',
'66:08:50',
'66:08:51',
'66:08:52',
'66:08:53',
'66:08:54',
'66:08:55',
'66:08:56',
'66:08:57',
'66:08:58',
'66:08:59',
'66:08:60',
'66:08:61',
'66:08:62',
'66:08:63',
'66:08:64',
'66:08:65',
'66:08:66',
'66:08:67',
'66:08:68',
'66:08:69',
'66:08:70',
'66:08:71',
'66:08:72',
'66:08:73',
'66:08:74',
'66:08:75',
'66:08:76',
'66:08:77',
'66:08:78',
'66:08:79',
'66:08:80',
'66:08:81',
'66:08:82',
'66:08:83',
'66:08:84',
'66:08:85',
'66:08:86',
'66:08:87',
'66:08:88',
'66:08:89',
'66:08:90',
'66:08:91',
'66:08:92',
'66:08:93',
'66:08:94',
'66:08:95',
'66:08:96',
'66:08:97',
'66:08:98',
'66:08:99',
'66:09:00',
'66:09:01',
'66:09:02',
'66:09:03',
'66:09:04',
'66:09:05',
'66:09:06',
'66:09:07',
'66:09:08',
'66:09:09',
'66:09:10',
'66:09:11',
'66:09:12',
'66:09:13',
'66:09:14',
'66:09:15',
'66:09:16',
'66:09:17',
'66:09:18',
'66:09:19',
'66:09:20',
'66:09:21',
'66:09:22',
'66:09:23',
'66:09:24',
'66:09:25',
'66:09:26',
'66:09:27',
'66:09:28',
'66:09:29',
'66:09:30',
'66:09:31',
'66:09:32',
'66:09:33',
'66:09:34',
'66:09:35',
'66:09:36',
'66:09:37',
'66:09:38',
'66:09:39',
'66:09:40',
'66:09:41',
'66:09:42',
'66:09:43',
'66:09:44',
'66:09:45',
'66:09:46',
'66:09:47',
'66:09:48',
'66:09:49',
'66:09:50',
'66:09:51',
'66:09:52',
'66:09:53',
'66:09:54',
'66:09:55',
'66:09:56',
'66:09:57',
'66:09:58',
'66:09:59',
'66:09:60',
'66:09:61',
'66:09:62',
'66:09:63',
'66:09:64',
'66:09:65',
'66:09:66',
'66:09:67',
'66:09:68',
'66:09:69',
'66:09:70',
'66:09:71',
'66:09:72',
'66:09:73',
'66:09:74',
'66:09:75',
'66:09:76',
'66:09:77',
'66:09:78',
'66:09:79',
'66:09:80',
'66:09:81',
'66:09:82',
'66:09:83',
'66:09:84',
'66:09:85',
'66:09:86',
'66:09:87',
'66:09:88',
'66:09:89',
'66:09:90',
'66:09:91',
'66:09:92',
'66:09:93',
'66:09:94',
'66:09:95',
'66:09:96',
'66:09:97',
'66:09:98',
'66:09:99',
'66:10:00',
'66:10:01',
'66:10:02',
'66:10:03',
'66:10:04',
'66:10:05',
'66:10:06',
'66:10:07',
'66:10:08',
'66:10:09',
'66:10:10',
'66:10:11',
'66:10:12',
'66:10:13',
'66:10:14',
'66:10:15',
'66:10:16',
'66:10:17',
'66:10:18',
'66:10:19',
'66:10:20',
'66:10:21',
'66:10:22',
'66:10:23',
'66:10:24',
'66:10:25',
'66:10:26',
'66:10:27',
'66:10:28',
'66:10:29',
'66:10:30',
'66:10:31',
'66:10:32',
'66:10:33',
'66:10:34',
'66:10:35',
'66:10:36',
'66:10:37',
'66:10:38',
'66:10:39',
'66:10:40',
'66:10:41',
'66:10:42',
'66:10:43',
'66:10:44',
'66:10:45',
'66:10:46',
'66:10:47',
'66:10:48',
'66:10:49',
'66:10:50',
'66:10:51',
'66:10:52',
'66:10:53',
'66:10:54',
'66:10:55',
'66:10:56',
'66:10:57',
'66:10:58',
'66:10:59',
'66:10:60',
'66:10:61',
'66:10:62',
'66:10:63',
'66:10:64',
'66:10:65',
'66:10:66',
'66:10:67',
'66:10:68',
'66:10:69',
'66:10:70',
'66:10:71',
'66:10:72',
'66:10:73',
'66:10:74',
'66:10:75',
'66:10:76',
'66:10:77',
'66:10:78',
'66:10:79',
'66:10:80',
'66:10:81',
'66:10:82',
'66:10:83',
'66:10:84',
'66:10:85',
'66:10:86',
'66:10:87',
'66:10:88',
'66:10:89',
'66:10:90',
'66:10:91',
'66:10:92',
'66:10:93',
'66:10:94',
'66:10:95',
'66:10:96',
'66:10:97',
'66:10:98',
'66:10:99',
'66:11:00',
'66:11:01',
'66:11:02',
'66:11:03',
'66:11:04',
'66:11:05',
'66:11:06',
'66:11:07',
'66:11:08',
'66:11:09',
'66:11:10',
'66:11:11',
'66:11:12',
'66:11:13',
'66:11:14',
'66:11:15',
'66:11:16',
'66:11:17',
'66:11:18',
'66:11:19',
'66:11:20',
'66:11:21',
'66:11:22',
'66:11:23',
'66:11:24',
'66:11:25',
'66:11:26',
'66:11:27',
'66:11:28',
'66:11:29',
'66:11:30',
'66:11:31',
'66:11:32',
'66:11:33',
'66:11:34',
'66:11:35',
'66:11:36',
'66:11:37',
'66:11:38',
'66:11:39',
'66:11:40',
'66:11:41',
'66:11:42',
'66:11:43',
'66:11:44',
'66:11:45',
'66:11:46',
'66:11:47',
'66:11:48',
'66:11:49',
'66:11:50',
'66:11:51',
'66:11:52',
'66:11:53',
'66:11:54',
'66:11:55',
'66:11:56',
'66:11:57',
'66:11:58',
'66:11:59',
'66:11:60',
'66:11:61',
'66:11:62',
'66:11:63',
'66:11:64',
'66:11:65',
'66:11:66',
'66:11:67',
'66:11:68',
'66:11:69',
'66:11:70',
'66:11:71',
'66:11:72',
'66:11:73',
'66:11:74',
'66:11:75',
'66:11:76',
'66:11:77',
'66:11:78',
'66:11:79',
'66:11:80',
'66:11:81',
'66:11:82',
'66:11:83',
'66:11:84',
'66:11:85',
'66:11:86',
'66:11:87',
'66:11:88',
'66:11:89',
'66:11:90',
'66:11:91',
'66:11:92',
'66:11:93',
'66:11:94',
'66:11:95',
'66:11:96',
'66:11:97',
'66:11:98',
'66:11:99',
'66:12:00',
'66:12:01',
'66:12:02',
'66:12:03',
'66:12:04',
'66:12:05',
'66:12:06',
'66:12:07',
'66:12:08',
'66:12:09',
'66:12:10',
'66:12:11',
'66:12:12',
'66:12:13',
'66:12:14',
'66:12:15',
'66:12:16',
'66:12:17',
'66:12:18',
'66:12:19',
'66:12:20',
'66:12:21',
'66:12:22',
'66:12:23',
'66:12:24',
'66:12:25',
'66:12:26',
'66:12:27',
'66:12:28',
'66:12:29',
'66:12:30',
'66:12:31',
'66:12:32',
'66:12:33',
'66:12:34',
'66:12:35',
'66:12:36',
'66:12:37',
'66:12:38',
'66:12:39',
'66:12:40',
'66:12:41',
'66:12:42',
'66:12:43',
'66:12:44',
'66:12:45',
'66:12:46',
'66:12:47',
'66:12:48',
'66:12:49',
'66:12:50',
'66:12:51',
'66:12:52',
'66:12:53',
'66:12:54',
'66:12:55',
'66:12:56',
'66:12:57',
'66:12:58',
'66:12:59',
'66:12:60',
'66:12:61',
'66:12:62',
'66:12:63',
'66:12:64',
'66:12:65',
'66:12:66',
'66:12:67',
'66:12:68',
'66:12:69',
'66:12:70',
'66:12:71',
'66:12:72',
'66:12:73',
'66:12:74',
'66:12:75',
'66:12:76',
'66:12:77',
'66:12:78',
'66:12:79',
'66:12:80',
'66:12:81',
'66:12:82',
'66:12:83',
'66:12:84',
'66:12:85',
'66:12:86',
'66:12:87',
'66:12:88',
'66:12:89',
'66:12:90',
'66:12:91',
'66:12:92',
'66:12:93',
'66:12:94',
'66:12:95',
'66:12:96',
'66:12:97',
'66:12:98',
'66:12:99',
'66:13:00',
'66:13:01',
'66:13:02',
'66:13:03',
'66:13:04',
'66:13:05',
'66:13:06',
'66:13:07',
'66:13:08',
'66:13:09',
'66:13:10',
'66:13:11',
'66:13:12',
'66:13:13',
'66:13:14',
'66:13:15',
'66:13:16',
'66:13:17',
'66:13:18',
'66:13:19',
'66:13:20',
'66:13:21',
'66:13:22',
'66:13:23',
'66:13:24',
'66:13:25',
'66:13:26',
'66:13:27',
'66:13:28',
'66:13:29',
'66:13:30',
'66:13:31',
'66:13:32',
'66:13:33',
'66:13:34',
'66:13:35',
'66:13:36',
'66:13:37',
'66:13:38',
'66:13:39',
'66:13:40',
'66:13:41',
'66:13:42',
'66:13:43',
'66:13:44',
'66:13:45',
'66:13:46',
'66:13:47',
'66:13:48',
'66:13:49',
'66:13:50',
'66:13:51',
'66:13:52',
'66:13:53',
'66:13:54',
'66:13:55',
'66:13:56',
'66:13:57',
'66:13:58',
'66:13:59',
'66:13:60',
'66:13:61',
'66:13:62',
'66:13:63',
'66:13:64',
'66:13:65',
'66:13:66',
'66:13:67',
'66:13:68',
'66:13:69',
'66:13:70',
'66:13:71',
'66:13:72',
'66:13:73',
'66:13:74',
'66:13:75',
'66:13:76',
'66:13:77',
'66:13:78',
'66:13:79',
'66:13:80',
'66:13:81',
'66:13:82',
'66:13:83',
'66:13:84',
'66:13:85',
'66:13:86',
'66:13:87',
'66:13:88',
'66:13:89',
'66:13:90',
'66:13:91',
'66:13:92',
'66:13:93',
'66:13:94',
'66:13:95',
'66:13:96',
'66:13:97',
'66:13:98',
'66:13:99',
'66:14:00',
'66:14:01',
'66:14:02',
'66:14:03',
'66:14:04',
'66:14:05',
'66:14:06',
'66:14:07',
'66:14:08',
'66:14:09',
'66:14:10',
'66:14:11',
'66:14:12',
'66:14:13',
'66:14:14',
'66:14:15',
'66:14:16',
'66:14:17',
'66:14:18',
'66:14:19',
'66:14:20',
'66:14:21',
'66:14:22',
'66:14:23',
'66:14:24',
'66:14:25',
'66:14:26',
'66:14:27',
'66:14:28',
'66:14:29',
'66:14:30',
'66:14:31',
'66:14:32',
'66:14:33',
'66:14:34',
'66:14:35',
'66:14:36',
'66:14:37',
'66:14:38',
'66:14:39',
'66:14:40',
'66:14:41',
'66:14:42',
'66:14:43',
'66:14:44',
'66:14:45',
'66:14:46',
'66:14:47',
'66:14:48',
'66:14:49',
'66:14:50',
'66:14:51',
'66:14:52',
'66:14:53',
'66:14:54',
'66:14:55',
'66:14:56',
'66:14:57',
'66:14:58',
'66:14:59',
'66:14:60',
'66:14:61',
'66:14:62',
'66:14:63',
'66:14:64',
'66:14:65',
'66:14:66',
'66:14:67',
'66:14:68',
'66:14:69',
'66:14:70',
'66:14:71',
'66:14:72',
'66:14:73',
'66:14:74',
'66:14:75',
'66:14:76',
'66:14:77',
'66:14:78',
'66:14:79',
'66:14:80',
'66:14:81',
'66:14:82',
'66:14:83',
'66:14:84',
'66:14:85',
'66:14:86',
'66:14:87',
'66:14:88',
'66:14:89',
'66:14:90',
'66:14:91',
'66:14:92',
'66:14:93',
'66:14:94',
'66:14:95',
'66:14:96',
'66:14:97',
'66:14:98',
'66:14:99',
'66:15:00',
'66:15:01',
'66:15:02',
'66:15:03',
'66:15:04',
'66:15:05',
'66:15:06',
'66:15:07',
'66:15:08',
'66:15:09',
'66:15:10',
'66:15:11',
'66:15:12',
'66:15:13',
'66:15:14',
'66:15:15',
'66:15:16',
'66:15:17',
'66:15:18',
'66:15:19',
'66:15:20',
'66:15:21',
'66:15:22',
'66:15:23',
'66:15:24',
'66:15:25',
'66:15:26',
'66:15:27',
'66:15:28',
'66:15:29',
'66:15:30',
'66:15:31',
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 bily Huazhong University of Science and Technology
#
# Distributed under terms of the MIT license.
"""Class for tracking using a track model."""
# Old cls (classification) branch combined with the new reg (regression) branch.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os.path as osp
import numpy as np
import cv2
from cv2 import imwrite
from copy import deepcopy
from utils.infer_utils import convert_bbox_format, Rectangle
from utils.misc_utils import get_center, get
from scripts.anchor_related import Anchor
# chose model:
# model 1: normal SSD with signal expanded
# model 2: normal SSD without signal expanded
# model 3: siamrpn's encode method
MODEL = 2
class TargetState(object):
    """Holds the tracker's estimate of the target for one frame."""

    def __init__(self, bbox, search_pos, scale_idx):
        # (cx, cy, w, h) of the target in the original image.
        self.bbox = bbox
        # Target center position inside the cropped search image.
        self.search_pos = search_pos
        # Index of the chosen scale among the searched scales.
        self.scale_idx = scale_idx
class Tracker(object):
"""Tracker based on the siamese model."""
def __init__(self, siamese_model, model_config, track_config):
self.siamese_model = siamese_model
self.model_config = model_config
self.track_config = track_config
self.num_scales = track_config['num_scales']
logging.info('track num scales -- {}'.format(self.num_scales))
scales = np.arange(self.num_scales) - get_center(self.num_scales)
self.search_factors = [self.track_config['scale_step'] ** x for x in scales]
self.x_image_size = track_config['x_image_size'] # Search image size
self.window = None # Cosine window
self.log_level = track_config['log_level']
self.anchor_op = Anchor(17, 17)
self.anchors = self.anchor_op.anchors
self.anchors = self.anchor_op.corner_to_center(self.anchors)
def track(self, sess, first_bbox, frames, logdir='/tmp'):
"""Runs tracking on a single image sequence."""
# Get initial target bounding box and convert to center based
bbox = convert_bbox_format(first_bbox, 'center-based')
# Feed in the first frame image to set initial state.
bbox_feed = [bbox.y, bbox.x, bbox.height, bbox.width]
input_feed = [frames[0], bbox_feed]
frame2crop_scale = self.siamese_model.initialize(sess, input_feed)
# Storing target state
original_target_height = bbox.height
original_target_width = bbox.width
search_center = np.array([get_center(self.x_image_size),
get_center(self.x_image_size)])
current_target_state = TargetState(bbox=bbox,
search_pos=search_center,
scale_idx=int(get_center(self.num_scales)))
include_first = get(self.track_config, 'include_first', False)
logging.info('Tracking include first -- {}'.format(include_first))
# Run tracking loop
reported_bboxs = []
# 读取gt画在image crop上,来验证回归框
# f = open('./__Data/tracking-one-Curation/Data/VID/train/a/202008280001/track.txt')
# gt_box = f.readlines()
for i, filename in enumerate(frames):
if i > 0 or include_first: # We don't really want to process the first image unless intended to do so.
# current_target_state:前一帧的bbox信息
bbox_feed = [current_target_state.bbox.y, current_target_state.bbox.x,
current_target_state.bbox.height, current_target_state.bbox.width]
input_feed = [filename, bbox_feed]
# 将当前帧和前一帧的bbox送进模型,得到响应图,对当前帧进行滑窗检测
outputs, metadata = self.siamese_model.inference_step(sess, input_feed)
search_scale_list = outputs['scale_xs'] # 缩放倍数
response = outputs['response_up']
response_size = response.shape[1]
reg_pred = outputs['reg_pred']
# Choose the scale whole response map has the highest peak
if self.num_scales > 1:
response_max = np.max(response, axis=(1, 2))
penalties = self.track_config['scale_penalty'] * np.ones((self.num_scales))
current_scale_idx = int(get_center(self.num_scales))
penalties[current_scale_idx] = 1.0
response_penalized = response_max * penalties
best_scale = np.argmax(response_penalized)
else:
best_scale = 0
response = response[best_scale]
response_show = deepcopy(response) # 保留原有的response
# decode
bboxes = np.zeros_like(reg_pred) # [289, 4]
if MODEL == 1: # model 1: normal SSD with signal expanded
bboxes[:, 0] = reg_pred[:, 0] * self.anchors[:, 2] * 0.1 + self.anchors[:, 0] # anchors: cx, cy, w, h
bboxes[:, 1] = reg_pred[:, 1] * self.anchors[:, 3] * 0.1 + self.anchors[:, 1]
bboxes[:, 2] = np.exp(reg_pred[:, 2] * 0.2) * self.anchors[:, 2]
bboxes[:, 3] = np.exp(reg_pred[:, 3] * 0.2) * self.anchors[:, 3] # [x,y,w,h], 289*4
elif MODEL == 2: # model 2: normal SSD without signal expanded
bboxes[:, 0] = reg_pred[:, 0] * self.anchors[:, 2] + self.anchors[:, 0] # anchors: cx, cy, w, h
bboxes[:, 1] = reg_pred[:, 1] * self.anchors[:, 3] + self.anchors[:, 1]
bboxes[:, 2] = np.exp(reg_pred[:, 2]) * self.anchors[:, 2]
bboxes[:, 3] = np.exp(reg_pred[:, 3]) * self.anchors[:, 3] # [x,y,w,h], 289*4
elif MODEL == 3: # model 3: siamrpn's encode method
bboxes[:, 0] = reg_pred[:, 0] * self.anchors[:, 2] + self.anchors[:, 0] # anchors: cx, cy, w, h
bboxes[:, 1] = reg_pred[:, 1] * self.anchors[:, 3] + self.anchors[:, 1]
bboxes[:, 2] = np.exp(reg_pred[:, 2]) * self.anchors[:, 2]
bboxes[:, 3] = np.exp(reg_pred[:, 3]) * self.anchors[:, 3] # [x,y,w,h], 289*4
else:
print("please chose a decode method!")
with np.errstate(all='raise'): # Raise error if something goes wrong
response = response - np.min(response)
response = response / np.sum(response)
if self.window is None:
window = np.dot(np.expand_dims(np.hanning(response_size), 1),
np.expand_dims(np.hanning(response_size), 0))
self.window = window / np.sum(window) # normalize window
window_influence = self.track_config['window_influence']
response = (1 - window_influence) * response + window_influence * self.window
# Find maximum response
r_max, c_max = np.unravel_index(response.argmax(),
response.shape)
# Convert from crop-relative coordinates to frame coordinates
# 坐标转换,从相对坐标转换为帧坐标
p_coor = np.array([r_max, c_max]) # 得分最高的点的坐标索引
# displacement from the center in instance final representation ...
disp_instance_final = p_coor - get_center(response_size) # 最大值与上一帧目标中心的相对位移
# ... in instance feature space ... 双线性插值法进行上采样
upsample_factor = self.track_config['upsample_factor'] # upsample factor=16
disp_instance_feat = disp_instance_final / upsample_factor # 映射到17*17的score map上的相对位移
# ... Avoid empty position ...
r_radius = int(response_size / upsample_factor / 2) # r = 8
disp_instance_feat = np.maximum(np.minimum(disp_instance_feat, r_radius), -r_radius) # 保证disp_instance_feat不会越界
# ... in instance input ...
disp_instance_input = disp_instance_feat * self.model_config['embed_config']['stride']
# ... in instance original crop (in frame coordinates)
disp_instance_frame = disp_instance_input / search_scale_list[best_scale]
# Position within frame in frame coordinates
y = current_target_state.bbox.y
x = current_target_state.bbox.x
temp_y = current_target_state.bbox.y
temp_x = current_target_state.bbox.x
y += disp_instance_frame[0]
x += disp_instance_frame[1]
bboxes_temp = bboxes.reshape(17, 17, 4)
box = bboxes_temp[int(round(r_max/upsample_factor)), int(round(c_max/upsample_factor)), :]
# Target scale damping and saturation
target_scale = current_target_state.bbox.height / original_target_height
search_factor = self.search_factors[best_scale]
scale_damp = self.track_config['scale_damp'] # damping factor for scale update
target_scale *= ((1 - scale_damp) * 1.0 + scale_damp * search_factor)
target_scale = np.maximum(0.2, np.minimum(5.0, target_scale))
# Some book keeping
height = original_target_height * target_scale
width = original_target_width * target_scale
current_target_state.bbox = Rectangle(x, y, width, height)
current_target_state.scale_idx = best_scale
current_target_state.search_pos = search_center + disp_instance_input
assert 0 <= current_target_state.search_pos[0] < self.x_image_size, \
'target position in feature space should be no larger than input image size'
assert 0 <= current_target_state.search_pos[1] < self.x_image_size, \
'target position in feature space should be no larger than input image size'
if self.log_level > 0:
np.save(osp.join(logdir, 'num_frames.npy'), [i + 1])
# Select the image with the highest score scale and convert it to uint8
image_cropped = outputs['image_cropped'][best_scale].astype(np.uint8)
# Note that imwrite in cv2 assumes the image is in BGR format.
# However, the cropped image returned by TensorFlow is RGB.
# Therefore, we convert color format using cv2.cvtColor
imwrite(osp.join(logdir, 'image_cropped{}.jpg'.format(i)),
cv2.cvtColor(image_cropped, cv2.COLOR_RGB2BGR))
np.save(osp.join(logdir, 'best_scale{}.npy'.format(i)), [best_scale])
np.save(osp.join(logdir, 'response{}.npy'.format(i)), response)
y_search, x_search = current_target_state.search_pos
search_scale = search_scale_list[best_scale]
target_height_search = height * search_scale
target_width_search = width * search_scale
bbox_search = Rectangle(x_search, y_search, target_width_search, target_height_search)
bbox_search = convert_bbox_format(bbox_search, 'top-left-based')
np.save(osp.join(logdir, 'bbox{}.npy'.format(i)),
[bbox_search.x, bbox_search.y, bbox_search.width, bbox_search.height])
####################################################################################################
# 画出每一个框
def iou(p1, p2, p3, p4):
s_rec1 = (p2[1] - p1[1]) * (p2[0] - p1[0])
s_rec2 = (p4[1] - p3[1]) * (p4[0] - p3[0])
sum_area = s_rec1 + s_rec2
left = max(p1[0], p3[0])
right = min(p2[0], p4[0])
top = max(p1[1], p3[1])
bottom = min(p2[1], p4[1])
if left >= right or top >= bottom:
return 0
else:
intersect = (right - left) * (bottom - top)
return (intersect / (sum_area - intersect))*1.0
m = len(bboxes)
gt_box_i = gt_box[i].split(',')
rp1 = (int(gt_box_i[0]), int(gt_box_i[1]))
rp2 = (int(gt_box_i[2]), int(gt_box_i[3]))
# image = cv2.imread(filename)
image_cropped = cv2.rectangle(image_cropped, rp1, rp2, (255, 255, 255))
for j in range(m):
p1 = (int(np.round(bboxes[j, 0] - bboxes[j, 2] / 2)), int(np.round(bboxes[j, 1] - bboxes[j, 3] / 2)))
p2 = (int(np.round(bboxes[j, 0] + bboxes[j, 2] / 2)), int(np.round(bboxes[j, 1] + bboxes[j, 3] / 2)))
# pc = np.array([bboxes[j, 0], bboxes[j, 1]])
# disp_final = pc - get_center(response_size)
# disp_input = disp_final / search_scale_list[best_scale]
# temp_x += disp_input[0]
# temp_y += disp_input[1] # 注意xy的顺序有没有反
# temp_w = bboxes[j, 2] / search_scale_list[best_scale]
# temp_h = bboxes[j, 3] / search_scale_list[best_scale)
# final_box = convert_bbox_format(temp_box, 'top-left-based')
riou = iou(p1, p2, rp1, rp2)
if riou >= 0.4:
image_cropped_to_write = deepcopy(image_cropped)
coor_x = int(j / 17)
coor_y = j % 17
txt_str = str(np.round(response_show[coor_x * 16, coor_y * 16], 3)) + ', (' + str(coor_x) + ',' + str(coor_y) + ')'
if reg_pred[j, 0] + reg_pred[j, 1] + reg_pred[j, 2] + reg_pred[j, 3] == 0:
image_cropped_to_write = cv2.rectangle(image_cropped_to_write, p1, p2, (0, 0, 255))
imwrite(osp.join(logdir, 'test/image_cropped{}_{}.jpg'.format(i, j)),
cv2.cvtColor(image_cropped_to_write, cv2.COLOR_RGB2BGR))
elif riou < 0.55:
# image = cv2.rectangle(image, (int(round(final_box[0])), int(round(final_box[1]))), (int(round(final_box[2])), int(round(final_box[3]))), (0, riou | |
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2019. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
""" Incident poller for a ProofPoint TRAP server """
import logging
import time
import pprint
import re
from threading import Thread
from resilient_circuits import ResilientComponent, handler
from resilient import SimpleHTTPException
from resilient_lib.components.integration_errors import IntegrationError
from fn_proofpoint_trap.lib.helpers import validate_opts
from fn_proofpoint_trap.lib.pptr_client import PPTRClient
"""
Summary:
Threaded Poller to pull in Proofpoint TRAP Trap Incidents and Events
Naming:
Resilient Incidents will be named in the following format:
Proofpoint TRAP TRAP: {} - {}
Incident Number
Incident Description
Data Table:
Proofpoint TRAP TRAP Events
API Name: proofpoint_trap_events
Example Data for Table:
{'attackDirection': 'inbound',
'category': 'phish',
'description': 'Redirected to known phishing site',
'id': 18,
'received': '2019-03-25T15:39:14Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked',
'threatname': 'Redirected to known phishing site'}
Additional Data for Incident:
'incident_field_values': [{'name': 'Severity', 'value': 'Informational'},
{'name': 'Classification', 'value': 'Phishing'},
{'name': 'Attack Vector', 'value': 'Email'},
{'name': 'Abuse Disposition', 'value': None}],
Message Destination:
fn_proofpoint_trap
Initial Functions:
fn_proofpoint_trap_get_incident_details
Input Fields:
trap_incident_id
Initial Codegen Performed:
codegen -m fn_proofpoint_trap -f fn_proofpoint_trap_get_incident_details
--datatable proofpoint_trap_events -p fn_proofpoint_trap
Full Example Payload:
{'assignee': 'Unassigned',
'created_at': '2019-03-25T15:30:13Z',
'description': '',
'event_count': 14,
'event_sources': ['Proofpoint TRAP TAP'],
'events': [{'attackDirection': 'inbound',
'category': 'phish',
'id': 19,
'received': '2019-03-26T18:44:49Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked'},
{'attackDirection': 'inbound',
'category': 'phish',
'id': 22,
'received': '2019-03-26T18:55:49Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked'},
{'attackDirection': 'inbound',
'category': 'phish',
'description': 'Redirected to known phishing site',
'id': 18,
'received': '2019-03-25T15:39:14Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked',
'threatname': 'Redirected to known phishing site'},
{'attackDirection': 'inbound',
'category': 'phish',
'description': 'Redirected to known phishing site',
'id': 20,
'received': '2019-03-26T18:44:49Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked',
'threatname': 'Redirected to known phishing site'},
{'attackDirection': 'inbound',
'category': 'phish',
'description': 'Malicious content dropped during execution',
'id': 23,
'received': '2019-03-26T18:55:50Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked',
'threatname': 'Malicious content dropped during execution'},
{'attackDirection': 'inbound',
'category': 'phish',
'id': 17,
'received': '2019-03-25T15:39:13Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked'},
{'attackDirection': 'inbound',
'category': 'phish',
'id': 14,
'received': '2019-03-25T15:34:13Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked'},
{'attackDirection': 'inbound',
'category': 'phish',
'id': 13,
'received': '2019-03-25T15:30:13Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked'},
{'attackDirection': 'inbound',
'category': 'phish',
'description': 'Malicious content dropped during execution',
'id': 15,
'received': '2019-03-25T15:34:13Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked',
'threatname': 'Malicious content dropped during execution'},
{'attackDirection': 'inbound',
'category': 'phish',
'description': 'Redirected to known phishing site',
'id': 21,
'received': '2019-03-26T18:55:49Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked',
'threatname': 'Redirected to known phishing site'},
{'attackDirection': 'inbound',
'category': 'phish',
'description': 'Malicious content dropped during execution',
'id': 25,
'received': '2019-03-26T18:56:50Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked',
'threatname': 'Malicious content dropped during execution'},
{'attackDirection': 'inbound',
'category': 'phish',
'description': 'Malicious content dropped during execution',
'id': 16,
'received': '2019-03-25T15:39:13Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked',
'threatname': 'Malicious content dropped during execution'},
{'attackDirection': 'inbound',
'category': 'phish',
'description': 'Redirected to known phishing site',
'id': 26,
'received': '2019-03-26T18:56:49Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked',
'threatname': 'Redirected to known phishing site'},
{'attackDirection': 'inbound',
'category': 'phish',
'id': 24,
'received': '2019-03-26T18:56:49Z',
'severity': 'Info',
'source': 'Proofpoint TRAP TAP',
'state': 'Linked'}],
'failed_quarantines': 0,
'hosts': {'attacker': ['172.16.17.32',
'http://calina.info/payporte.php/',
'172.16.17.32',
'http://www.willype.info/house.php/'],
'cnc': ['192.168.3.11',
'172.16.58.3',
'172.16.58.3',
'172.16.17.32',
'192.168.3.11',
'192.168.127.12',
'172.16.31.10',
'192.168.127.12',
'172.16.31.10',
'172.16.17.32'],
'forensics': ['www.willype.info',
'calina.info',
'http://calina.info/payporte.php/',
'http://www.willype.info/house.php/?email=redacted_email',
'http://calina.info/payporte.php/?email=redacted_email',
'http://www.willype.info/house.php/']},
'id': 7,
'incident_field_values': [{'name': 'Severity', 'value': 'Informational'},
{'name': 'Classification', 'value': 'Phishing'},
{'name': 'Attack Vector', 'value': 'Email'},
{'name': 'Abuse Disposition', 'value': None}],
'pending_quarantines': 0,
'quarantine_results': [],
'score': 1400,
'state': 'New',
'successful_quarantines': 0,
'summary': 'Malicious content dropped during execution',
'team': 'Unassigned',
'updated_at': '2019-03-26T18:56:50Z',
'users': []}
"""
# Map of Proofpoint TRAP event field to data table column
# (TRAP event payload key -> Resilient data table column API name).
PROOFPOINT_TRAP_EVENTS_MAP = {
    'id': 'event_id',
    'description': 'event_description',
    'category': 'event_category',
    'attackDirection': 'event_attackdirection',
    'severity': 'event_severity',
    'source': 'event_source',
    'threatname': 'event_threatname',
    'state': 'event_state',
    'received': 'event_received',
}
# API Name(s) of Data Table(s) populated in Resilient.
DATA_TABLE_IDS = [
    'proofpoint_trap_events',
]
# Incident Fields returned from TRAP inside the
# incident_field_values list.
# List of Dictionaries; format example for list items:
# {'name': 'Classification', 'value': 'Phishing'}
INCIDENT_FIELD_NAMES = [
    'Severity',
    'Classification',
    'Attack Vector',
    'Abuse Disposition',
]
# Relevant TIMESTAMPS from TRAP
TIMESTAMPS = [
    'created_at',
    'updated_at',
]
# Incident Types
# API Field is incident_type_ids @ Resilient (int)
# API Field is incident_field_values where name: Classification (dict)
# NOTE(review): several case variants are listed ('MALWARE'/'Malware',
# 'Phishing'/'phish'), presumably to match spellings seen in TRAP payloads --
# confirm before consolidating.
CLASS2TYPEID = {
    'MALWARE': 19,
    'Malware': 19,
    'Phishing': 22,
    'phish': 22,
    'Denial of Service': 21,
    'Other': 18,
    'Communication Error': 17,
    'System Intrusion': 20,
}
""" "incident_type_ids": [
{
"name": "System Intrusion",
"id": 20
},
{
"name": "Lost documents / files / records",
"id": 4
},
{
"name": "Lost PC / laptop / tablet",
"id": 3
},
{
"name": "Denial of Service",
"id": 21
},
{
"name": "Communication error (fax; email)",
"id": 17
},
{
"name": "Improper disposal: digital asset(s)",
"id": 6
},
{
"name": "Lost PDA / smartphone",
"id": 1
},
{
"name": "Improper disposal: documents / files",
"id": 7
},
{
"name": "Malware",
"id": 19
},
{
"name": "Lost storage device / media",
"id": 8
}
]
"""
# NIST Attack Vectors: maps TRAP attack-vector names to Resilient ids.
# API Field is nist_attack_vectors @ Resilient
# API Field is incident_field_values where name: Attack Vectors @ TRAP
NIST_VECTORS = {
    'Email': 4,
    'Impersonation': 5,
    'Attrition': 2,
    'External Media': 1,
    'Improper Usage': 6,
    'Loss or Theft of Equipment': 7,
    'Web': 3,
    'Other': 8,
}
"""
"nist_attack_vectors": [
{
"name": "External/Removable Media",
"id": 1
},
{
"name": "Loss or Theft of Equipment",
"id": 7
},
{
"name": "Attrition (Denial-of-Service and Brute-Force Attacks)",
"id": 2
},
{
"name": "Other",
"id": 8
},
{
"name": "Improper Usage",
"id": 6
},
{
"name": "Web",
"id": 3
},
{
"name": "E-mail",
"id": 4
},
{
"name": "Impersonation",
"id": 5
}
]
"""
# Abuse Disposition Fields
# API Field is confirmed @ Resilient (boolean)
# API Field is incident_field_values where name: Abuse Disposition @ TRAP
ABUSE_DISPOSITION = {
    None: False,
    'None': False,
    'Confirmed': True
}
# Severity mapping.
# API Field is Severity @ Resilient
# API Field is incident_field_values where name: Severity @ TRAP
INCIDENT_SEVERITY = {
    # Resilient has no 'Informational' so temporarily filter down to 'Low'
    'Informational': 'Low',
    'Low': 'Low',
    'Medium': 'Medium',
    'High': 'High',
    # Resilient has no 'Critical' so temporarily filter down to 'High'
    'Critical': 'High'
}
# Custom Incident Fields
CUSTOM_FIELDS = [
    # TRAP Incidents id field to be posted to Resilient Incidents
    'proofpoint_trap_incident_id',
]
# Regular Expression Definitions for Artifact Extraction
REGEX_DEFS = {
    'URL': r'^((?:http(?:s)?\://)[a-zA-Z0-9\.\?\#\&\=\/_-]{1,})$',
    'IP Address': r'^((?:[0-9]{1,3}\.){3}(?:[0-9]{1,3}))$',
    'DNS Name': r'^((?!(?:http(s)?:\/\/?))(?:(?:[\w-]{1,}[\.])){1,}([a-zA-Z]{2,}))$',
}
# Module-level logger for this poller component.
LOG = logging.getLogger(__name__)
class PPTRIncidentPolling(ResilientComponent):
"""Component that polls for new data arriving from Proofpoint TRAP"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(PPTRIncidentPolling, self).__init__(opts)
self.options = opts.get("fn_proofpoint_trap", {})
validate_opts(self)
self.stop_thread = False
self.threads = []
self.main()
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get("fn_proofpoint_trap", {})
validate_opts(self)
self.stop_thread = True
self.main()
def main(self):
"""main entry point, instiantiate polling thread"""
# initialize last update to startup interval if present, otherwise update interval
options = self.options
startup_interval = options.get('startup_interval', None)
if startup_interval is not None:
startup_interval = int(startup_interval)
self.lastupdate = startup_interval
polling_interval = int(options.get("polling_interval", 0))
self.state = options.get('state', None)
# Use a timeout value of polling_interval (in secs) + 10 secs to wait for all threads to end.
thread_timeout = (polling_interval * 60) + 10
# Wait for threads to stop within thread timeout interval.
stop_time = time.time() + thread_timeout
while any(t.isAlive for t in self.threads) and (time.time() < stop_time):
time.sleep(0.1)
# Get rid of stopped threads from list.
self.threads = [t for t in self.threads if t.is_alive()]
if self.threads:
# Polling threads still running raise runtime error.
LOG.error("There were %d polling threads which did not stop within timeout period on restart",
len(self.threads))
raise RuntimeError("There were {} polling threads which did not stop within timeout period on restart."
.format(len(self.threads)))
# Turn off 'stop_thread' flag.
self.stop_thread = False
if polling_interval > 0:
# Create and start polling thread
thread = Thread(target=self.polling_thread)
self.threads.append(thread)
thread.daemon = True
thread.start()
LOG.info("Polling for incidents in Proofpoint TRAP every %d minutes", polling_interval)
else:
LOG.info("Polling for incidents in Proofpoint TRAP not enabled")
def polling_thread(self):
"""contents of polling thread, alternately check for | |
'duk_bi_date_prototype_set_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_TIMESETTER + BI_DATE_FLAG_LOCALTIME + (1 << 12) } },
{ 'name': 'setUTCMilliseconds', 'native': 'duk_bi_date_prototype_set_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_TIMESETTER + (1 << 12) } },
{ 'name': 'setSeconds', 'native': 'duk_bi_date_prototype_set_shared', 'length': 2, 'varargs': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_TIMESETTER + BI_DATE_FLAG_LOCALTIME + (2 << 12) } },
{ 'name': 'setUTCSeconds', 'native': 'duk_bi_date_prototype_set_shared', 'length': 2, 'varargs': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_TIMESETTER + (2 << 12) } },
{ 'name': 'setMinutes', 'native': 'duk_bi_date_prototype_set_shared', 'length': 3, 'varargs': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_TIMESETTER + BI_DATE_FLAG_LOCALTIME + (3 << 12) } },
{ 'name': 'setUTCMinutes', 'native': 'duk_bi_date_prototype_set_shared', 'length': 3, 'varargs': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_TIMESETTER + (3 << 12) } },
{ 'name': 'setHours', 'native': 'duk_bi_date_prototype_set_shared', 'length': 4, 'varargs': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_TIMESETTER + BI_DATE_FLAG_LOCALTIME + (4 << 12) } },
{ 'name': 'setUTCHours', 'native': 'duk_bi_date_prototype_set_shared', 'length': 4, 'varargs': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_TIMESETTER + (4 << 12) } },
{ 'name': 'setDate', 'native': 'duk_bi_date_prototype_set_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_LOCALTIME + (1 << 12) } },
{ 'name': 'setUTCDate', 'native': 'duk_bi_date_prototype_set_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': 0 + (1 << 12) } },
{ 'name': 'setMonth', 'native': 'duk_bi_date_prototype_set_shared', 'length': 2, 'varargs': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_LOCALTIME + (2 << 12) } },
{ 'name': 'setUTCMonth', 'native': 'duk_bi_date_prototype_set_shared', 'length': 2, 'varargs': True, 'magic': { 'type': 'plain', 'value': 0 + (2 << 12) } },
{ 'name': 'setFullYear', 'native': 'duk_bi_date_prototype_set_shared', 'length': 3, 'varargs': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_NAN_TO_ZERO + BI_DATE_FLAG_LOCALTIME + (3 << 12) } },
{ 'name': 'setUTCFullYear', 'native': 'duk_bi_date_prototype_set_shared', 'length': 3, 'varargs': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_NAN_TO_ZERO + (3 << 12) } },
# Non-standard extensions: E5 Section B.2.4, B.2.5, B.2.6
#
# 'length' values are not given explicitly but follows the general rule.
# The lengths below agree with V8.
{ 'name': 'getYear', 'native': 'duk_bi_date_prototype_get_shared', 'length': 0, 'section_b': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_LOCALTIME + BI_DATE_FLAG_SUB1900 + (BI_DATE_IDX_YEAR << 12) } },
{ 'name': 'setYear', 'native': 'duk_bi_date_prototype_set_shared', 'length': 1, 'section_b': True, 'magic': { 'type': 'plain', 'value': BI_DATE_FLAG_NAN_TO_ZERO + BI_DATE_FLAG_YEAR_FIXUP + (3 << 12) } },
# Note: toGMTString() is required to initially be the same Function object as the initial
# Date.prototype.toUTCString. In other words: Date.prototype.toGMTString === Date.prototype.toUTCString --> true.
# This is implemented as a special post-tweak in duk_hthread_builtins.c, so the property is not included here.
#
# Note that while Smjs respects the requirement in E5 Section B.2.6, V8 does not.
#{ 'name': 'toGMTString', 'native': 'duk_bi_date_prototype_to_gmt_string', 'length': 0, 'section_b': True },
],
}
# Built-in metadata: the RegExp constructor function object.
bi_regexp_constructor = {
    'internal_prototype': 'bi_function_prototype',
    'external_prototype': 'bi_regexp_prototype',
    'class': 'Function',
    'name': 'RegExp',
    'length': 2,
    'native': 'duk_bi_regexp_constructor',
    'callable': True,
    'constructable': True,
    'values': [],
    'functions': [],
}

# Built-in metadata: RegExp.prototype, including the embedded compiled
# bytecode for an empty-string regexp (see comments below).
bi_regexp_prototype = {
    'internal_prototype': 'bi_object_prototype',
    'external_constructor': 'bi_regexp_constructor',
    'class': 'RegExp',
    'values': [
        # RegExp internal value should match that of new RegExp() (E5 Sections 15.10.6
        # and 15.10.7), i.e. a bytecode sequence that matches an empty string.
        # The compiled regexp bytecode for that is embedded here, and must match the
        # defines in duk_regexp.h.
        #
        # Note that the property attributes are non-default.
        {
            # Compiled bytecode, must match duk_regexp.h.
            'name': internal('bytecode'),
            'value': unichr(0) +  # flags (none)
                     unichr(2) +  # nsaved == 2
                     unichr(1),   # DUK_REOP_MATCH
            'attributes': '',
        },
        {
            # An object created as new RegExp('') should have the escaped source
            # '(?:)' (E5 Section 15.10.4.1).  However, at least V8 and Smjs seem
            # to have an empty string here.
            'name': 'source',
            'value': '(?:)',
            'attributes': '',
        },
        {
            'name': 'global',
            'value': False,
            'attributes': '',
        },
        {
            'name': 'ignoreCase',
            'value': False,
            'attributes': '',
        },
        {
            'name': 'multiline',
            'value': False,
            'attributes': '',
        },
        {
            # 'lastIndex' is writable, even in the RegExp.prototype object.
            # This matches at least V8.
            'name': 'lastIndex',
            'value': 0,
            'attributes': 'w',
        },
    ],
    'functions': [
        { 'name': 'exec', 'native': 'duk_bi_regexp_prototype_exec', 'length': 1 },
        { 'name': 'test', 'native': 'duk_bi_regexp_prototype_test', 'length': 1 },
        { 'name': 'toString', 'native': 'duk_bi_regexp_prototype_to_string', 'length': 0 },
    ],
}
# Built-in metadata: the Error constructor function object.
bi_error_constructor = {
    'internal_prototype': 'bi_function_prototype',
    'external_prototype': 'bi_error_prototype',
    'class': 'Function',
    'name': 'Error',
    'length': 1,
    'native': 'duk_bi_error_constructor_shared',
    'callable': True,
    'constructable': True,
    'magic': { 'type': 'bidx', 'value': 'bi_error_prototype' },
    'values': [],
    'functions': [],
}

# Built-in metadata: Error.prototype, with custom stack/fileName/lineNumber
# accessor properties in addition to the standard name/message.
bi_error_prototype = {
    'internal_prototype': 'bi_object_prototype',
    'external_constructor': 'bi_error_constructor',
    'class': 'Error',
    'values': [
        # Standard properties; property attributes:
        #
        # 'message' is writable and deletable.  This matches the default
        # attributes of 'wc'.  V8 and Smjs both match this.
        #
        # 'name' is writable and deletable.  This matches the default
        # attributes too.  Smjs behaves like this, but in V8 'name' is
        # non-writable:
        #
        #  > Object.getOwnPropertyDescriptor(Error.prototype, 'name')
        #  { value: 'Error',
        #    writable: false,
        #    enumerable: false,
        #    configurable: false }
        #
        # We go with the standard attributes ("wc").
        { 'name': 'name', 'value': 'Error' },
        { 'name': 'message', 'value': '' },
        # Custom properties
        { 'name': 'stack',
          'getter': 'duk_bi_error_prototype_stack_getter',
          'setter': 'duk_bi_error_prototype_nop_setter' },
        { 'name': 'fileName',
          'getter': 'duk_bi_error_prototype_filename_getter',
          'setter': 'duk_bi_error_prototype_nop_setter' },
        { 'name': 'lineNumber',
          'getter': 'duk_bi_error_prototype_linenumber_getter',
          'setter': 'duk_bi_error_prototype_nop_setter' },
    ],
    'functions': [
        { 'name': 'toString', 'native': 'duk_bi_error_prototype_to_string', 'length': 0 },
    ],
}
# NOTE: Error subclass prototypes have an empty 'message' property, even
# though one is inherited already from Error prototype (E5 Section 15.11.7.10).
#
# V8 does not respect this: Error subclasses ("native Errors" in E5 spec)
# do not have a 'message' property at all.  Also, in V8 their 'name' property
# is not writable and configurable as E5 requires.

# EvalError built-in constructor/prototype pair.
bi_eval_error_constructor = {
    'internal_prototype': 'bi_function_prototype',
    'external_prototype': 'bi_eval_error_prototype',
    'class': 'Function',
    'name': 'EvalError',
    'length': 1,
    'native': 'duk_bi_error_constructor_shared',
    'callable': True,
    'constructable': True,
    'magic': { 'type': 'bidx', 'value': 'bi_eval_error_prototype' },
    'values': [],
    'functions': [],
}
bi_eval_error_prototype = {
    'internal_prototype': 'bi_error_prototype',
    'external_constructor': 'bi_eval_error_constructor',
    'class': 'Error',
    'values': [
        { 'name': 'name', 'value': 'EvalError' },
        { 'name': 'message', 'value': '' },
    ],
    'functions': [],
}

# RangeError built-in constructor/prototype pair.
bi_range_error_constructor = {
    'internal_prototype': 'bi_function_prototype',
    'external_prototype': 'bi_range_error_prototype',
    'class': 'Function',
    'name': 'RangeError',
    'length': 1,
    'native': 'duk_bi_error_constructor_shared',
    'callable': True,
    'constructable': True,
    'magic': { 'type': 'bidx', 'value': 'bi_range_error_prototype' },
    'values': [],
    'functions': [],
}
bi_range_error_prototype = {
    'internal_prototype': 'bi_error_prototype',
    'external_constructor': 'bi_range_error_constructor',
    'class': 'Error',
    'values': [
        { 'name': 'name', 'value': 'RangeError' },
        { 'name': 'message', 'value': '' },
    ],
    'functions': [],
}

# ReferenceError built-in constructor/prototype pair.
bi_reference_error_constructor = {
    'internal_prototype': 'bi_function_prototype',
    'external_prototype': 'bi_reference_error_prototype',
    'class': 'Function',
    'name': 'ReferenceError',
    'length': 1,
    'native': 'duk_bi_error_constructor_shared',
    'callable': True,
    'constructable': True,
    'magic': { 'type': 'bidx', 'value': 'bi_reference_error_prototype' },
    'values': [],
    'functions': [],
}
bi_reference_error_prototype = {
    'internal_prototype': 'bi_error_prototype',
    'external_constructor': 'bi_reference_error_constructor',
    'class': 'Error',
    'values': [
        { 'name': 'name', 'value': 'ReferenceError' },
        { 'name': 'message', 'value': '' },
    ],
    'functions': [],
}

# SyntaxError built-in constructor/prototype pair.
bi_syntax_error_constructor = {
    'internal_prototype': 'bi_function_prototype',
    'external_prototype': 'bi_syntax_error_prototype',
    'class': 'Function',
    'name': 'SyntaxError',
    'length': 1,
    'native': 'duk_bi_error_constructor_shared',
    'callable': True,
    'constructable': True,
    'magic': { 'type': 'bidx', 'value': 'bi_syntax_error_prototype' },
    'values': [],
    'functions': [],
}
bi_syntax_error_prototype = {
    'internal_prototype': 'bi_error_prototype',
    'external_constructor': 'bi_syntax_error_constructor',
    'class': 'Error',
    'values': [
        { 'name': 'name', 'value': 'SyntaxError' },
        { 'name': 'message', 'value': '' },
    ],
    'functions': [],
}

# TypeError built-in constructor/prototype pair.
bi_type_error_constructor = {
    'internal_prototype': 'bi_function_prototype',
    'external_prototype': 'bi_type_error_prototype',
    'class': 'Function',
    'name': 'TypeError',
    'length': 1,
    'native': 'duk_bi_error_constructor_shared',
    'callable': True,
    'constructable': True,
    'magic': { 'type': 'bidx', 'value': 'bi_type_error_prototype' },
    'values': [],
    'functions': [],
}
bi_type_error_prototype = {
    'internal_prototype': 'bi_error_prototype',
    'external_constructor': 'bi_type_error_constructor',
    'class': 'Error',
    'values': [
        { 'name': 'name', 'value': 'TypeError' },
        { 'name': 'message', 'value': '' },
    ],
    'functions': [],
}

# URIError built-in constructor/prototype pair.
bi_uri_error_constructor = {
    'internal_prototype': 'bi_function_prototype',
    'external_prototype': 'bi_uri_error_prototype',
    'class': 'Function',
    'name': 'URIError',
    'length': 1,
    'native': 'duk_bi_error_constructor_shared',
    'callable': True,
    'constructable': True,
    'magic': { 'type': 'bidx', 'value': 'bi_uri_error_prototype' },
    'values': [],
    'functions': [],
}
bi_uri_error_prototype = {
    'internal_prototype': 'bi_error_prototype',
    'external_constructor': 'bi_uri_error_constructor',
    'class': 'Error',
    'values': [
        { 'name': 'name', 'value': 'URIError' },
        { 'name': 'message', 'value': '' },
    ],
    'functions': [],
}
# Math global object: plain object (class 'Math'), numeric constants plus the
# one-arg/two-arg math natives dispatched through a plain-integer magic index.
bi_math = {
    'internal_prototype': 'bi_object_prototype',
    # apparently no external 'prototype' property
    # apparently no external 'constructor' property
    'class': 'Math',
    'values': [
        # DBL_* come from elsewhere in this file; 'attributes': '' marks the
        # property attribute string (empty here, unlike the default).
        { 'name': 'E', 'value': DBL_E, 'attributes': '' },
        { 'name': 'LN10', 'value': DBL_LN10, 'attributes': '' },
        { 'name': 'LN2', 'value': DBL_LN2, 'attributes': '' },
        { 'name': 'LOG2E', 'value': DBL_LOG2E, 'attributes': '' },
        { 'name': 'LOG10E', 'value': DBL_LOG10E, 'attributes': '' },
        { 'name': 'PI', 'value': DBL_PI, 'attributes': '' },
        { 'name': 'SQRT1_2', 'value': DBL_SQRT1_2, 'attributes': '' },
        { 'name': 'SQRT2', 'value': DBL_SQRT2, 'attributes': '' },
    ],
    'functions': [
        # One- and two-argument functions share a native; the BI_MATH_*_IDX
        # magic value selects the concrete C math function.
        { 'name': 'abs', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_FABS_IDX } },
        { 'name': 'acos', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_ACOS_IDX } },
        { 'name': 'asin', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_ASIN_IDX } },
        { 'name': 'atan', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_ATAN_IDX } },
        { 'name': 'atan2', 'native': 'duk_bi_math_object_twoarg_shared', 'length': 2, 'magic': { 'type': 'plain', 'value': BI_MATH_ATAN2_IDX } },
        { 'name': 'ceil', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_CEIL_IDX } },
        { 'name': 'cos', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_COS_IDX } },
        { 'name': 'exp', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_EXP_IDX } },
        { 'name': 'floor', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_FLOOR_IDX } },
        { 'name': 'log', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_LOG_IDX } },
        # max/min/random have dedicated natives; max/min are varargs with the
        # spec'd .length of 2.
        { 'name': 'max', 'native': 'duk_bi_math_object_max', 'length': 2, 'varargs': True },
        { 'name': 'min', 'native': 'duk_bi_math_object_min', 'length': 2, 'varargs': True },
        { 'name': 'pow', 'native': 'duk_bi_math_object_twoarg_shared', 'length': 2, 'magic': { 'type': 'plain', 'value': BI_MATH_POW_IDX } },
        { 'name': 'random', 'native': 'duk_bi_math_object_random', 'length': 0 },
        { 'name': 'round', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_ROUND_IDX } },
        { 'name': 'sin', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_SIN_IDX } },
        { 'name': 'sqrt', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_SQRT_IDX } },
        { 'name': 'tan', 'native': 'duk_bi_math_object_onearg_shared', 'length': 1, 'magic': { 'type': 'plain', 'value': BI_MATH_TAN_IDX } },
    ],
}
# JSON global object: plain object (class 'JSON') with the two E5 methods.
bi_json = {
    'internal_prototype': 'bi_object_prototype',
    # apparently no external 'prototype' property
    # apparently no external 'constructor' property
    'class': 'JSON',
    'values': [],
    'functions': [
        { 'name': 'parse', 'native': 'duk_bi_json_object_parse', 'length': 2 },
        { 'name': 'stringify', 'native': 'duk_bi_json_object_stringify', 'length': 3 },
    ],
}
# E5 Section 13.2.3
# The [[ThrowTypeError]] function object used for poisoned accessors
# (e.g. strict-mode 'caller'/'arguments').
bi_type_error_thrower = {
    'internal_prototype': 'bi_function_prototype',
    'class': 'Function',
    'name': 'ThrowTypeError', # custom, matches V8
    'length': 0,
    'native': 'duk_bi_type_error_thrower',
    'callable': True,
    'constructable': False, # This is not clearly specified, but [[Construct]] is not set in E5 Section 13.2.3.
    'values': [],
    'functions': [],
}
# Custom 'Duktape' global object: exposes the custom constructors as values
# and the utility natives (info/line/gc/fin/enc/dec/compact) as functions.
bi_duk = {
    'internal_prototype': 'bi_object_prototype',
    'class': 'Object',
    'values': [
        # Note: 'version' is added from parameter file.
        # They are intentionally non-writable and non-configurable now.
        { 'name': 'Buffer', 'value': { 'type': 'builtin', 'id': 'bi_buffer_constructor' } },
        { 'name': 'Pointer', 'value': { 'type': 'builtin', 'id': 'bi_pointer_constructor' } },
        { 'name': 'Thread', 'value': { 'type': 'builtin', 'id': 'bi_thread_constructor' } },
        { 'name': 'Logger', 'value': { 'type': 'builtin', 'id': 'bi_logger_constructor' } },
    ],
    'functions': [
        { 'name': 'info', 'native': 'duk_bi_duk_object_info', 'length': 1 },
        { 'name': 'line', 'native': 'duk_bi_duk_object_line', 'length': 0 },
        { 'name': 'gc', 'native': 'duk_bi_duk_object_gc', 'length': 1 },
        { 'name': 'fin', 'native': 'duk_bi_duk_object_fin', 'length': 0, 'varargs': True },
        { 'name': 'enc', 'native': 'duk_bi_duk_object_enc', 'length': 0, 'varargs': True },
        { 'name': 'dec', 'native': 'duk_bi_duk_object_dec', 'length': 0, 'varargs': True },
        { 'name': 'compact', 'native': 'duk_bi_duk_object_compact', 'length': 1 },
    ],
}
bi_thread_constructor = {
'internal_prototype': 'bi_function_prototype',
'external_prototype': 'bi_thread_prototype',
'class': 'Function',
'name': 'Thread',
'length': 1,
'varargs': True,
'native': | |
# <gh_stars>1-10  (dataset extraction residue; commented so the file parses)
import os
import random
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import scoreatpercentile

sys.path.append(os.path.join(os.getcwd(), 'Modules'))

import lib.util
# Alias so the bare `util.log_sum_exp(...)` calls below resolve; a plain
# `import lib.util` only binds the top-level name `lib`.
from lib import util
from lib.thermodynamic_constants import *
# Global cofactor Gibbs-energy offsets (kcal/mol) used by getGr() when a
# cofactor pair (ATP/ADP, NAD/NADH, NADP/NADPH) is removed from a reaction.
# The non-zero literature-style values are kept commented out; all three are
# currently zeroed, so the cofactor branches contribute nothing.
# NOTE(review): confirm the zeros are intentional before production use.
#ATP_ADP_GFE = 208.44 #kcal/mol
#NAD_NADH_GFE = 14.96 #kcal/mol
#NADP_NADPH_GFE = 15.63 #kcal/mol
ATP_ADP_GFE = 0 #kcal/mol
NAD_NADH_GFE = 0 #kcal/mol
NADP_NADPH_GFE = 0 #kcal/mol
def H_rotational(x, T):
    """Rotational enthalpy contribution, 3/2 * R * T (kcal/mol).

    The row ``x`` is unused; it is accepted so the function can be used
    with ``DataFrame.apply(..., axis=1)`` like its siblings.
    """
    return 1.5 * R_kCal * T
def S_rotational(x, T):
    """Rotational entropy from the room-temperature partition function.

    ``x.QRot`` is the rotational partition function evaluated at 298.15 K;
    it is rescaled to the requested temperature using the classical
    q_rot ~ T**1.5 dependence.

    NOTE(review): the original body re-assigned ``T = 298.15``, silently
    discarding the temperature argument (sibling ``H_rotational`` carries
    that line only commented out). The override is removed here so the
    caller's T is honoured -- confirm against the intended workflow.
    """
    qrot_room = x.QRot
    # Scale q_rot from 298.15 K to T.
    qrot = qrot_room * (T / 298.15) ** 1.5
    return R_kCal * (np.log(qrot) + 1.5)
def S_translational(x, T):
    """Translational entropy for a particle of mass ``x.Mass`` (amu).

    Sackur-Tetrode-style expression at a reference pressure of 101325 Pa.
    """
    mass_kg = amutokg * x.Mass
    q_over_p = (2.0 * np.pi * mass_kg * kb * T) ** 1.5 * kb * T / 101325.0 / (hJ ** 3.0)
    return R_kCal * (np.log(q_over_p) + 2.5)
def H_translational(x, T):
    """Translational enthalpy contribution, 3/2 * R * T (kcal/mol); ``x`` unused."""
    return 1.5 * R_kCal * T
def H_vibrational(x, T):
    """Vibrational enthalpy (kcal/mol) from harmonic frequencies ``x.Freq`` (cm^-1).

    Adds the zero-point vibrational energy (ZPVE) to the thermally populated
    harmonic-oscillator term summed over all modes.

    Fixes vs. original: Python-2 ``xrange`` replaced with direct iteration,
    and the dead locals (``expui``, the single-element ``hVibArray``) removed.
    """
    freqs = x.Freq
    # ZPVE = 1/2 * h * c * sum(nu), converted from J/mol to kcal/mol.
    zpve = 0.5 * hJ * 100.0 * speedc * na * np.sum(freqs) / kcaljou
    hvib = 0.0
    for nu in freqs:
        # Reduced mode energy u = h*c*nu / (kB*T); nu given in cm^-1.
        u = nu * 100.0 * speedc * hJ / (kb * T)
        # Thermal contribution of one harmonic mode.
        hvib += nu * 100.0 * speedc * np.exp(-u) / (1.0 - np.exp(-u))
    return hvib * hJ * na / kcaljou + zpve
def S_vibrational(x, T):
    """Vibrational entropy from harmonic frequencies ``x.Freq`` (cm^-1).

    Fixes vs. original: Python-2 ``xrange`` replaced; the bare ``log`` name
    (undefined -- NameError at runtime) replaced with ``np.log``; the unused
    ZPVE computation and single-element accumulator list removed.

    NOTE(review): the result is scaled by 1000 before multiplying by R_kCal,
    as in the original -- confirm the intended units (kcal vs cal).
    """
    freqs = x.Freq
    svib = 0.0
    for nu in freqs:
        # Reduced mode energy u = h*c*nu / (kB*T); nu given in cm^-1.
        u = nu * 100.0 * speedc * hJ / (kb * T)
        expu = np.exp(u)
        # Harmonic-oscillator entropy of one mode.
        svib += u / (expu - 1.0) - np.log(1.0 - 1.0 / expu)
    return 1000.0 * svib * R_kCal
def H_total(x, T, SPE = 1, vib = True):
    """Total enthalpy: translational + rotational (+ vibrational) + electronic + RT.

    :param SPE: selects the electronic single-point-energy column:
        'DLPNO' -> x.DLPNO_SPE, 'OMEGA' -> x.OMEGA_SPE, 'SPE' -> x.SPE,
        anything else (including the default 1) -> x.EMin, the energy from
        the minimisation itself. (The old docstring described a 0/1 flag,
        but the code compares strings -- the doc is corrected here.)
    :param vib: when False, the vibrational term is dropped.

    NOTE(review): when ``vib`` is False the electronic energy is always
    taken from ``x.EMin`` regardless of ``SPE`` -- confirm intentional.
    """
    if SPE == 'DLPNO':
        Htot = x.HTrans + x.HVib + x.HRot + x.DLPNO_SPE + R_kCal * T
    elif SPE == 'OMEGA':
        Htot = x.HTrans + x.HVib + x.HRot + x.OMEGA_SPE + R_kCal * T
    elif SPE == 'SPE':
        Htot = x.HTrans + x.HVib + x.HRot + x.SPE + R_kCal * T
    else:
        Htot = x.HTrans + x.HVib + x.HRot + x.EMin + R_kCal * T
    if not vib:
        Htot = x.HTrans + x.HRot + x.EMin + R_kCal * T
    return Htot
def S_total(x, T, vib = True):
    """Total entropy from the precomputed columns; ``T`` is unused.

    :param vib: when False, the vibrational term is excluded.
    """
    if vib:
        return x.STrans + x.SVib + x.SRot
    return x.STrans + x.SRot
def G_total(x, T):
    """Gibbs free energy G = H - T*S from the precomputed HTot/STot columns."""
    return x.HTot - T * x.STot
def AlbertyTransform(x, T, pH, mu):
    """Apply the Alberty Legendre transform to one row's Gibbs energy.

    Thin wrapper around ``array_transform_ab_initio`` (presumably provided by
    the ``lib.thermodynamic_constants`` star import above -- verify), passing
    the row's GTot, hydrogen count and charge together with the module-level
    defaults for nMg, pMg and ionic strength. ``mu`` is the proton solvation
    potential.
    """
    G_transf = array_transform_ab_initio(np.array([x.GTot]), np.array([x.numberH]),
                                         np.array([x.charge]), default_nMg, pH,
                                         default_pMg, default_I, T, mu)
    return G_transf
def combine_conformers(x, T, Alberty):
    """Boltzmann-average the conformer Gibbs energies of one group.

    Computes G = -RT * logsumexp(-G_i / RT) over the rows of ``x``.
    ``Alberty`` selects the transformed ('G_app') vs raw ('GTot') column.
    Returns a one-element Series labelled 'G_avg'.
    """
    column = 'G_app' if Alberty else 'GTot'
    g_values = np.array(x[column].values)
    rt = R_kCal * T
    g_avg = -rt * util.log_sum_exp(g_values / (-rt))
    return pd.Series([g_avg], index=["G_avg"])
def combine_protonation(x, T):
    """Boltzmann-average a molecule's protonation-state energies ('G_avg').

    Returns a one-element Series labelled 'G_f'.
    """
    rt = R_kCal * T
    g_f = -rt * util.log_sum_exp(x['G_avg'].values / (-rt))
    return pd.Series([g_f], index=["G_f"])
def G_single_molecule(df, molStr, T, pH, mu, SPE = 'EMin', vib = True):
    """Compute thermochemistry columns for every conformer of one molecule.

    Filters ``df`` to rows whose Molecule equals ``molStr`` and adds the
    translational/rotational/vibrational H and S columns, the totals, and
    the Alberty-transformed Gibbs energy ('G_app').

    :param mu: proton solvation potential (forwarded to the Alberty transform)
    :param SPE: electronic-energy column selector (see ``H_total``)
    :param vib: include vibrational contributions in the totals
    :returns: a copy of the filtered frame with the new columns
    """
    # .copy() so the column assignments below do not write into (or warn
    # about) a view of the caller's frame.
    df_mol = df[df.Molecule == molStr].copy()
    # Translational
    df_mol['HTrans'] = df_mol.apply(H_translational, axis=1, args=[T])
    df_mol['STrans'] = df_mol.apply(S_translational, axis=1, args=[T])
    # Rotational
    df_mol['HRot'] = df_mol.apply(H_rotational, axis=1, args=[T])
    df_mol['SRot'] = df_mol.apply(S_rotational, axis=1, args=[T])
    # Vibrational
    df_mol['HVib'] = df_mol.apply(H_vibrational, axis=1, args=[T])
    # BUG FIX: the original filled 'SVib' with S_rotational; use the
    # vibrational entropy, mirroring the 'HVib' line above.
    df_mol['SVib'] = df_mol.apply(S_vibrational, axis=1, args=[T])
    # Totals
    df_mol['HTot'] = df_mol.apply(H_total, axis=1, args=[T, SPE, vib])
    df_mol['STot'] = df_mol.apply(S_total, axis=1, args=[T, vib])
    df_mol['GTot'] = df_mol.apply(G_total, axis=1, args=[T])
    # Transformed
    df_mol['G_app'] = df_mol.apply(AlbertyTransform, axis=1, args=[T, pH, mu])
    return df_mol
def average_conformers(df_mol, T, Alberty = True):
    """Collapse conformers to one Boltzmann-averaged 'G_avg' row per
    (Molecule, charge) pair; returns the result with a flat index."""
    averaged = (
        df_mol.groupby(["Molecule", "charge"])
        .apply(combine_conformers, T, Alberty)
        .reset_index()
    )
    return averaged
def average_protonation(grouped1, T):
    """Collapse protonation states to one Boltzmann-averaged 'G_f' row per
    Molecule; returns the result with a flat index."""
    averaged = (
        grouped1.groupby('Molecule')
        .apply(combine_protonation, T)
        .reset_index()
    )
    return averaged
def filter_data_IQR(df, mol, z, Alberty = True, numStd = 1, iqr_factor = 1.349):
    """Drop outlier conformers of one (molecule, charge) pair by a robust cut.

    The spread is estimated as ``iqr_factor * IQR`` (1.349 converts an IQR to
    one standard deviation for a normal distribution); rows farther than
    ``numStd`` spreads from the column median are removed.

    :param Alberty: filter on the transformed 'G_app' column instead of 'GTot'
    :returns: the filtered sub-frame

    Fix vs. original: the median was taken as ``data.median().G_app``, which
    fails on modern pandas when non-numeric columns (e.g. 'Molecule') are
    present; the column-wise ``data[col].median()`` is equivalent and robust.
    """
    data = df[(df['charge'] == z) & (df['Molecule'] == mol)]
    col = 'G_app' if Alberty else 'GTot'
    center = data[col].median()
    iqr = (scoreatpercentile(data[col].values, 75)
           - scoreatpercentile(data[col].values, 25))
    spread = iqr_factor * iqr
    return data[np.abs(data[col] - center) <= numStd * spread]
def sample_conformers(data, sampleSize):
    """Return ``sampleSize`` rows of ``data`` drawn uniformly without replacement.

    :param data: DataFrame with thermodynamic data for a single
        molecule/charge pair
    :returns: the sampled sub-frame

    Fixes vs. original: ``random.sample`` requires a real sequence, and the
    ``.ix`` indexer was removed from pandas -- sample the index labels from a
    list and select them with ``.loc``.
    """
    rows = random.sample(list(data.index), sampleSize)
    return data.loc[rows]
def get_substrates(rxnList):
    """Species on the left-hand side of a reaction string 'A + B = C', stripped."""
    lhs = rxnList.split('=')[0]
    return [term.strip() for term in lhs.split('+')]
def get_products(rxnList):
    """Species on the right-hand side of a reaction string 'A + B = C', stripped."""
    rhs = rxnList.split('=')[1]
    return [term.strip() for term in rhs.split('+')]
def Gf_reactants(reacts, df, T, pH, mu, sampleSize, SPE = 'EMin', vib = True, Alberty = True):
    """Sum of formation energies (G_f) for a list of reactant identifiers.

    For each reactant: compute per-conformer thermochemistry, then for each
    protonation state filter outliers, subsample ``sampleSize`` conformers
    and Boltzmann-average them, and finally average over protonation states.

    Fix vs. original: per-state frames are collected and combined with
    ``pd.concat`` (``DataFrame.append`` was removed in pandas 2.0).
    """
    G_f_r = 0
    for r in reacts:
        # Raw thermodynamic data for this reactant.
        df_mol = G_single_molecule(df, r, T, pH, mu, SPE, vib)
        # All protonation states present for this molecule.
        zList = set(df_mol[df_mol['Molecule'] == r].charge.values)
        # dG_f of each protonation state.
        per_state = []
        for z in zList:
            # Filter outliers (with or without the Alberty transform).
            dataFiltered = filter_data_IQR(df_mol, r, z, Alberty)
            # Subsample conformers.
            dataSampled = sample_conformers(dataFiltered, sampleSize)
            # Boltzmann-average over the sampled conformers.
            per_state.append(average_conformers(dataSampled, T, Alberty))
        df_dGProt_all = pd.concat(per_state)
        # Average over all protonation states for this reactant.
        df_dGMol = average_protonation(df_dGProt_all, T)
        # Accumulate the reactant formation energy.
        G_f_r += df_dGMol.G_f.values[0]
    return G_f_r
def getGr(rxn_str, df, T, pH, mu, sampleSize, SPE = 'EMin', vib = True, Alberty = True):
    """Estimate the Gibbs reaction energy for one reaction string.

    :param rxn_str: reaction string whose species carry a water-count suffix,
        e.g. 'C00002_2 + ... = C00008_2 + ...'
    :param df: frame with raw electronic-structure data
    :returns: tuple (G_f substrates, G_f products, G_r)

    Cofactor pairs (ATP/ADP, NADP/NADPH, NAD/NADH) are removed from the
    species lists and replaced by the fixed module-level energy constants.
    """
    subs = get_substrates(rxn_str)
    prods = get_products(rxn_str)
    # All species share the same water-count suffix; read it off the first.
    numWats = subs[0].split('_')[1]
    G_f_other = 0
    # ATP/ADP reaction
    if 'C00002' in rxn_str and 'C00008' in rxn_str:
        if 'C00002_' + numWats in subs:
            G_f_other = ATP_ADP_GFE
            subs.remove('C00002_' + numWats)
            prods.remove('C00008_' + numWats)
        else:
            G_f_other = -ATP_ADP_GFE
            subs.remove('C00008_' + numWats)
            prods.remove('C00002_' + numWats)
    # NADPH oxidoreductase reaction
    if 'C00005' in rxn_str and 'C00006' in rxn_str:
        if 'C00005_' + numWats in subs:
            G_f_other = -NADP_NADPH_GFE
            subs.remove('C00005_' + numWats)
            prods.remove('C00006_' + numWats)
        else:
            # BUG FIX: this NADP/NADPH branch used NAD_NADH_GFE in the
            # original (currently masked because both constants are 0).
            G_f_other = NADP_NADPH_GFE
            subs.remove('C00006_' + numWats)
            prods.remove('C00005_' + numWats)
    # NADH oxidoreductase reaction
    if 'C00003' in rxn_str and 'C00004' in rxn_str:
        if 'C00003_' + numWats in subs:
            G_f_other = NAD_NADH_GFE
            subs.remove('C00003_' + numWats)
            prods.remove('C00004_' + numWats)
        else:
            G_f_other = -NAD_NADH_GFE
            subs.remove('C00004_' + numWats)
            prods.remove('C00003_' + numWats)
    G_f_s = Gf_reactants(subs, df, T, pH, mu, sampleSize, SPE, vib, Alberty)
    G_f_p = Gf_reactants(prods, df, T, pH, mu, sampleSize, SPE, vib, Alberty)
    G_r = G_f_p - G_f_s + G_f_other
    return (G_f_s, G_f_p, G_r)
def getGr_pandas(x, rxn_str, dfAll, mu, sampleSize, numIter, SPE = 'EMin', vib = True):
'''
x --> data frame with experimental values for a single reaction
rxn_str --> The reaction string, with numW
df --> The data frame with raw electronic structure data.
T, ph, mu, sampleSize, SPE --> self explanatoryd
'''
#Set some variables
Gr_array = np.zeros(numIter)
#Get values from data frame with experimental data
T = x.Temp #temperature
pH = x.PH #PH
GR_exp = x.GR_exp #Experimental Gibbs reaction energy.
#Perform G_R estimate many times, | |
# <filename>container/pyf/dataPreloader.py <gh_stars>1-10  (dataset extraction residue; commented so the file parses)
from functools import cache
from types import MappingProxyType
from models import BaseEntities
def ensureDataItem(session, Model, name):
    """Return the id of the ``Model`` row named ``name``, creating it if absent.

    Raises AssertionError if more than one row with that name already exists.

    BUG FIX: the original created the new row with the hard-coded name
    'department' regardless of the requested ``name``.
    """
    itemRecords = session.query(Model).filter(Model.name == name).all()
    if len(itemRecords) == 0:
        itemRecord = Model(name=name)
        session.add(itemRecord)
        session.commit()
    else:
        assert len(itemRecords) == 1, f'Database has inconsistencies {Model}, {name}'
        itemRecord = itemRecords[0]
    return itemRecord.id
@cache
def ensureData(SessionMaker):
    """Ensure the base group/role type rows exist and return their ids.

    Cached per ``SessionMaker``; the mapping is returned read-only via
    ``MappingProxyType``. The session is always closed, even on failure.
    """
    UserModel, GroupModel, RoleModel, GroupTypeModel, RoleTypeModel = BaseEntities.GetModels()
    group_types = {
        'departmentTypeId': 'department',
        'facultyTypeId': 'faculty',
        'studyGroupId': 'studygroup',
    }
    role_types = {
        'departmentHeadRoleTypeId': 'head of department',
        'deanRoleTypeId': 'dean',
        'viceDeanRoleTypeId': 'vice dean',
        'rectorRoleTypeId': 'rector',
        'viceRectorRoleTypeId': 'vice rector',
    }
    session = SessionMaker()
    try:
        result = {}
        for key, type_name in group_types.items():
            result[key] = ensureDataItem(session, GroupTypeModel, type_name)
        for key, type_name in role_types.items():
            result[key] = ensureDataItem(session, RoleTypeModel, type_name)
    finally:
        session.close()
    return MappingProxyType(result)
import random
def randomUser(mod='main'):
    """Build a random Czech user dict with 'name', 'surname' and 'email' keys.

    Draws two given names and one surname via ``random.choice`` (in that
    order, so seeded runs stay reproducible). ``mod`` becomes the e-mail
    domain label.
    """
    surNames = [
        'Novák', 'Nováková', 'Svobodová', 'Svoboda', 'Novotná',
        'Novotný', 'Dvořáková', 'Dvořák', 'Černá', 'Černý',
        'Procházková', 'Procházka', 'Kučerová', 'Kučera', 'Veselá',
        'Veselý', 'Horáková', 'Krejčí', 'Horák', 'Němcová',
        'Marková', 'Němec', 'Pokorná', 'Pospíšilová', 'Marek'
    ]
    names = [
        'Jiří', 'Jan', 'Petr', 'Jana', 'Marie', 'Josef',
        'Pavel', 'Martin', 'Tomáš', 'Jaroslav', 'Eva',
        'Miroslav', 'Hana', 'Anna', 'Zdeněk', 'Václav',
        'Michal', 'František', 'Lenka', 'Kateřina',
        'Lucie', 'Jakub', 'Milan', 'Věra', 'Alena'
    ]
    first = random.choice(names)
    middle = random.choice(names)
    surname = random.choice(surNames)
    return {
        'name': f'{first} {middle}',
        'surname': surname,
        'email': f'{first}.{middle}.{surname}@{mod}.<EMAIL>',
    }
def preloadData(SessionMaker):
    """Seed the database with a randomized university structure.

    Creates global 'teachers' and 'students' groups, then a random number of
    faculties, each with random departments (staffed by random teachers) and
    random study groups (filled with random students). Everything is written
    through a single session obtained from ``SessionMaker``.
    """
    session = SessionMaker()
    UserModel, GroupModel, RoleModel, GroupTypeModel, RoleTypeModel = BaseEntities.GetModels()
    # Make sure the group/role type rows exist and fetch their ids.
    typeIds = ensureData(SessionMaker)
    # Global groups that collect every teacher / student created below.
    allTeachersGroup = GroupModel(name='teachers')
    allStudentsGroup = GroupModel(name='students')
    session.add(allTeachersGroup)
    session.add(allStudentsGroup)
    session.commit()
    def RandomizedStudents(faculty, studyGroup, count=10):
        # Create ``count`` random students, each linked to the faculty, the
        # study group and the global students group.
        for _ in range(count):
            student = randomUser(mod=faculty.name)
            studentRecord = UserModel(**student)
            session.add(studentRecord)
            faculty.users.append(studentRecord)
            studyGroup.users.append(studentRecord)
            allStudentsGroup.users.append(studentRecord)
        session.commit()
    def RandomizedStudyGroup(faculty):
        # One study group with a generated name, filled with 5-15 students.
        name = f"{faculty.name}5-{random.choice([1, 2, 3, 4, 5])}{random.choice(['B', 'C', 'K'])}{random.choice(['A', 'E', 'I'])}"
        studyGroupRecord = GroupModel(name=name, grouptype_id=typeIds['studyGroupId'])
        session.add(studyGroupRecord)
        session.commit()
        RandomizedStudents(faculty, studyGroupRecord, count=random.randint(5, 15))
        pass
    def RandomizedTeachers(faculty, department, count=10):
        # Create ``count`` random teachers, each linked to the faculty, the
        # department and the global teachers group.
        for _ in range(count):
            teacher = randomUser(mod=faculty.name)
            teacherRecord = UserModel(**teacher)
            session.add(teacherRecord)
            faculty.users.append(teacherRecord)
            department.users.append(teacherRecord)
            allTeachersGroup.users.append(teacherRecord)
        session.commit()
    def RandomizedDepartment(faculty, index):
        # One department with a generated name, staffed by 5-20 teachers.
        name = f"{faculty.name}_{index}_{random.choice(['B', 'C', 'K'])}{random.choice(['A', 'E', 'I'])}"
        departmentRecord = GroupModel(name=name, grouptype_id=typeIds['departmentTypeId'])
        session.add(departmentRecord)
        session.commit()
        RandomizedTeachers(faculty, departmentRecord, count=random.randint(5, 20))
        pass
    def RandomizedFaculty(index):
        # One faculty ('F<index>') with 4-13 departments and 20-39 study groups.
        facultyGroup = GroupModel(name=f'F{index}', grouptype_id=typeIds['facultyTypeId'])
        session.add(facultyGroup)
        session.commit()
        departmentCount = random.randrange(4, 14)
        for _ in range(departmentCount):
            RandomizedDepartment(facultyGroup, index=_)
        studyGroupCount = random.randrange(20, 40)
        for _ in range(studyGroupCount):
            RandomizedStudyGroup(facultyGroup)
        session.commit()
    def RandomizedUniversity():
        # 3-6 faculties in total.
        facultyCount = random.randrange(3, 7)
        for index in range(facultyCount):
            RandomizedFaculty(index)
        session.commit()
    RandomizedUniversity()
    session.commit()
    session.close()
def loadRandomizedData():
    """Placeholder; no preloaded dataset is wired in yet."""
def subjects():
"""3D optická digitalizace 1
Agentní a multiagentní systémy
Aktuální témata grafického designu
Algebra
Algoritmy
Algoritmy (v angličtině)
Analogová elektronika 1
Analogová elektronika 2
Analogová technika
Analýza a návrh informačních systémů
Analýza binárního kódu
Analýza systémů založená na modelech
Anglická konverzace na aktuální témata
Anglická konverzace na aktuální témata
Angličtina 1: mírně pokročilí 1
Angličtina 2: mírně pokročilí 2
Angličtina 3: středně pokročilí 1
Angličtina 3: středně pokročilí 1
Angličtina 4: středně pokročilí 2
Angličtina 4: středně pokročilí 2
Angličtina pro doktorandy
Angličtina pro Evropu
Angličtina pro Evropu
Angličtina pro IT
Angličtina pro IT
Angličtina: praktický kurz obchodní konverzace a prezentace
Aplikace paralelních počítačů
Aplikovaná herní studia - výzkum a design
Aplikované evoluční algoritmy
Architektura 20. století
Architektury výpočetních systémů
Audio elektronika
Automatizované testování a dynamická analýza
Autorská práva - letní
Bakalářská práce
Bakalářská práce Erasmus (v angličtině)
Bayesovské modely pro strojové učení (v angličtině)
Bezdrátové a mobilní sítě
Bezpečná zařízení
Bezpečnost a počítačové sítě
Bezpečnost informačních systémů
Bezpečnost informačních systémů a kryptografie
Bioinformatika
Bioinformatika
Biologií inspirované počítače
Biometrické systémy
Biometrické systémy (v angličtině)
Blockchainy a decentralizované aplikace
CCNA Kybernetická bezpečnost (v angličtině)
České umění 1. poloviny 20. století v souvislostech - zimní
České umění 2. poloviny 20. století v souvislostech - letní
Chemoinformatika
Číslicové zpracování akustických signálů
Číslicové zpracování signálů (v angličtině)
CNC obrábění / Roboti v umělecké praxi
Daňový systém ČR
Databázové systémy
Databázové systémy (v angličtině)
Dějiny a filozofie techniky
Dějiny a kontexty fotografie 1
Dějiny a kontexty fotografie 2
Dějiny designu 1 - letní
Dějiny designu 1 - zimní
Desktop systémy Microsoft Windows
Digitální forenzní analýza (v angličtině)
Digitální marketing a sociální média (v angličtině)
Digitální sochařství - 3D tisk 1
Digitální sochařství - 3D tisk 2
Diplomová práce
Diplomová práce (v angličtině)
Diplomová práce Erasmus (v angličtině)
Diskrétní matematika
Dynamické jazyky
Ekonomie informačních produktů
Elektroakustika 1
Elektronický obchod (v angličtině)
Elektronika pro informační technologie
Elektrotechnický seminář
Evoluční a neurální hardware
Evoluční výpočetní techniky
Filozofie a kultura
Finanční analýza
Finanční management pro informatiky
Finanční trhy
Formální analýza programů
Formální jazyky a překladače
Formální jazyky a překladače (v angličtině)
Funkcionální a logické programování
Funkční verifikace číslicových systémů
Fyzika 1 - fyzika pro audio inženýrství
Fyzika v elektrotechnice (v angličtině)
Fyzikální optika
Fyzikální optika (v angličtině)
Fyzikální seminář
Grafická a zvuková rozhraní a normy
Grafická uživatelská rozhraní v Javě
Grafická uživatelská rozhraní v Javě (v angličtině)
Grafická uživatelská rozhraní v X Window
Grafické a multimediální procesory
Grafové algoritmy
Grafové algoritmy (v angličtině)
Hardware/Software Codesign
Hardware/Software Codesign (v angličtině)
Herní studia
Informační systémy
Informační výchova a gramotnost
Inteligentní systémy
Inteligentní systémy
Internetové aplikace
Inženýrská pedagogika a didaktika
Inženýrská pedagogika a didaktika
Jazyk C
Klasifikace a rozpoznávání
Kódování a komprese dat
Komunikační systémy pro IoT
Konvoluční neuronové sítě
Kritická analýza digitálních her
Kruhové konzultace
Kryptografie
Kultura projevu a tvorba textů
Kultura projevu a tvorba textů
Kurz pornostudií
Lineární algebra
Lineární algebra
Logika
Makroekonomie
Management
Management projektů
Manažerská komunikace a prezentace
Manažerská komunikace a prezentace
Manažerské vedení lidí a řízení času
Manažerské vedení lidí a řízení času
Matematická analýza 1
Matematická analýza 2
Matematická logika
Matematické struktury v informatice (v angličtině)
Matematické výpočty pomocí MAPLE
Matematické základy fuzzy logiky
Matematický seminář
Matematický software
Matematika 2
Maticový a tenzorový počet
Mechanika a akustika
Mikroekonomie
Mikroprocesorové a vestavěné systémy
Mikroprocesorové a vestavěné systémy (v angličtině)
Mobilní roboty
Modelování a simulace
Modelování a simulace
Moderní matematické metody v informatice
Moderní metody zobrazování 3D scény
Moderní metody zpracování řeči
Moderní teoretická informatika
Moderní trendy informatiky (v angličtině)
Molekulární biologie
Molekulární genetika
Multimédia
Multimédia (v angličtině)
Multimédia v počítačových sítích
Návrh a implementace IT služeb
Návrh a realizace elektronických přístrojů
Návrh číslicových systémů
Návrh číslicových systémů (v angličtině)
Návrh kyberfyzikálních systémů (v angličtině)
Návrh počítačových systémů
Návrh vestavěných systémů
Návrh, správa a bezpečnost
Operační systémy
Optické sítě
Optika
Optimalizace
Optimalizační metody a teorie hromadné obsluhy
Optimální řízení a identifikace
Paralelní a distribuované algoritmy
Paralelní výpočty na GPU
Pedagogická psychologie
Pedagogická psychologie
Plošné spoje a povrchová montáž
Počítačová fyzika I
Počítačová fyzika II
Počítačová grafika
Počítačová grafika
Počítačová grafika (v angličtině)
Počítačová podpora konstruování
Počítačové komunikace a sítě
Počítačové vidění (v angličtině)
Počítačový seminář
Podnikatelská laboratoř
Podnikatelské minimum
Pokročilá bioinformatika
Pokročilá matematika
Pokročilá počítačová grafika (v angličtině)
Pokročilá témata administrace operačního systému Linux
Pokročilé asemblery
Pokročilé biometrické systémy
Pokročilé číslicové systémy
Pokročilé databázové systémy
Pokročilé databázové systémy (v angličtině)
Pokročilé informační systémy
Pokročilé komunikační systémy (v angličtině)
Pokročilé operační systémy
Pokročilé směrování v páteřních sítích (ENARSI)
Pokročilé techniky návrhu číslicových systémů
Pokročilý návrh a zabezpečení podnikových sítí
Praktické aspekty vývoje software
Praktické paralelní programování
Pravděpodobnost a statistika
Právní minimum
Právní minimum
Právo informačních systémů
Přenos dat, počítačové sítě a protokoly
Přenos dat, počítačové sítě a protokoly (v angličtině)
Principy a návrh IoT systémů
Principy programovacích jazyků a OOP
Principy programovacích jazyků a OOP (v angličtině)
Principy syntézy testovatelných obvodů
Programovací seminář
Programování na strojové úrovni
Programování v .NET a C#
Programování zařízení Apple
Projektová praxe 1
Projektová praxe 1
Projektová praxe 1 (v angličtině)
Projektová praxe 1 (v angličtině)
Projektová praxe 1 (v angličtině)
Projektová praxe 1 (v angličtině)
Projektová praxe 2
Projektová praxe 2
Projektová praxe 2 (v angličtině)
Projektová praxe 2 (v angličtině)
Projektová praxe 3
Projektování datových sítí
Projektový manažer
Prostředí distribuovaných aplikací
Rádiová komunikace
Regulované gramatiky a automaty
Rétorika
Rétorika
Řízení a regulace 1
Řízení a regulace 2
Robotika (v angličtině)
Robotika a manipulátory
Robotika a zpracování obrazu
Semestrální projekt
Semestrální projekt
Semestrální projekt (v angličtině)
Semestrální projekt Erasmus (v angličtině)
Semestrální projekt Erasmus (v angličtině)
Seminář C#
Seminář C++
Seminář diskrétní matematiky a | |
# <reponame>LaudateCorpus1/python-redfish-utility  (dataset extraction residue; commented so the file parses)
# ##
# Copyright 2016-2021 <NAME>, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ##
# -*- coding: utf-8 -*-
""" Fwpkg Command for rdmc """
import os
import json
import shutil
import zipfile
import tempfile
import ctypes
from ctypes import c_char_p, c_int, c_bool
from redfish.hpilo.risblobstore2 import BlobStore2
from rdmc_helper import (
IncompatibleiLOVersionError,
ReturnCodes,
Encryption,
InvalidCommandLineErrorOPTS,
InvalidCommandLineError,
InvalidFileInputError,
UploadError,
TaskQueueError,
FirmwareUpdateError,
)
def _get_comp_type(payload):
    """Determine the flash component type from a fwpkg payload.

    :param payload: json payload of .fwpkg file
    :type payload: dict.
    :returns: component type: 'A' (direct flash), 'B' (direct flash, reset
        required), 'C' (UEFI flashable) or 'D' (not flashable here).
    :rtype: string
    :raises InvalidFileInputError: if an image lacks the 'DirectFlashOk' key.
    """
    ctype = ""
    if "Uefi" in payload["UpdatableBy"] and "RuntimeAgent" in payload["UpdatableBy"]:
        ctype = "D"
    else:
        for device in payload["Devices"]["Device"]:
            for image in device["FirmwareImages"]:
                # Membership test directly on the dict (the original built a
                # throwaway list of the keys first).
                if "DirectFlashOk" not in image:
                    raise InvalidFileInputError("Cannot flash this firmware.")
                if image["DirectFlashOk"]:
                    ctype = "A"
                    if image["ResetRequired"]:
                        ctype = "B"
                    break
                elif image["UefiFlashable"]:
                    ctype = "C"
                    break
                else:
                    ctype = "D"
    return ctype
class FwpkgCommand:
"""Fwpkg command class"""
def __init__(self):
self.ident = {
"name": "flashfwpkg",
"usage": None,
"description": "Run to upload and flash "
"components from fwpkg files.\n\n\tUpload component and flashes it or sets a task"
"queue to flash.\n\texample: flashfwpkg component.fwpkg.\n\n\t"
"Skip extra checks before adding taskqueue. (Useful when adding "
"many flashfwpkg taskqueue items in sequence.)\n\texample: flashfwpkg "
"component.fwpkg --ignorechecks",
"summary": "Flashes fwpkg components using the iLO repository.",
"aliases": ["fwpkg"],
"auxcommands": [
"UploadComponentCommand",
"UpdateTaskQueueCommand",
"FirmwareUpdateCommand",
"FwpkgCommand",
],
}
self.cmdbase = None
self.rdmc = None
self.auxcommands = dict()
def run(self, line, help_disp=False):
"""Main fwpkg worker function
:param line: string of arguments passed in
:type line: str.
:param help_disp: display help flag
:type line: bool.
"""
if help_disp:
self.parser.print_help()
return ReturnCodes.SUCCESS
try:
(options, _) = self.rdmc.rdmc_parse_arglist(self, line)
if not line or line[0] == "help":
self.parser.print_help()
return ReturnCodes.SUCCESS
except (InvalidCommandLineErrorOPTS, SystemExit):
if ("-h" in line) or ("--help" in line):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
self.fwpkgvalidation(options)
if self.rdmc.app.typepath.defs.isgen9:
raise IncompatibleiLOVersionError(
"iLO Repository commands are only available on iLO 5."
)
if self.rdmc.app.getiloversion() <= 5.120 and options.fwpkg.lower().startswith(
"iegen10"
):
raise IncompatibleiLOVersionError(
"Please upgrade to iLO 5 1.20 or "
"greater to ensure correct flash of this firmware."
)
tempdir = ""
if not options.fwpkg.endswith(".fwpkg"):
InvalidFileInputError(
"Invalid file type. Please make sure the file "
"provided is a valid .fwpkg file type."
)
try:
components, tempdir, comptype = self.preparefwpkg(self, options.fwpkg)
if comptype == "D":
raise InvalidFileInputError("Unable to flash this fwpkg file.")
elif comptype == "C":
try:
self.taskqueuecheck()
except TaskQueueError as excp:
if options.ignore:
self.rdmc.ui.warn(str(excp) + "\n")
else:
raise excp
self.applyfwpkg(options, tempdir, components, comptype)
if comptype == "A":
message = "Firmware has successfully been flashed.\n"
if "ilo" in options.fwpkg.lower():
message += (
"iLO will reboot to complete flashing. Session will be"
" terminated.\n"
)
elif comptype == "B":
message = (
"Firmware has successfully been flashed and a reboot is required for "
"this firmware to take effect.\n"
)
elif comptype == "C":
message = "This firmware is set to flash on reboot.\n"
self.rdmc.ui.printer(message)
except (FirmwareUpdateError, UploadError) as excp:
raise excp
finally:
if tempdir:
shutil.rmtree(tempdir)
self.cmdbase.logout_routine(self, options)
# Return code
return ReturnCodes.SUCCESS
def taskqueuecheck(self):
"""Check taskqueue for potential issues before starting"""
select = "ComputerSystem."
results = self.rdmc.app.select(selector=select, path_refresh=True)
try:
results = results[0]
except:
pass
powerstate = results.resp.dict["PowerState"]
tasks = self.rdmc.app.getcollectionmembers(
"/redfish/v1/UpdateService/UpdateTaskQueue/"
)
for task in tasks:
if task["State"] == "Exception":
raise TaskQueueError(
"Exception found in taskqueue which will "
"prevent firmware from flashing. Please run "
"iLOrest command: taskqueue --cleanqueue to clear"
" any errors before continuing."
)
if (
task["UpdatableBy"] == "Uefi"
and not powerstate == "Off"
or task["Command"] == "Wait"
):
raise TaskQueueError(
"Taskqueue item found that will "
"prevent firmware from flashing immediately. Please "
"run iLOrest command: taskqueue --resetqueue to "
"reset the queue if you wish to flash immediately "
"or include --ignorechecks to add this firmware "
"into the task queue anyway."
)
if tasks:
self.rdmc.ui.warn(
"Items are in the taskqueue that may delay the flash until they "
"are finished processing. Use the taskqueue command to monitor updates.\n"
)
@staticmethod
def preparefwpkg(self, pkgfile):
    """Prepare fwpkg file for flashing
    :param pkgfile: Location of the .fwpkg file
    :type pkgfile: string.
    :returns: returns the files needed to flash, directory they are located
    in, and type of file.
    :rtype: string, string, string
    """
    # NOTE(review): declared @staticmethod yet takes `self`; callers appear
    # to pass the command instance explicitly — confirm before changing.
    files = []
    imagefiles = []
    payloaddata = None
    tempdir = tempfile.mkdtemp()
    try:
        # A .fwpkg is a zip archive: unpack it into the temp directory.
        zfile = zipfile.ZipFile(pkgfile)
        zfile.extractall(tempdir)
        zfile.close()
    except Exception as excp:
        raise InvalidFileInputError("Unable to unpack file. " + str(excp))
    files = os.listdir(tempdir)
    # payload.json describes the devices/images contained in the package.
    if "payload.json" in files:
        with open(os.path.join(tempdir, "payload.json"), encoding="utf-8") as pfile:
            data = pfile.read()
            payloaddata = json.loads(data)
    else:
        raise InvalidFileInputError("Unable to find payload.json in fwpkg file.")
    comptype = _get_comp_type(payloaddata)
    if comptype == "C":
        # Type C: repackage the whole fwpkg as a zip for upload.
        imagefiles = [
            self.auxcommands["flashfwpkg"].type_c_change(tempdir, pkgfile)
        ]
    else:
        results = self.rdmc.app.getprops(
            selector="UpdateService.", props=["Oem/Hpe/Capabilities"]
        )
        # Collect the unique firmware image filenames listed in the payload.
        for device in payloaddata["Devices"]["Device"]:
            for firmwareimage in device["FirmwareImages"]:
                if firmwareimage["FileName"] not in imagefiles:
                    imagefiles.append(firmwareimage["FileName"])
        # On a local (blobstore/CHIF) connection, when iLO advertises
        # UpdateFWPKG support and the package is fwpkg-2.0, upload the raw
        # .fwpkg directly instead of the extracted images.
        if (
            "blobstore" in self.rdmc.app.redfishinst.base_url
            and comptype in ["A", "B"]
            and results
            and "UpdateFWPKG" in results[0]["Oem"]["Hpe"]["Capabilities"]
        ):
            dll = BlobStore2.gethprestchifhandle()
            dll.isFwpkg20.argtypes = [c_char_p, c_int]
            dll.isFwpkg20.restype = c_bool
            with open(pkgfile, "rb") as fwpkgfile:
                fwpkgdata = fwpkgfile.read()
            fwpkg_buffer = ctypes.create_string_buffer(fwpkgdata)
            if dll.isFwpkg20(fwpkg_buffer, 2048):
                imagefiles = [pkgfile]
                tempdir = ""
    return imagefiles, tempdir, comptype
def type_c_change(self, tdir, pkgloc):
    """Copy a type-C fwpkg into the temp directory and rename it to .zip.

    :param tdir: path to the temp directory receiving the file
    :type tdir: string.
    :param pkgloc: full path of the source .fwpkg file
    :type pkgloc: string.
    :returns: The location of the type C file to upload
    :rtype: string.
    """
    shutil.copy(pkgloc, tdir)
    basename = os.path.basename(pkgloc)
    # Swap the ".fwpkg" suffix (6 chars) for ".zip".
    zipped = os.path.join(tdir, basename[:-6] + ".zip")
    os.rename(os.path.join(tdir, basename), zipped)
    return zipped
def applyfwpkg(self, options, tempdir, components, comptype):
    """Upload and apply firmware components to iLO.

    :param options: command line options
    :type options: list.
    :param tempdir: path to temp directory
    :type tempdir: string.
    :param components: components to upload
    :type components: list.
    :param comptype: type of component. Either A,B,C, or D.
    :type comptype: str.
    :raises UploadError: if a component upload fails and the update service
        cannot provide error details.
    :raises FirmwareUpdateError: if a type A/B upload fails and the update
        service returns no results.
    """
    for component in components:
        # Task-queue command used for type C components; --tpmover is
        # appended when the user requested a TPM override.
        taskqueuecommand = " create %s " % os.path.basename(component)
        if options.tover:
            taskqueuecommand = " create %s --tpmover" % os.path.basename(component)
        if component.endswith(".fwpkg") or component.endswith(".zip"):
            uploadcommand = "--component %s" % component
        else:
            # Bare image files were extracted into tempdir.
            uploadcommand = "--component %s" % os.path.join(tempdir, component)
        if options.forceupload:
            uploadcommand += " --forceupload"
        if comptype in ["A", "B"]:
            uploadcommand += " --update_target --update_repository"
        if options.update_srs:
            uploadcommand += " --update_srs"
        self.rdmc.ui.printer(
            "Uploading firmware: %s\n" % os.path.basename(component)
        )
        try:
            ret = self.auxcommands["uploadcomp"].run(uploadcommand)
            if ret != ReturnCodes.SUCCESS:
                raise UploadError
        except UploadError:
            if comptype in ["A", "B"]:
                # Try to fetch and print the update service's own error
                # message before giving up.
                select = self.rdmc.app.typepath.defs.hpilofirmwareupdatetype
                results = self.rdmc.app.select(selector=select)
                try:
                    # select() returns a list of matches; use the first.
                    results = results[0]
                except IndexError:
                    pass
                if results:
                    update_path = results.resp.request.path
                    error = self.rdmc.app.get_handler(update_path, silent=True)
                    self.auxcommands["firmwareupdate"].printerrmsg(error)
                else:
                    raise FirmwareUpdateError(
                        "Error occurred while updating the firmware."
                    )
            else:
                raise UploadError("Error uploading component.")
        if comptype == "C":
            self.rdmc.ui.warn(
                "Setting a taskqueue item to flash UEFI flashable firmware.\n"
            )
            self.auxcommands["taskqueue"].run(taskqueuecommand)
def fwpkgvalidation(self, options):
    """fwpkg validation function
    :param options: command line options
    :type options: list.
    """
    # Delegates login/target-selection validation to the shared rdmc helper.
    self.rdmc.login_select_validation(self, options)
def definearguments(self, customparser):
"""Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
self.cmdbase.add_login_arguments_group(customparser)
customparser.add_argument(
"fwpkg", help="""fwpkg file path""", metavar="[FWPKG]"
)
customparser.add_argument(
"--forceupload",
dest="forceupload",
action="store_true",
help="Add this flag to force upload firmware with the same name "
"already on the repository.",
default=False,
)
customparser.add_argument(
"--ignorechecks",
dest="ignore",
action="store_true",
help="Add this flag to ignore all checks to the taskqueue "
"before attempting to process the .fwpkg file.",
default=False,
)
customparser.add_argument(
"--tpmover",
dest="tover",
action="store_true",
help="If set then the TPMOverrideFlag is passed in on the "
"associated flash operations",
default=False,
)
customparser.add_argument(
"--update_srs",
dest="update_srs",
action="store_true",
help="Add this flag to update the System Recovery Set with the uploaded firmware. "
"NOTE: This requires an account login with the system recovery set privilege.",
default=False,
| |
file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#24:Check:Controllers:Validate all controllers are reachable
print(' Informational Check:#24')
log_file_logger.info('#24:Check:Controllers:Validate all controllers are reachable')
writeFile(report_file, '#24:Check:Controllers:Validate all controllers are reachable\n\n')
try:
unreach_controllers,check_result, check_analysis, check_action = infoChecktthree(controllers_info)
if check_result == 'Failed':
warning_checks['#24:Check:Controllers:Validate all controllers are reachable'] = [ check_analysis, check_action]
log_file_logger.error('#24: Check result: {}'.format(check_result))
log_file_logger.error('#24: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#24: Unreachable Controllers: {}\n'.format(unreach_controllers))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#24: Check result: {}'.format(check_result))
log_file_logger.info('#24: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #24:Check:Controllers:Validate all controllers are reachable. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
if cluster_size>1:
cluster_checks = {}
log_file_logger.info('*** Performing Cluster Checks')
print('\n**** Performing Cluster checks\n')
#25:Check:Cluster:Version consistency
print(' Cluster Check:#25')
log_file_logger.info('#25:Check:Cluster:Version consistency')
writeFile(report_file, '#25:Check:Cluster:Version consistency\n\n')
try:
check_result,check_analysis, check_action = criticalChecktwelve(vmanage_info)
if check_result == 'Failed':
cluster_checks['#25:Check:Cluster:Version consistency'] = [ check_analysis, check_action]
log_file_logger.error('#25: Check result: {}'.format(check_result))
log_file_logger.error('#25: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#25: vManage info: {}\n'.format(vmanage_info))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#25: Check result: {}'.format(check_result))
log_file_logger.info('#25: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#25: vManage info: {}\n'.format(vmanage_info))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #25:Check:Cluster:Version consistency. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#26:Check:Cluster:Cluster health
print(' Cluster Check:#26')
log_file_logger.info('#26:Check:Cluster:Cluster health')
writeFile(report_file, '#26:Check:Cluster:Cluster health\n\n')
try:
cluster_health_data = json.loads(getRequest(version_tuple,vmanage_lo_ip,jsessionid, 'clusterManagement/list', args.vmanage_port))
services_down, check_result, check_analysis, check_action = criticalCheckthirteen(cluster_health_data)
if check_result == 'Failed':
cluster_checks['#26:Check:Cluster:Cluster health'] = [ check_analysis, check_action]
log_file_logger.error('#26: Check result: {}'.format(check_result))
log_file_logger.error('#26: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#26: Relevant cluster services that are down: {}\n'.format(services_down))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#26: Check result: {}'.format(check_result))
log_file_logger.info('#26: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #26:Check:Cluster:Cluster health. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#27:Check:Cluster:Cluster ConfigDB topology
print(' Cluster Check:#27')
log_file_logger.info('#27:Check:Cluster:Cluster ConfigDB topology')
writeFile(report_file, '#27:Check:Cluster:Cluster ConfigDB topology\n\n')
try:
cluster_health_data = json.loads(getRequest(version_tuple,vmanage_lo_ip,jsessionid,'clusterManagement/list', args.vmanage_port))
configDB_count, check_result, check_analysis, check_action = criticalCheckfourteen(cluster_health_data)
if check_result == 'Failed':
cluster_checks['#27:Check:Cluster:Cluster ConfigDB topology'] = [ check_analysis, check_action]
log_file_logger.error('#27: Check result: {}'.format(check_result))
log_file_logger.error('#27: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#27: No. of configDB servers in the cluster: {}\n'.format(configDB_count))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#27: Check result: {}'.format(check_result))
log_file_logger.info('#27: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#27: No. of configDB servers in the cluster: {}\n'.format(configDB_count))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #27:Check:Cluster:Cluster ConfigDB topology. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#28:Check:Cluster:Messaging server
print(' Cluster Check:#28')
log_file_logger.info('#28:Check:Cluster:Messaging server')
writeFile(report_file, '#28:Check:Cluster:Messaging server\n\n')
try:
    cluster_health_data = json.loads(getRequest(version_tuple,vmanage_lo_ip, jsessionid,'clusterManagement/list', args.vmanage_port))
    cluster_msdown,check_result,check_analysis, check_action = criticalCheckfifteen(cluster_health_data)
    if check_result == 'Failed':
        cluster_checks['#28:Check:Cluster:Messaging server'] = [ check_analysis, check_action]
        log_file_logger.error('#28: Check result: {}'.format(check_result))
        log_file_logger.error('#28: Check Analysis: {}'.format(check_analysis))
        # Fixed: previously logged `services_down`, a leftover from check #26;
        # this check's own result is `cluster_msdown`.
        log_file_logger.error('#28: Relevant cluster services that are down: {}\n'.format(cluster_msdown))
        writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
        writeFile(report_file, 'Action: {}\n\n'.format(check_action))
    else:
        log_file_logger.info('#28: Check result: {}'.format(check_result))
        log_file_logger.info('#28: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
    print('\033[1;31m ERROR: Error performing #28:Check:Cluster:Messaging server. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
    log_file_logger.exception('{}\n'.format(e))
#29:Check:Cluster:DR replication status
print(' Cluster Check:#29')
log_file_logger.info('#29:Check:Cluster:DR replication status')
writeFile(report_file, '#29:Check:Cluster:DR replication status\n\n')
try:
    dr_data = json.loads(getRequest(version_tuple,vmanage_lo_ip, jsessionid,'disasterrecovery/details', args.vmanage_port))
    # NOTE(review): this unpack order (status, action, analysis, result)
    # differs from every other check's (..., result, analysis, action);
    # confirm it matches criticalChecksixteen's actual return order.
    dr_status, check_action, check_analysis, check_result = criticalChecksixteen(dr_data)
    if check_result == 'Failed':
        cluster_checks['#29:Check:Cluster:DR replication status'] = [ check_analysis, check_action]
        log_file_logger.error('#29: Check result: {}'.format(check_result))
        log_file_logger.error('#29: Check Analysis: {}'.format(check_analysis))
        log_file_logger.error('#29: DR Replication status: {}\n'.format(dr_status))
        writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
        writeFile(report_file, 'Action: {}\n\n'.format(check_action))
    else:
        log_file_logger.info('#29: Check result: {}'.format(check_result))
        log_file_logger.info('#29: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
    print('\033[1;31m ERROR: Error performing #29:Check:Cluster:DR replication status. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
    log_file_logger.exception('{}\n'.format(e))
#30:Check:Cluster:Intercluster communication
print(' Cluster Check:#30')
log_file_logger.info('#30:Check:Cluster:Intercluster communication')
writeFile(report_file, '#30:Check:Cluster:Intercluster communication\n\n')
try:
    # NOTE(review): Thread.isAlive() was removed in Python 3.9; if
    # criticalCheckseventeen is a threading.Thread subclass this needs to
    # be is_alive() — left unchanged pending confirmation.
    if criticalCheckseventeen.isAlive():
        criticalCheckseventeen.join(10)
    if not criticalCheckseventeen.result_queue.empty():
        ping_output, ping_output_failed, ping_check_result, ping_check_analysis, ping_check_action = criticalCheckseventeen.result_queue.get()
        if ping_check_result == 'Failed':
            cluster_checks['#30:Check:Cluster:Intercluster communication'] = [ ping_check_analysis, ping_check_action]
            log_file_logger.error('#30: Check result: {}'.format(ping_check_result))
            log_file_logger.error('#30: Check Analysis: {}'.format(ping_check_analysis))
            log_file_logger.error('#30: Cluster nodes with ping failure: {}\n'.format(ping_output_failed))
            writeFile(report_file, 'Result: ERROR - {}\n'.format(ping_check_analysis))
            # Fixed: the Action line previously wrote ping_check_analysis.
            writeFile(report_file, 'Action: {}\n\n'.format(ping_check_action))
        else:
            log_file_logger.info('#30: Check result: {}'.format(ping_check_result))
            log_file_logger.info('#30: Check Analysis: {}'.format(ping_check_analysis))
            log_file_logger.info('#30: Cluster nodes details: {}\n'.format(ping_output))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(ping_check_analysis))
except Exception as e:
    print('\033[1;31m ERROR: Error performing #30:Check:Cluster:Intercluster communication. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
    log_file_logger.exception('{}\n'.format(e))
#Logging out of the Session using jsessionid
log_file_logger.info('Logging out of the Session')
sessionLogout(vmanage_lo_ip,jsessionid,args.vmanage_port)
log_file_logger.info('Successfully closed the connection')
#version equal to or above 19.2 and below 20.5
elif version_tuple[0:2] >= ('19','2') and version_tuple[0:2] < ('20','5'):
try:
log_file_logger.info('Generating a JSessionID')
jsessionid = generateSessionID(vmanage_lo_ip, args.username, password, args.vmanage_port)
except Exception as e:
log_file_logger.exception('{}\n'.format(e))
raise SystemExit('\033[1;31m ERROR: Error generating JSessionID, make sure that the username and password entered is correct. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m \n\n'.format(log_file_path))
try:
log_file_logger.info('Generating CSRF Token')
tokenid = CSRFToken(vmanage_lo_ip,jsessionid,args.vmanage_port)
except Exception as e:
log_file_logger.exception('{}\n'.format(e))
raise SystemExit('\033[1;31m ERROR: Error generating CSRF Token. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m \n\n'.format(log_file_path))
#Preliminary data
log_file_logger.info('****Collecting Preliminary Data\n')
try:
    controllers = json.loads(getRequest(version_tuple, vmanage_lo_ip, jsessionid, 'system/device/controllers', args.vmanage_port, tokenid))
    controllers_info = controllersInfo(controllers)
    log_file_logger.info('Collected controllers information: {}'.format(controllers_info))
    system_ip_data = json.loads(getRequest(version_tuple, vmanage_lo_ip, jsessionid, 'device/vmanage', args.vmanage_port, tokenid))
    system_ip = system_ip_data['data']['ipAddress']
    log_file_logger.info('Collected vManage System IP address: {}'.format(system_ip))
    cpu_speed = cpuSpeed()
    log_file_logger.info('Collected vManage CPU Speed GHz: {}'.format(cpu_speed))
    cpu_count = cpuCount()
    log_file_logger.info('Collected vManage CPU Count: {}'.format(cpu_count))
    vedges = json.loads(getRequest(version_tuple, vmanage_lo_ip,jsessionid, 'system/device/vedges', args.vmanage_port , tokenid))
    vedge_count,vedge_count_active, vedge_info = vedgeCount(vedges)
    log_file_logger.info('Collected xEdge Count: {}'.format(vedge_count))
    cluster_size, server_mode, vmanage_info = serverMode(controllers_info)
    log_file_logger.info('Collected vManage Cluster Size: {}'.format(cluster_size))
    log_file_logger.info('Collected vManage Server Mode: {}'.format(server_mode))
    disk_controller = diskController()
    log_file_logger.info('Collected vManage Disk Controller Type: {}'.format(disk_controller))
    dpi_stats = json.loads(getRequest(version_tuple, vmanage_lo_ip, jsessionid,'statistics/settings/status', args.vmanage_port, tokenid))
    dpi_status = dpiStatus(dpi_stats)
    log_file_logger.info('Collected DPI Status: {}'.format(dpi_status))
    server_type = serverType()
    log_file_logger.info('Collected Server Type: {}'.format(server_type))
    vbond_info, vsmart_info = vbondvmartInfo(controllers_info)
    vbond_count = len(vbond_info)
    vsmart_count = len(vsmart_info)
    # Fixed: labels were swapped (vSmart logged vbond_info and vice versa).
    log_file_logger.info('vSmart info: {}'.format(vsmart_info))
    log_file_logger.info('vBond info: {}'.format(vbond_info))
    total_devices = len(controllers_info.keys()) + vedge_count
    log_file_logger.info('Total devices: {}'.format(total_devices))
except Exception as e:
    log_file_logger.exception('{}\n'.format(e))
    raise SystemExit('\033[1;31m ERROR: Error Collecting Preliminary Data. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
print('*Starting Checks, this may take several minutes\n\n')
#Critical Checks
print('\n**** Performing Critical checks\n')
#Begining #30:Check:Cluster:Intercluster communication in the background
if cluster_size>1:
log_file_logger.info('Beginging #30:Check:Cluster:Intercluster communication in the background\n')
try:
cluster_health_data = json.loads(getRequest(version_tuple,vmanage_lo_ip,jsessionid, 'clusterManagement/list', args.vmanage_port, tokenid))
criticalCheckseventeen = criticalCheckseventeen(cluster_health_data, system_ip, log_file_logger)
except Exception as e:
log_file_logger.exception('{}\n'.format(e))
critical_checks = {}
log_file_logger.info('*** Performing Critical Checks\n')
#01:Check:vManage:Validate current version
print(' Critical Check:#01')
log_file_logger.info('#01:Check:vManage:Validate current version')
writeFile(report_file, '#01:Check:vManage:Validate current version\n\n')
try:
boot_partition_size, check_result, check_analysis, check_action = criticalCheckone(version)
if check_result == 'Failed':
critical_checks['#01:Check:vManage:Validate current version'] = [ check_analysis, check_action]
log_file_logger.error('#01: Check result: {}'.format(check_result))
log_file_logger.error('#01: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#01: version: {}'.format(version))
log_file_logger.error('#01: Boot Partition Size: {}\n'.format(boot_partition_size))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#01: Check result: {}'.format(check_result))
log_file_logger.info('#01: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#01: version: {}'.format(version))
log_file_logger.info('#01: Boot Partition Size: {}\n'.format(boot_partition_size))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #01:Check:vManage:Validate current version. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#02:Check:vManage:At minimum 20% server disk space should be available
print(' Critical Check:#02')
log_file_logger.info('#02:Check:vManage:At minimum 20% server disk space should be available')
writeFile(report_file, '#02:Check:vManage:At minimum 20% server disk space should be available\n\n')
try:
optdata_partition_size, rootfs_partition_size, check_result, check_analysis, check_action = criticalCheckTwo()
if check_result == 'Failed':
critical_checks['#02:Check:vManage:At minimum 20% server disk space should be available'] = [check_analysis, check_action]
log_file_logger.error('#02: Check result: {}'.format(check_result))
log_file_logger.error('#02: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#02: /opt/data Used: {}'.format(optdata_partition_size))
log_file_logger.error('#02: /rootfs.rw Used: {}\n'.format(rootfs_partition_size))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis) )
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#02: Check result: {}'.format(check_result))
log_file_logger.info('#02: Check Analysis: {}'.format(check_analysis))
log_file_logger.info('#02: /opt/data Used: {}'.format(optdata_partition_size))
log_file_logger.info('#02: /rootfs.rw Used: {}\n'.format(rootfs_partition_size))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #02:Check:vManage:At minimum 20% server disk space should be available. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
#03:Check:vManage:Memory size
print(' Critical Check:#03')
log_file_logger.info('#03:Check:vManage:Memory size')
writeFile(report_file, '#03:Check:vManage:Memory size\n')
writeFile(report_file, 'Link to the official documentation: \n https://www.cisco.com/c/en/us/td/docs/routers/sdwan/release/notes/compatibility-and-server-recommendations/ch-server-recs-20-3.html\n\n')
try:
    memory_size, memory_size_str, dpi_status, server_type, check_result, check_analysis, check_action = criticalCheckthree(vedge_count, dpi_status, server_type, cluster_size, version_tuple)
    if check_result == 'Failed':
        critical_checks['#03:Check:vManage:Memory size'] = [ check_analysis, check_action]
        log_file_logger.error('#03: Check result: {}'.format(check_result))
        log_file_logger.error('#03: Check Analysis: {}'.format(check_analysis))
        log_file_logger.error('#03: Memory Size GB: {}'.format(memory_size_str))
        # NOTE(review): `rootfs_partition_size` belongs to check #02's scope —
        # logging it here looks like a copy-paste leftover and would raise
        # NameError if #02 did not run; confirm intent.
        log_file_logger.error('#03: /rootfs.rw Used: {}'.format(rootfs_partition_size))
        log_file_logger.error('#03: Server Type: {}'.format(server_type))
        log_file_logger.error('#03: vEdge Count: {}\n'.format(vedge_count))
        writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis) )
        writeFile(report_file, 'Action: {}\n\n'.format(check_action))
    else:
        log_file_logger.info('#03: Check result: {}'.format(check_result))
        log_file_logger.info('#03: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
    print('\033[1;31m ERROR: Error performing #03:Check:vManage:Memory size. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
    log_file_logger.exception('{}\n'.format(e))
#04:Check:vManage:CPU Count
print(' Critical Check:#04')
log_file_logger.info('#04:Check:vManage:CPU Count')
writeFile(report_file, '#04:Check:vManage:CPU Count\n\n')
try:
check_result, check_analysis, check_action = criticalCheckfour(cpu_count, vedge_count, dpi_status, server_type)
if check_result == 'Failed':
critical_checks['#04:Check:vManage:CPU Count'] = [ check_analysis, check_action]
log_file_logger.error('#04: Check result: | |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME>
#
import itertools
from collections import Counter
# Mapping moved to collections.abc in Python 3.3 and was removed from
# `collections` in Python 3.10; importing it from collections.abc keeps
# this module importable on modern interpreters.
from collections.abc import Mapping
from numbers import Number

import numpy

from pyscf import lib
class StrictCounter(Counter):
    """A ``Counter`` with strict arithmetic: negative counts are retained,
    equality ignores zero counts, and helpers are provided for filtering
    and materializing counts."""

    def __neg__(self):
        """Negate every count."""
        return StrictCounter({key: -count for key, count in self.items()})

    def __add__(self, other):
        """Element-wise sum; zero counts are dropped from the result."""
        summed = self.copy()
        summed.update(other)
        return summed.clean()

    def __sub__(self, other):
        """Element-wise difference; zero counts are dropped."""
        return self + (-StrictCounter(other))

    def __eq__(self, other):
        """Counters are equal when their difference carries no non-zero counts."""
        return (self - other).is_empty()

    def applied_count_condition(self, condition):
        """
        Applies a given condition on counts and returns a copy of StrictCounter.
        Args:
            condition (callable): a condition to apply;
        Returns:
            A StrictCounter with a condition applied.
        """
        return StrictCounter({key: count for key, count in self.items() if condition(count)})

    def clean(self):
        """
        Removes zero counts.
        Returns:
            An instance of StrictCounter with all zero counts removed.
        """
        return self.applied_count_condition(lambda count: count != 0)

    def positive_only(self):
        """
        Removes negative and zero counts.
        Returns:
            An instance of StrictCounter with positive counts only.
        """
        return self.applied_count_condition(lambda count: count > 0)

    def is_empty(self):
        """
        Checks if empty.
        Returns:
            True if all counts are zero.
        """
        return all(count == 0 for count in self.values())

    def to_list(self):
        """
        Makes a list of this counter with repeating elements.
        Returns:
            A list with elements from this counter.
        """
        expanded = []
        for key, count in self.items():
            expanded.extend([key] * count)
        return expanded

    def readonly(self):
        """
        Returns a read-only mapping to self.
        Returns:
            A read-only mapping.
        """
        return ReadOnlySCWrapper(self)

    def __repr__(self):
        return "{{{}}}".format(",".join("{:d}x{}".format(v, repr(k)) for k, v in sorted(self.items())))
class ReadOnlySCWrapper(Mapping):
    """An immutable ``Mapping`` view over a StrictCounter-like object,
    forwarding lookups and arithmetic to the wrapped instance."""

    def __init__(self, data):
        # Keep a reference to the wrapped counter; all access is delegated.
        self.__data__ = data

    def __getitem__(self, key):
        return self.__data__[key]

    def __iter__(self):
        return iter(self.__data__)

    def __len__(self):
        return len(self.__data__)

    def __neg__(self):
        return -self.__data__

    def __add__(self, other):
        return self.__data__ + other

    def __sub__(self, other):
        return self.__data__ - other

    def to_list(self):
        """Delegate to the wrapped object's ``to_list``."""
        return self.__data__.to_list()


# Module-level alias: "making a wrapper read-only" simply wraps it again.
readonly = ReadOnlySCWrapper
class OneToOne(dict):
    def __init__(self, source=None):
        """
        A one-to-one mapping.
        Args:
            source: source to initialize from;
        """
        dict.__init__(self)
        # Backward map (value -> key) maintained in lockstep with the dict.
        self.__bw__ = {}
        if source is not None:
            self.update(source)

    def __setitem__(self, key, value):
        # Reject any key or value already present on either side.
        if key in self:
            raise KeyError("The key {} is already present".format(repr(key)))
        if value in self.__bw__:
            raise KeyError("The value {} is already present".format(repr(value)))
        dict.__setitem__(self, key, value)
        self.__bw__[value] = key

    def __delitem__(self, key):
        if key not in self:
            raise KeyError("Missing key {}".format(repr(key)))
        # Remove from both directions atomically.
        value = self[key]
        dict.__delitem__(self, key)
        del self.__bw__[value]

    def __repr__(self):
        pairs = ",".join("{}=>{}".format(repr(k), repr(v)) for k, v in self.items())
        return "{{{{{}}}}}".format(pairs)

    def clear(self):
        dict.clear(self)
        self.__bw__.clear()
    clear.__doc__ = dict.clear.__doc__

    def copy(self):
        return OneToOne(self)
    copy.__doc__ = dict.copy.__doc__

    def update(self, other):
        # Validate key uniqueness, value uniqueness within `other`, and
        # value uniqueness against the existing mapping before mutating.
        present = set(other.keys()) & set(self.keys())
        if len(present) > 0:
            raise KeyError("Keys {} are already present".format(repr(present)))
        counter = StrictCounter(other.values())
        repeating = [k for k, v in counter.items() if v > 1]
        if len(repeating) > 0:
            raise KeyError("Some of the values are repeated and cannot be used as keys: {}".format(repr(repeating)))
        present = set(other.values()) & set(self.values())
        if len(present) > 0:
            raise KeyError("Values {} are already present".format(repr(present)))
        dict.update(self, other)
        self.__bw__.update({v: k for k, v in other.items()})
    update.__doc__ = dict.update.__doc__

    def withdraw(self, other):
        """
        Withdraws items from this one-to-one. Inverse of `self.update`.
        Args:
            other (dict): key-values pairs to withdraw;
        """
        # Verify every pair first so a partial withdraw never happens.
        for key, value in other.items():
            if key not in self:
                raise KeyError("Missing key {}".format(repr(key)))
            if self[key] != value:
                raise KeyError("Wrong value {} for key {}: expected {}".format(repr(value), repr(key), self[key]))
        for key in other.keys():
            del self[key]

    def inv(self):
        """
        Inverts the one-to-one correspondence.
        Returns:
            An inverted correspondence.
        """
        return OneToOne(self.__bw__)
class Intervals(object):
    def __init__(self, *args):
        """
        A class representing a set of (closed) intervals in 1D.
        Args:
            *args (Intervals, iterable): a set of intervals to initialize with;
                a single pair of numbers ``Intervals(a, b)`` is also accepted.
        """
        self.__s__ = []
        self.__e__ = []
        # Allow Intervals(a, b) as shorthand for Intervals((a, b)).
        if len(args) == 2 and isinstance(args[0], (int, float)):
            args = (args,)
        for i in args:
            self.add(*i)

    def __iter__(self):
        """Iterates over (start, end) pairs."""
        return iter(zip(self.__s__, self.__e__))

    def add(self, fr, to):
        """
        Adds an interval, merging it with any stored intervals it overlaps.
        Args:
            fr (float): from;
            to (float): to;
        """
        fr, to = min(fr, to), max(fr, to)
        new_s = []
        new_e = []
        for s, e in self:
            if e < fr or s > to:
                # Disjoint: keep as-is.
                new_s.append(s)
                new_e.append(e)
            elif s >= fr and e <= to:
                # Fully covered by the new interval: drop.
                pass
            else:
                # Partial overlap: grow the new interval to absorb it.
                fr = min(fr, s)
                to = max(to, e)
        new_s.append(fr)
        new_e.append(to)
        self.__s__ = new_s
        self.__e__ = new_e

    def __and__(self, other):
        """Pairwise intersection of two interval sets."""
        if not isinstance(other, Intervals):
            other = Intervals(*other)
        result = []
        for s1, e1 in self:
            for s2, e2 in other:
                s = max(s1, s2)
                e = min(e1, e2)
                if s <= e:
                    result.append((s, e))
        return Intervals(*result)

    def __nonzero__(self):
        """True if at least one interval is stored (Python 2 protocol name)."""
        return bool(self.__s__)

    # Fixed: Python 3 uses __bool__; without this alias, truth-testing an
    # Intervals instance would always be True on Python 3.
    __bool__ = __nonzero__

    def __repr__(self):
        return "Intervals({})".format(", ".join("({}, {})".format(i, j) for i, j in self))
class MetaArray(numpy.ndarray):
"""Array with metadata (StackOverflow copy-paste)."""
def __new__(cls, array, dtype=None, order=None, **kwargs):
obj = numpy.asarray(array, dtype=dtype, order=order).view(cls)
obj.metadata = kwargs
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.metadata = getattr(obj, 'metadata', None)
def meta(a, **kwargs):
    """
    Prepares an array with metadata.
    Args:
        a (numpy.ndarray): a numpy array;
        **kwargs: metadata to save;
    Returns:
        An array enhanced with metadata.
    """
    # Non-array inputs (numbers, lists, ...) pass through untouched.
    if not isinstance(a, numpy.ndarray):
        return a
    return MetaArray(a, **kwargs)
def d2t(d):
    """Dict into tuple (items sorted by key for a canonical order)."""
    items = sorted(d.items())
    return tuple(items)
def e(*args):
    """Numpy optimized einsum."""
    # Short-circuit: any exact-zero scalar operand makes the contraction zero.
    if any(isinstance(arg, Number) and arg == 0 for arg in args):
        return 0
    try:
        return numpy.einsum(*args, optimize=True)
    except TypeError:
        # Fall back to the project `lib` implementation when numpy rejects
        # the argument types.
        return lib.einsum(*args)
def p_count(permutation, destination=None, debug=False):
    """
    Counts permutations.
    Args:
        permutation (iterable): a list of unique integers from 0 to N-1, or any iterable of unique entries if
        `destination` is provided;
        destination (iterable): ordered elements from `permutation`;
        debug (bool): prints debug information if True;
    Returns:
        The number of permutations needed to achieve this list from a 0..N-1 series.
    """
    if destination is None:
        destination = sorted(permutation)
    if len(permutation) != len(destination):
        raise ValueError("Permutation and destination do not match: {:d} vs {:d}".format(len(permutation), len(destination)))
    # Map every element onto its position in `destination`, reducing the
    # problem to a permutation of 0..N-1.
    position = dict((element, i) for i, element in enumerate(destination))
    permutation = tuple(position[element] for element in permutation)
    # Count transpositions by walking cycles: a cycle of length k
    # contributes k - 1 swaps.
    seen = [False] * len(permutation)
    swaps = 0
    for start in range(len(permutation)):
        if seen[start]:
            continue
        cursor = start
        while permutation[cursor] != start:
            cursor = permutation[cursor]
            swaps += 1
            seen[cursor] = True
    if debug:
        print("p_count(" + ", ".join("{:d}".format(i) for i in permutation) + ") = {:d}".format(swaps))
    return swaps
def p(spec, tensor):
    """
    Antisymmetrizes tensor.
    Args:
        spec (str): a string specifying tensor indexes. Each tensor dimension is represented by the corresponding
        symbol in the string using the following rules:
        1. Tensor dimensions which do not need to be antisymmetrized are represented by same symbols;
        2. Each pair of tensor dimensions with different symbols will be antisymmetrized;
        3. The symbol `.` (dot) is a special symbol: the corresponding dimension marked by this symbol will not be
        touched;
        tensor (numpy.ndarray): a tensor to antisymmetrize;
    Returns:
        An antisymmetrized tensor.
    Examples:
        >>> import numpy
        >>> from numpy import testing
        >>> a = numpy.arange(12).reshape(2, 2, 3)
        >>> s = p("ab.", a)  # permutes first and second dimensions
        >>> testing.assert_allclose(s, a - numpy.swapaxes(a, 0, 1))  # raises on mismatch
        >>> s = p("aa.", a)  # does nothing
        >>> testing.assert_allclose(s, a)
    """
    if isinstance(tensor, Number):
        # Scalars have no axes to permute.
        return tensor
    result = numpy.zeros_like(tensor)
    perm_mask = numpy.array(list(spec)) != '.'
    all_indexes = numpy.arange(len(spec))
    # bugfix: take a copy; the original aliased `dims = all_indexes`, so the
    # in-place update below silently rewrote `all_indexes` as well.
    dims = all_indexes.copy()
    included = set()
    for order in itertools.permutations(all_indexes[perm_mask]):
        this_spec = ''.join(spec[_i] for _i in order)
        # Equal symbols make some permutations indistinguishable; apply each
        # distinct resulting spec only once.
        if this_spec not in included:
            dims[perm_mask] = order
            if p_count(order) % 2 == 0:
                result += tensor.transpose(dims)
            else:
                result -= tensor.transpose(dims)
            included.add(this_spec)
    return result
def _ltri_ix(n, ndims):
"""
Generates lower-triangular part indexes in arbitrary dimensions.
Args:
n (int): dimension size;
ndims (int): number of dimensions;
Returns:
Indexes in an array.
"""
if ndims == 1:
return numpy.arange(n)[:, numpy.newaxis]
else:
result = []
for i in range(ndims-1, n):
x = _ltri_ix(i, ndims-1)
x_arr = numpy.empty((x.shape[0],1), dtype=int)
x_arr[:] = i
result.append(numpy.concatenate((x_arr, x), axis=1,))
return numpy.concatenate(result, axis=0)
def ltri_ix(n, ndims):
"""
Generates lower-triangular part indexes in arbitrary | |
OWS_Output_Type
process_info["outputs"].append(ows2json_io(wps_out))
# generate CWL for WPS-1 using parsed WPS-3
cwl_package = wps2cwl_requirement(wps_service_url, wps_process.identifier)
for io_select in [WPS_INPUT, WPS_OUTPUT]:
io_section = "{}s".format(io_select)
cwl_package[io_section] = list()
for wps_io in process_info[io_section]:
cwl_io, cwl_ns = any2cwl_io(wps_io, io_select)
cwl_package[io_section].append(cwl_io)
if cwl_ns:
if "$namespaces" not in cwl_package:
cwl_package["$namespaces"] = dict()
cwl_package["$namespaces"].update(cwl_ns)
return cwl_package, process_info
def xml_wps2cwl(wps_process_response, settings):
    # type: (Response, AnySettingsContainer) -> Tuple[CWL, JSON]
    """
    Obtains the ``CWL`` definition that corresponds to a XML WPS-1 process.
    Converts a `WPS-1 ProcessDescription XML` tree structure to an equivalent `WPS-3 Process JSON` and builds the
    associated `CWL` package in conformance to :data:`weaver.processes.wps_package.CWL_REQUIREMENT_APP_WPS1`.
    :param wps_process_response: valid response (XML, 200) from a `WPS-1 ProcessDescription`.
    :param settings: application settings to retrieve additional request options.
    """
    def _tag_name(_xml):
        # type: (Union[xml_util.XML, str]) -> str
        """
        Obtains ``tag`` from a ``{namespace}Tag`` `XML` element.
        """
        tag = _xml.tag if hasattr(_xml, "tag") else _xml
        return tag.split("}")[-1].lower()
    # look for `XML` structure starting at `ProcessDescription` (WPS-1)
    xml_resp = xml_util.fromstring(str2bytes(wps_process_response.content))
    xml_wps_process = xml_resp.xpath("//ProcessDescription")  # type: List[xml_util.XML]
    if len(xml_wps_process) != 1:
        raise ValueError("Could not retrieve a valid 'ProcessDescription' from WPS-1 response.")
    # the first 'identifier' child element holds the process ID
    process_id = None
    for child in xml_wps_process[0]:
        if _tag_name(child) == "identifier":
            process_id = child.text
            break
    if not process_id:
        raise ValueError("Could not find a match for 'ProcessDescription.identifier' from WPS-1 response.")
    # transform WPS-1 -> WPS-3
    wps = get_wps_client(wps_process_response.url, settings)
    wps_service_url = urlparse(wps_process_response.url)
    wps_service_name = wps.provider.name if wps.provider else wps_service_url.hostname
    wps_process = wps.describeprocess(process_id, xml=wps_process_response.content)
    cwl_package, process_info = ows2json(wps_process, wps_service_name, wps_service_url)
    return cwl_package, process_info
def is_cwl_file_type(io_info):
    # type: (CWL_IO_Type) -> bool
    """
    Identifies if the provided `CWL` input/output corresponds to one, many or potentially a ``File`` type(s).
    When multiple distinct *atomic* types are allowed for a given I/O (e.g.: ``[string, File]``) and that one of them
    is a ``File``, the result will be ``True`` even if other types are not ``Files``.
    Potential ``File`` when other base type is ``"null"`` will also return ``True``.
    """
    io_type = io_info.get("type")
    if not io_type:
        raise ValueError("Missing CWL 'type' definition: [{!s}]".format(io_info))
    # plain type name
    if isinstance(io_type, str):
        return io_type == "File"
    # mapping: either an explicit array definition or a nested single type
    if isinstance(io_type, dict):
        if io_type["type"] == PACKAGE_ARRAY_BASE:
            return io_type["items"] == "File"
        return io_type["type"] == "File"
    # union of types: a single 'File' possibility suffices
    if isinstance(io_type, list):
        return any(is_cwl_file_type({"type": sub_type}) for sub_type in io_type)
    msg = "Unknown parsing of CWL 'type' format ({!s}) [{!s}] in [{}]".format(type(io_type), io_type, io_info)
    raise ValueError(msg)
def is_cwl_array_type(io_info):
    # type: (CWL_IO_Type) -> Tuple[bool, str, MODE, Union[AnyValueType, List[Any]]]
    """
    Verifies if the specified I/O corresponds to one of various CWL array type definitions.
    :returns:
        ``tuple(is_array, io_type, io_mode, io_allow)`` where:
        - ``is_array``: specifies if the I/O is of array type.
        - ``io_type``: array element type if ``is_array`` is True, type of ``io_info`` otherwise.
        - ``io_mode``: validation mode to be applied if sub-element requires it, defaults to ``MODE.NONE``.
        - ``io_allow``: validation values to be applied if sub-element requires it, defaults to ``AnyValue``.
    :raises PackageTypeError: if the array element doesn't have the required values and valid format.
    """
    # use mapping to allow sub-function updates
    io_return = {
        "array": False,
        "allow": AnyValue,
        "type": io_info["type"],
        "mode": MODE.NONE,
    }
    def _update_if_sub_enum(_io_item):
        # type: (CWL_IO_Type) -> bool
        """
        Updates the ``io_return`` parameters if ``io_item`` evaluates to a valid ``enum`` type.
        Parameter ``io_item`` should correspond to field ``items`` of an array I/O definition.
        Simple pass-through if the array item is not an ``enum``.
        """
        # closure: mutates the enclosing `io_return` mapping in-place on success
        _is_enum, _enum_type, _enum_mode, _enum_allow = is_cwl_enum_type({"type": _io_item})  # noqa: typing
        if _is_enum:
            LOGGER.debug("I/O [%s] parsed as 'array' with sub-item as 'enum'", io_info["name"])
            io_return["type"] = _enum_type
            io_return["mode"] = _enum_mode
            io_return["allow"] = _enum_allow
        return _is_enum
    # optional I/O could be an array of '["null", "<type>"]' with "<type>" being any of the formats parsed after
    # is it the literal representation instead of the shorthand with '?'
    if isinstance(io_info["type"], list) and any(sub_type == "null" for sub_type in io_info["type"]):
        # we can ignore the optional indication in this case because it doesn't impact following parsing
        # NOTE(review): only the first non-"null" entry is considered; a union of several
        # concrete types is presumably not expected here -- confirm against callers.
        io_return["type"] = list(filter(lambda sub_type: sub_type != "null", io_info["type"]))[0]
    # array type conversion when defined as '{"type": "array", "items": "<type>"}'
    # validate against 'Hashable' instead of 'dict' since 'OrderedDict'/'CommentedMap' can fail 'isinstance()'
    # NOTE(review): plain 'dict' instances are not 'Hashable', so this branch matches
    # dict-like mappings; a mapping registered as Hashable would skip it -- confirm intended.
    if (
        not isinstance(io_return["type"], str)
        and not isinstance(io_return["type"], Hashable)
        and "items" in io_return["type"]
        and "type" in io_return["type"]
    ):
        io_type = dict(io_return["type"])  # make hashable to allow comparison
        if io_type["type"] != PACKAGE_ARRAY_BASE:
            raise PackageTypeError("Unsupported I/O 'array' definition: '{}'.".format(repr(io_info)))
        # parse enum in case we got an array of allowed symbols
        is_enum = _update_if_sub_enum(io_type["items"])
        if not is_enum:
            io_return["type"] = io_type["items"]
        if io_return["type"] not in PACKAGE_ARRAY_ITEMS:
            raise PackageTypeError("Unsupported I/O 'array' definition: '{}'.".format(repr(io_info)))
        LOGGER.debug("I/O [%s] parsed as 'array' with nested dict notation", io_info["name"])
        io_return["array"] = True
    # array type conversion when defined as string '<type>[]'
    elif isinstance(io_return["type"], str) and io_return["type"] in PACKAGE_ARRAY_TYPES:
        io_return["type"] = io_return["type"][:-2]  # remove '[]'
        if io_return["type"] in PACKAGE_CUSTOM_TYPES:
            # parse 'enum[]' for array of allowed symbols, provide expected structure for sub-item parsing
            io_item = deepcopy(io_info)
            io_item["type"] = io_return["type"]  # override corrected type without '[]'
            _update_if_sub_enum(io_item)
        if io_return["type"] not in PACKAGE_ARRAY_ITEMS:
            raise PackageTypeError("Unsupported I/O 'array' definition: '{}'.".format(repr(io_info)))
        LOGGER.debug("I/O [%s] parsed as 'array' with shorthand '[]' notation", io_info["name"])
        io_return["array"] = True
    return io_return["array"], io_return["type"], io_return["mode"], io_return["allow"]
def is_cwl_enum_type(io_info):
    # type: (CWL_IO_Type) -> Tuple[bool, str, int, Optional[CWL_IO_EnumSymbols]]
    """
    Verifies if the specified I/O corresponds to a CWL enum definition.
    :returns:
        ``tuple(is_enum, io_type, io_mode, io_allow)`` where:
        - ``is_enum``: specifies if the I/O is of enum type.
        - ``io_type``: enum base type if ``is_enum=True``, type of ``io_info`` otherwise.
        - ``io_mode``: validation mode to be applied if input requires it, defaults to ``MODE.NONE``.
        - ``io_allow``: validation values of the enum.
    :raises PackageTypeError: if the enum doesn't have the required parameters and valid format.
    """
    io_type = io_info["type"]
    if not isinstance(io_type, dict) or "type" not in io_type or io_type["type"] not in PACKAGE_CUSTOM_TYPES:
        # not an enum-like definition at all
        return False, io_type, MODE.NONE, None
    if "symbols" not in io_type:
        raise PackageTypeError("Unsupported I/O 'enum' definition: '{!r}'.".format(io_info))
    symbols = io_type["symbols"]
    if not isinstance(symbols, list) or len(symbols) < 1:
        raise PackageTypeError("Invalid I/O 'enum.symbols' definition: '{!r}'.".format(io_info))
    # validate matching types in allowed symbols and convert to supported CWL type
    first = symbols[0]
    if any(type(symbol) is not type(first) for symbol in symbols):
        raise PackageTypeError("Ambiguous types in I/O 'enum.symbols' definition: '{!r}'.".format(io_info))
    if isinstance(first, str):
        base_type = "string"
    elif isinstance(first, float):
        base_type = "float"
    elif isinstance(first, int):
        base_type = "int"
    else:
        raise PackageTypeError("Unsupported I/O 'enum' base type: `{!s}`, from definition: `{!r}`."
                               .format(type(first), io_info))
    # allowed value validator mode must be set for input
    return True, base_type, MODE.SIMPLE, symbols
def get_cwl_io_type(io_info):
# type: (CWL_IO_Type) -> Tuple[str, bool]
"""
Obtains the basic type of the CWL input and identity if it is optional.
CWL allows multiple shorthand representation or combined types definition.
The *base* type must be extracted in order to identify the expected data format and supported values.
Obtains real type if ``"default"`` or shorthand ``"<type>?"`` was in CWL, which
can also be defined as type ``["null", <type>]``.
CWL allows multiple distinct types (e.g.: ``string`` and ``int`` simultaneously), but not WPS inputs.
WPS allows only different amount of *same type* through ``minOccurs`` and ``maxOccurs``.
Considering WPS conversion, we can also have following definition ``["null", <type>, <array-type>]`` (same type).
Whether single or array-like type, the base type can be extracted.
:param io_info: definition of the CWL input.
:return: tuple of guessed base type and flag indicating if it can be null (optional input).
"""
io_type = io_info["type"]
is_null = False
if isinstance(io_type, list):
if not len(io_type) > 1:
raise PackageTypeError("Unsupported I/O type as list cannot have only one base type: '{}'".format(io_info))
if "null" in io_type:
if len(io_type) == 1:
raise PackageTypeError("Unsupported I/O cannot be only 'null' type: '{}'".format(io_info))
LOGGER.debug("I/O parsed for 'default'")
is_null = True # I/O can be omitted since default value exists
io_type = [typ for typ in io_type if typ != "null"]
if len(io_type) == 1: # valid if other was "null" now removed
io_type = io_type[0]
else:
# check that many sub-type definitions all match same base type (no conflicting literals)
io_type_many = set()
io_base_type = None
for i, typ in | |
# Repository: securedataplane/preacher
#!/usr/bin/env python
"""
drivers for ovsdb commands.
contact: (email redacted)
AUG 10 2015
"""
import pexpect
import re
import json
import types
import time
import os
from drivers.common.clidriver import CLI
class OvsdbDriver( CLI ):
def __init__( self ):
"""
Initialize client
"""
self.name = None
self.home = None
self.handle = None
super( CLI, self ).__init__()
def connect( self, **connectargs ):
try:
for key in connectargs:
vars( self)[ key ] = connectargs[ key ]
self.name = self.options[ 'name' ]
if os.getenv( str( self.ip_address ) ) != None:
self.ip_address = os.getenv(str ( self.ip_address ) )
else:
main.log.info( self.name + ": Trying to connect to " +
self.ip_address )
self.handle = super( OvsdbDriver, self ).connect(
user_name=self.user_name,
ip_address=self.ip_address,
port=self.port,
pwd=<PASSWORD>)
if self.handle:
return self.handle
main.log.onfo( "Connection successful to the ovsdb node " +
self.name )
else:
main.log.error( "Connection failed to the ovsdb node " +
self.name )
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
main.cleanup()
main.exit()
except Exception:
main.log.exception( self.name + ": Uncaught exception!" )
main.cleanup()
main.exit()
def disconnect( self ):
try:
self.handle.sendline( "exit" )
self.handle.expect( "closed" )
response = main.TRUE
except pexpect.ExceptionPexpect:
response = main.FALSE
main.log.exception( self.name + ": Uncaught exception!" )
return response
def setManager( self, ip, port, delaytime="5" ):
command= "sudo ovs-vsctl set-manager tcp:" + str( ip ) + ":" + str( port )
try:
handle = self.execute(
cmd=command,
timeout=10 )
if re.search( "Error", handle ):
main.log.error( "Error in set ovsdb manager" )
main.log.error( handle )
return main.FALSE
else:
main.log.info( "Ovsdb manager " + str( ip ) + " set" )
#delay time for ovsdb connection create
main.log.info( "Wait " + str( delaytime ) + " seconds for ovsdb connection create" )
time.sleep( int( delaytime ) )
return main.TRUE
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
main.cleanup()
main.exit()
def delManager( self, delaytime="5" ):
command= "sudo ovs-vsctl del-manager"
try:
handle = self.execute(
cmd=command,
timeout=10 )
if re.search( "Error", handle ):
main.log.error( "Error in delete ovsdb manager" )
main.log.error( handle )
return main.FALSE
else:
main.log.info( "Ovsdb manager delete" )
#delay time for ovsdb connection delete
main.log.info( "Wait " + str( delaytime ) + " seconds for ovsdb connection delete" )
time.sleep( int( delaytime ) )
return main.TRUE
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
main.cleanup()
main.exit()
def getManager( self ):
command= "sudo ovs-vsctl get-manager"
try:
response = self.execute(
cmd=command,
timeout=10 )
return response
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
main.cleanup()
main.exit()
def listBr( self ):
"""
Parameters:
none
Return:
The output of the command from the linux
or main.FALSE on timeout
"""
command= "sudo ovs-vsctl list-br"
try:
response = self.execute(
cmd=command,
timeout=10 )
if response:
return response
else:
return main.FALSE
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
main.cleanup()
main.exit()
def listPorts( self, sw ):
"""
Parameters:
sw: The name of an OVS switch. Example "s1"
Return:
The output of the command from the linux
or main.FALSE on timeout
"""
command= "sudo ovs-vsctl list-ports " + str( sw )
try:
response = self.execute(
cmd=command,
timeout=10 )
if response:
return response
else:
return main.FALSE
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
main.cleanup()
main.exit()
def getController( self, sw ):
"""
Parameters:
sw: The name of an OVS switch. Example "s1"
Return:
The output of the command from the mininet cli
or main.FALSE on timeout"""
command = "sudo ovs-vsctl get-controller " + str( sw )
try:
response = self.execute(
cmd=command,
timeout=10)
if response:
return response
else:
return main.FALSE
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
main.cleanup()
main.exit()
def show( self ):
"""
Parameters:
none
Return:
The output of the command from the linux
or main.FALSE on timeout"""
command = "sudo ovs-vsctl show "
try:
response = self.execute(
cmd=command,
timeout=10)
if response:
return response
else:
return main.FALSE
except pexpect.EOF:
main.log.error( self.name + ": EOF exception found" )
main.log.error( self.name + ": " + self.handle.before )
main.cleanup()
main.exit()
def dumpFlows( self, sw, protocols=None ):
"""
Parameters:
sw: The name of an OVS switch. Example "s1"
Return:
The output of the command from the linux
or main.FALSE on timeout"""
if protocols:
command = "sudo ovs-ofctl -O " + \
protocols + " dump-flows " + str( sw )
else:
command = "sudo ovs-ofctl dump-flows " + str( sw )
try:
response = self.execute(
cmd=command,
timeout=10 )
if response:
return response
else:
return main.FALSE
except pexpect.EOF:
main.log.error(self.name + ": EOF exception found")
main.log.error(self.name + ": " + self.handle.before)
main.cleanup()
main.exit()
def createHost( self, hostname ):
command = "sudo ip netns add " + str( hostname )
try:
handle = self.execute(
cmd=command,
timeout=10)
if re.search( "Error", handle ):
main.log.error( "Error in create host" + str( hostname ) )
main.log.error( handle )
return main.FALSE
else:
main.log.info( "Create " + str( hostname ) + " sucess" )
return main.TRUE
except pexpect.EOF:
main.log.error(self.name + ": EOF exception found")
main.log.error(self.name + ": " + self.handle.before)
main.cleanup()
main.exit()
def createHostport(self, hostname="host1", hostport="host1-eth0", ovsport="port1", hostportmac="000000000001" ):
command = "sudo ip link add " + str(hostport) +" type veth peer name " + str(ovsport)
command += ";" + "sudo ip link set " + str(hostport) + " up"
command += ";" + "sudo ip link set " + str(ovsport) + " up"
command += ";" +" sudo ifconfig " + str(hostport) + " hw ether " + str(hostportmac)
command += ";" +" sudo ip link set " + str(hostport) + " netns " + str(hostname)
try:
handle = self.execute(
cmd=command,
timeout=10)
if re.search( "Error", handle ):
main.log.error( "Error in create host port " + str( hostport ) + " on " + str( hostname ) )
main.log.error( handle )
return main.FALSE
else:
main.log.info( "Create host port " + str( hostport ) + " on " + str( hostname ) + " sucess" )
return main.TRUE
except pexpect.EOF:
main.log.error(self.name + ": EOF exception found")
main.log.error(self.name + ": " + self.handle.before)
main.cleanup()
main.exit()
def addPortToOvs(self, ifaceId, attachedMac, vmuuid, port="port1", ovsname="br-int" ):
command = "sudo ovs-vsctl add-port " + str(ovsname) +" " + str(port)
if ifaceId:
command += " -- set Interface " + str(port) + " external-ids:iface-id=" + str(ifaceId) + " external-ids:iface-status=active"
if attachedMac:
command += " external-ids:attached-mac=" + str(attachedMac)
if vmuuid:
command += " external-ids:vm-uuid=" + str(vmuuid)
try:
handle = self.execute(
cmd=command,
timeout=10)
if re.search( "Error", handle ):
main.log.error( "Error in add port " + str(port) + " to ovs " + str( ovsname ) )
main.log.error( handle )
return main.FALSE
else:
main.log.info( "Add port " + str(port) + " to ovs " + str( ovsname ) + " sucess" )
return main.TRUE
except pexpect.EOF:
main.log.error(self.name + ": EOF exception found")
main.log.error(self.name + ": " + self.handle.before)
main.cleanup()
main.exit()
def setHostportIp(self, ip, hostname="host1", hostport1="host1-eth0" ):
command = "sudo ip netns exec " + str(hostname) +" ifconfig " + str(hostport1) + " " + str(ip)
try:
handle = self.execute(
cmd=command,
timeout=10)
if re.search( "Error", handle ):
main.log.error( "Error in set host ip for " + str( hostport1 ) + " on host " + str( hostname ) )
main.log.error( handle )
return main.FALSE
else:
main.log.info( "Set host ip for " + str( hostport1 ) + " on host " + str( hostname ) + " sucess" )
return main.TRUE
except pexpect.EOF:
main.log.error(self.name + ": EOF exception found")
main.log.error(self.name + ": " + self.handle.before)
main.cleanup()
main.exit()
def hostPing(self, src, target, hostname="host1" ):
if src:
command = "sudo ip netns exec " + str( hostname ) +" ping -c 1 -S " +\
str( src ) + " " + str( target )
else:
command = "sudo ip netns exec | |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import glance_store as store_api
from glance_store import backend
from glance_store import exceptions as store_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
import six
from taskflow.patterns import linear_flow as lf
from taskflow import retry
from taskflow import task
import glance.async_.flows._internal_plugins as internal_plugins
import glance.async_.flows.plugins as import_plugins
from glance.common import exception
from glance.common.scripts.image_import import main as image_import
from glance.common.scripts import utils as script_utils
from glance.i18n import _, _LE, _LI
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
api_import_opts = [
cfg.ListOpt('image_import_plugins',
item_type=cfg.types.String(quotes=True),
bounds=True,
sample_default='[no_op]',
default=[],
help=_("""
Image import plugins to be enabled for task processing.
Provide list of strings reflecting to the task Objects
that should be included to the Image Import flow. The
task objects needs to be defined in the 'glance/async/
flows/plugins/*' and may be implemented by OpenStack
Glance project team, deployer or 3rd party.
By default no plugins are enabled and to take advantage
of the plugin model the list of plugins must be set
explicitly in the glance-image-import.conf file.
The allowed values for this option is comma separated
list of object names in between ``[`` and ``]``.
Possible values:
* no_op (only logs debug level message that the
plugin has been executed)
* Any provided Task object name to be included
in to the flow.
""")),
]
CONF.register_opts(api_import_opts, group='image_import_opts')
# TODO(jokke): We should refactor the task implementations so that we do not
# need to duplicate what we have already for example in base_import.py.
class _DeleteFromFS(task.Task):
    """Taskflow task that removes staged image data after import."""
    def __init__(self, task_id, task_type):
        self.task_id = task_id
        self.task_type = task_type
        super(_DeleteFromFS, self).__init__(
            name='%s-DeleteFromFS-%s' % (task_type, task_id))
    def execute(self, file_path):
        """Remove file from the backend
        :param file_path: path to the file being deleted
        """
        if CONF.enabled_backends:
            # multi-store deployment: delegate deletion to glance_store
            store_api.delete(file_path, 'os_glance_staging_store')
            return
        # TODO(abhishekk): After removal of backend module from
        # glance_store need to change this to use multi_backend
        # module.
        local_path = file_path[7:]  # strip the 'file://' scheme prefix
        if not os.path.exists(local_path):
            LOG.warning(_("After upload to backend, deletion of staged "
                          "image data has failed because "
                          "it cannot be found at %(fn)s"), {
                'fn': local_path})
            return
        try:
            LOG.debug(_("After upload to the backend, deleting staged "
                        "image data from %(fn)s"), {'fn': local_path})
            os.unlink(local_path)
        except OSError as e:
            LOG.error(_("After upload to backend, deletion of staged "
                        "image data from %(fn)s has failed because "
                        "[Errno %(en)d]"), {'fn': local_path,
                                            'en': e.errno})
class _VerifyStaging(task.Task):
    """Validate the node staging URI and the 'file' store it points at."""
    # NOTE(jokke): This could be also for example "staging_path" but to
    # keep this compatible with other flows we want to stay consistent
    # with base_import
    default_provides = 'file_path'
    def __init__(self, task_id, task_type, task_repo, uri):
        self.task_id = task_id
        self.task_type = task_type
        self.task_repo = task_repo
        self.uri = uri
        super(_VerifyStaging, self).__init__(
            name='%s-ConfigureStaging-%s' % (task_type, task_id))
        # NOTE(jokke): If we want to use other than 'file' store in the
        # future, this is one thing that needs to change.
        # NOTE(review): 'index' finds the scheme anywhere in the string, not
        # only as a prefix -- presumably intended as a prefix check; confirm.
        try:
            uri.index('file:///', 0)
        except ValueError:
            msg = (_("%(task_id)s of %(task_type)s not configured "
                     "properly. Value of node_staging_uri must be "
                     " in format 'file://<absolute-path>'") %
                   {'task_id': self.task_id,
                    'task_type': self.task_type})
            raise exception.BadTaskConfiguration(msg)
        if not CONF.enabled_backends:
            # NOTE(jokke): We really don't need the store for anything but
            # verifying that we actually can build the store will allow us to
            # fail the flow early with clear message why that happens.
            self._build_store()
    def _build_store(self):
        # Builds (and discards) a 'file' glance_store instance pointing at
        # the staging directory, purely to fail fast on misconfiguration.
        # TODO(abhishekk): After removal of backend module from glance_store
        # need to change this to use multi_backend module.
        # NOTE(jokke): If we want to use some other store for staging, we can
        # implement the logic more general here. For now this should do.
        # NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're
        # forced to build our own config object, register the required options
        # (and by required I mean *ALL* of them, even the ones we don't want),
        # and create our own store instance by calling a private function.
        # This is certainly unfortunate but it's the best we can do until the
        # glance_store refactor is done. A good thing is that glance_store is
        # under our team's management and it gates on Glance so changes to
        # this API will (should?) break task's tests.
        conf = cfg.ConfigOpts()
        try:
            backend.register_opts(conf)
        except cfg.DuplicateOptError:
            pass
        conf.set_override('filesystem_store_datadir',
                          CONF.node_staging_uri[7:],
                          group='glance_store')
        # NOTE(flaper87): Do not even try to judge me for this... :(
        # With the glance_store refactor, this code will change, until
        # that happens, we don't have a better option and this is the
        # least worst one, IMHO.
        store = backend._load_store(conf, 'file')
        try:
            store.configure()
        except AttributeError:
            msg = (_("%(task_id)s of %(task_type)s not configured "
                     "properly. Could not load the filesystem store") %
                   {'task_id': self.task_id, 'task_type': self.task_type})
            raise exception.BadTaskConfiguration(msg)
    def execute(self):
        """Test the backend store and return the 'file_path'"""
        return self.uri
class _ImportToStore(task.Task):
def __init__(self, task_id, task_type, image_repo, uri, image_id, backend,
allow_failure, set_active):
self.task_id = task_id
self.task_type = task_type
self.image_repo = image_repo
self.uri = uri
self.image_id = image_id
self.backend = backend
self.allow_failure = allow_failure
self.set_active = set_active
super(_ImportToStore, self).__init__(
name='%s-ImportToStore-%s' % (task_type, task_id))
    def execute(self, file_path=None):
        """Bringing the imported image to back end store
        :param file_path: path to the image file; when None, the data is
            pulled from ``self.uri`` instead
        """
        # NOTE(flaper87): Let's dance... and fall
        #
        # Unfortunatelly, because of the way our domain layers work and
        # the checks done in the FS store, we can't simply rename the file
        # and set the location. To do that, we'd have to duplicate the logic
        # of every and each of the domain factories (quota, location, etc)
        # and we'd also need to hack the FS store to prevent it from raising
        # a "duplication path" error. I'd rather have this task copying the
        # image bits one more time than duplicating all that logic.
        #
        # Since I don't think this should be the definitive solution, I'm
        # leaving the code below as a reference for what should happen here
        # once the FS store and domain code will be able to handle this case.
        #
        # if file_path is None:
        #     image_import.set_image_data(image, self.uri, None)
        #     return
        # NOTE(flaper87): Don't assume the image was stored in the
        # work_dir. Think in the case this path was provided by another task.
        # Also, lets try to neither assume things nor create "logic"
        # dependencies between this task and `_ImportToFS`
        #
        # base_path = os.path.dirname(file_path.split("file://")[-1])
        # NOTE(flaper87): Hopefully just scenarios #3 and #4. I say
        # hopefully because nothing prevents the user to use the same
        # FS store path as a work dir
        #
        # image_path = os.path.join(base_path, image_id)
        #
        # if (base_path == CONF.glance_store.filesystem_store_datadir or
        #         base_path in CONF.glance_store.filesystem_store_datadirs):
        #     os.rename(file_path, image_path)
        #
        # image_import.set_image_data(image, image_path, None)
        # NOTE(jokke): The different options here are kind of pointless as we
        # will need the file path anyways for our delete workflow for now.
        # For future proofing keeping this as is.
        image = self.image_repo.get(self.image_id)
        # abort early if the image was deleted while the task was queued
        if image.status == "deleted":
            raise exception.ImportTaskError("Image has been deleted, aborting"
                                            " import.")
        try:
            image_import.set_image_data(image, file_path or self.uri,
                                        self.task_id, backend=self.backend,
                                        set_active=self.set_active)
            # NOTE(yebinama): set_image_data catches Exception and raises from
            # them. Can't be more specific on exceptions catched.
        except Exception:
            if not self.allow_failure:
                raise
            msg = (_("%(task_id)s of %(task_type)s failed but since "
                     "allow_failure is set to true, continue.") %
                   {'task_id': self.task_id, 'task_type': self.task_type})
            LOG.warning(msg)
            if self.backend is not None:
                # NOTE(review): ''.split(',') yields [''], so an empty
                # property gains a leading empty entry here -- confirm the
                # consumers of 'os_glance_failed_import' tolerate that.
                failed_import = image.extra_properties.get(
                    'os_glance_failed_import', '').split(',')
                failed_import.append(self.backend)
                image.extra_properties[
                    'os_glance_failed_import'] = ','.join(failed_import)
        if self.backend is not None:
            # drop this store from the pending-imports property; a missing
            # entry is only logged at debug level
            importing = image.extra_properties.get(
                'os_glance_importing_to_stores', '').split(',')
            try:
                importing.remove(self.backend)
                image.extra_properties[
                    'os_glance_importing_to_stores'] = ','.join(importing)
            except ValueError:
                LOG.debug("Store %s not found in property "
                          "os_glance_importing_to_stores.", self.backend)
        # NOTE(flaper87): We need to save the image again after
        # the locations have been set in the image.
        self.image_repo.save(image)
def | |
<gh_stars>1-10
import os
import os.path
import csv
import pickle
import processing
from .Tjulia.test.MyLog import MyLog
import geopandas as gpd
import osmnx as ox
from PyQt5 import uic, QtWidgets
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtSignal, QVariant
from PyQt5.QtWidgets import QFileDialog, QTreeWidgetItem, QTableWidget, QTableWidgetItem
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtWidgets import QMessageBox
from qgis.core import (QgsProject, QgsVectorLayer, QgsField, QgsRasterLayer,
QgsFeature, QgsVertexId, QgsMultiPoint, QgsGeometry, QgsCoordinateTransform)
from .dhcoptimizerplanheat.streets_downloader import streetsDownloader
from .layer_utils import load_file_as_layer, load_open_street_maps
from .dialogSources import CheckSourceDialog
from .utility.SourceAvailability import SourceAvailability
from .utility.SourceAvailabilityPostCalculation import SourceAvailabilityPostCalculation
from .utility.data_manager.DataTransfer import DataTransfer
from .city.src.FileManager import FileManager
from . import master_planning_config as mp_config
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'ui', 'Step0dockwidget.ui'))
class Step0Dialog(QtWidgets.QDockWidget, FORM_CLASS):
    """Step 0 dock widget of the planning plugin.

    Imports DMM/SMM mapping data (shapefiles, rasters, scenario pickles and
    hourly-source CSVs) and hands the loaded layers over to the other plugin
    steps.  The UI is loaded from ui/Step0dockwidget.ui via FORM_CLASS.
    """

    # Signals relaying load / close events to the other plugin steps.
    district_shp_loaded = pyqtSignal()
    buildings_shp_loaded = pyqtSignal()
    buildings_shp_loaded2 = pyqtSignal()
    step0_closing_signal = pyqtSignal()
    file_removed = pyqtSignal()
    send_data_to_step2 = pyqtSignal(dict, dict)
    buildings_shp_loaded_step1signal = pyqtSignal(QgsVectorLayer)
    buildings_shp_loaded_step4signal = pyqtSignal(QgsVectorLayer)
    step0_all_import_complete = pyqtSignal(QgsVectorLayer, DataTransfer)
    # csv_loaded = pyqtSignal(QTableWidget)
    send_tab_sources = pyqtSignal(QTableWidget)
    # Time constants: hours per year, hours per day, and a lookup of
    # number-of-days -> month numbers (February treated as 28 days).
    h8760 = 8760
    h24 = 24
    day_per_month = {28: [2], 30: [11, 4, 6, 9], 31: [1, 3, 5, 7, 8, 10, 12]}
    def __init__(self, iface, parent=None, work_folder=None):
        """Constructor.

        :param iface: QGIS interface object (used later for the message bar).
        :param parent: optional Qt parent widget.
        :param work_folder: optional working directory for the plugin session.
        """
        super(Step0Dialog, self).__init__(parent)
        # Set up the user interface from Designer through FORM_CLASS.
        # After self.setupUi() you can access any designer object by doing
        # self.<objectname>, and you can use autoconnect slots - see
        # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
        # #widgets-and-dialogs-with-auto-connect
        self.setupUi(self)
        self.work_folder = work_folder
        # District list and layer combo start hidden until data is loaded.
        self.listWidget.hide()
        self.comboLayer.hide()
        self.iface = iface
        # Internal signal wiring: refresh the UI tables/menus when layers arrive.
        self.district_shp_loaded.connect(self.fill_district_menu)
        self.buildings_shp_loaded.connect(self.fill_buildings_table)
        self.buildings_shp_loaded2.connect(self.fill_buildings_table_future)
        self.listWidget.itemChanged.connect(self.list_district_select)
        # QgsProject.instance().layersAdded.connect(self.fill_layers_combobox)
        # QgsProject.instance().layerWasAdded.connect(self.fill_layers_combobox)
        # QgsProject.instance().layerRemoved.connect(self.fill_layers_combobox)
        self.btnSourcesAvailability.clicked.connect(self.source_availability)
        #self.pushButton_4.clicked.connect(self.download_streets_from_comboBox_selection)
        self.delete_file.clicked.connect(self.delete_import_file)
        self.ok2.clicked.connect(self.send_tab_to_stap3)
        self.baseline_scenario_layer = None
        # Tabs 0 and 2 stay disabled until the required imports are completed.
        self.phases.setTabEnabled(0, False)
        self.phases.setTabEnabled(2, False)
        self.all_import_completed = True
        self.geo_graph = None
        # Container used to hand the loaded data over to the other steps.
        self.data_transfer = DataTransfer()
        self.data_transfer.geo_graph = self.geo_graph
        self.data_transfer.buildings = self.baseline_scenario_layer
        # QgsProject.layerRemoved.connect(self.update_source_combobox)
        # QgsProject.layerWasAdded.connect(self.update_source_combobox)
        self.dialog_source = CheckSourceDialog()
        self.fill_layers_combobox(1)
        self.pbar_Download.hide()
        self.label_3.setEnabled(False)
        self.label_9.setEnabled(False)
        self.layerPath2.setEnabled(False)
        self.layerPath3.setEnabled(False)
        self.load1.setEnabled(False)
        self.load2.setEnabled(False)
        # Assign toolbar icons: "open file" on the load buttons...
        icon = QIcon()
        icon_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "icons",
                                 "open_file.png")
        icon.addPixmap(QPixmap(icon_path), QIcon.Normal, QIcon.Off)
        self.load.setIcon(icon)
        self.load1.setIcon(icon)
        self.load2.setIcon(icon)
        self.load3.setIcon(icon)
        self.load4.setIcon(icon)
        #self.load_streets.setIcon(icon)
        self.pushButton_5.setIcon(icon)
        # ...delete icon...
        icon = QIcon()
        icon_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "icons",
                                 "untitled.png")
        icon.addPixmap(QPixmap(icon_path), QIcon.Normal, QIcon.Off)
        self.delete_file.setIcon(icon)
        # ...save icon...
        icon = QIcon()
        icon_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "icons",
                                 "save_as.png")
        icon.addPixmap(QPixmap(icon_path), QIcon.Normal, QIcon.Off)
        self.save_plugin.setIcon(icon)
        # ...and import icon.
        icon = QIcon()
        icon_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "icons",
                                 "import.png")
        icon.addPixmap(QPixmap(icon_path), QIcon.Normal, QIcon.Off)
        self.pushButton_load_all_files.setIcon(icon)
        self.combo_box_layer = None
        self.future_scenario_layer = None
        # Pre-fill the default DMM/SMM paths from the mapping-module directory.
        default_root = mp_config.CURRENT_MAPPING_DIRECTORY
        self.layerPath.setEnabled(False)#setDefaultRoot(default_root)
        self.layerPath.lineEdit().setText(os.path.join(default_root, "DMM",
                                                       mp_config.DMM_PREFIX+".shp"))
        self.layerPath1.setEnabled(False)#.setDefaultRoot(default_root)
        self.layerPath1.lineEdit().setText(os.path.join(default_root, "DMM",
                                                        mp_config.DMM_PREFIX+mp_config.DMM_FUTURE_SUFFIX+".shp"))
        self.layerPath2.setEnabled(False)#.setDefaultRoot(default_root)
        self.layerPath2.lineEdit().setText(os.path.join(default_root, "DMM",
                                                        mp_config.DMM_PREFIX+".scn"))
        self.layerPath3.setEnabled(False)#.setDefaultRoot(default_root)
        self.layerPath3.lineEdit().setText(os.path.join(default_root, "DMM",
                                                        mp_config.DMM_PREFIX+mp_config.DMM_FUTURE_SUFFIX+mp_config.DMM_HOURLY_SUFFIX+".csv"))
        #self.layerPath_streets.setDefaultRoot(os.path.expanduser("~"))
        self.folder.setEnabled(False)
        self.btnSmm.setEnabled(False)
        self.folder.setText(os.path.join(default_root, "SMM"))
        self.sources_availability = None
        self.sources_temperature = None
        # Extra action buttons hidden by default.
        self.cancel.hide()
        self.pushButton_2.hide()
        self.pushButton.hide()
        self.pushButton_3.hide()
def load_all_files_from_folder(self):
folder = self.folder.text()
if not os.path.exists(folder):
QMessageBox.warning(None, "Warning", "Folder " + folder + " does not exist")
return
# Counting number of files to be open in the selected folder
file_counter = 0
for root, dirs, files in os.walk(folder):
for file in files:
if file.endswith('.tif') or file.endswith('.shp'):
file_counter += 1
if file_counter==0:
QMessageBox.information(None, "Warning", "Folder " + folder + " does not contain .tif or .shp files. There's nothing to load, here!")
return
# setting progress bar
self.progressBar.setMaximum(file_counter)
self.progressBar.setMinimum(0)
self.progressBar.setValue(0)
self.progressBar.show()
progress = 0
# loading all .tif and .shp files. Files already loaded are ignored.
for root, dirs, files in os.walk(folder):
for filename in files:
if filename[-4:] == ".shp":
progress = progress + 1
file_path = folder + "\\" + filename
if not QgsProject.instance().mapLayersByName(filename):
load_file_as_layer(file_path, filename, 'Shapefiles from SMM', min_val=None,
max_val=None, mean_val=None, value_color=None, area=None)
else:
print("File " + file_path + "seems to be already loaded! Skipping file...")
if filename[-4:] == ".tif":
progress = progress + 1
file_path = folder + "\\" + filename
if not QgsProject.instance().mapLayersByName(filename):
load_file_as_layer(file_path, filename, 'Raster layers from SMM',
min_val=None, max_val=None, mean_val=None, value_color=None, area=None)
else:
print("File " + file_path + "seems to be already loaded! Skipping file...")
self.progressBar.setValue(progress)
load_open_street_maps()
# emit signal to activate and fill (reload) the district list in the district tab of step0
self.district_shp_loaded.emit()
self.progressBar.hide()
self.get_temperature_from_mapping_module(folder)
def load_folder(self):
folder = str(QFileDialog.getExistingDirectory(self, "Select Directory"))
self.folder.setText(folder)
def load_shp_file4(self):
print("Downloading baseline scenario ...")
layer_list = QgsProject.instance().mapLayersByName("baseline scenario")
if len(layer_list)>0:
self.baseline_scenario_layer = layer_list[0]
else:
layerShp = self.layerPath.filePath()
self.baseline_scenario_layer = load_file_as_layer(layerShp, 'baseline scenario', None, min_val=None,
max_val=None, mean_val=None, value_color=None, area=None)
self.buildings_shp_loaded.emit()
self.buildings_shp_loaded_step1signal.emit(self.baseline_scenario_layer)
print("End downloading baseline scenario")
def load_shp_file3(self):
print("Downloading future scenario ...")
Flayer = self.layerPath1.filePath()
self.future_scenario_layer= load_file_as_layer(Flayer, 'future scenario', None, min_val=None,
max_val=None, mean_val=None, value_color=None, area=None)
self.buildings_shp_loaded2.emit()
self.buildings_shp_loaded_step4signal.emit(self.future_scenario_layer)
print("End downloading future scenario ...")
def fill_district_menu(self):
layer_list = QgsProject.instance().mapLayersByName("projection_helper.shp")
if len(layer_list) > 0:
layer = layer_list[0]
else:
layer = None
if layer is not None:
#retrieve district code list and update
self.listWidget.clear()
district_code_list = []
features = layer.getFeatures()
for feature in features:
district_code_list.append(feature.attribute(2))
self.listWidget.addItems(district_code_list)
for i in range(self.listWidget.count()):
self.listWidget.item(i).setFlags(self.listWidget.item(i).flags() | QtCore.Qt.ItemIsUserCheckable)
self.listWidget.item(i).setCheckState(QtCore.Qt.Unchecked)
def add_district_selection(self):
layer_list = QgsProject.instance().mapLayersByName("projection_helper.shp")
if len(layer_list) > 0:
layer = layer_list[0]
else:
layer = None
if layer is not None:
features = layer.selectedFeatures()
for i in range(self.listWidget.count()):
for feature in features:
if self.listWidget.item(i).text() == feature.attribute(2):
self.listWidget.item(i).setCheckState(QtCore.Qt.Checked)
def override_district_selection(self):
# uncheck all the district and add the new selection
for i in range(self.listWidget.count()):
self.listWidget.item(i).setCheckState(QtCore.Qt.Unchecked)
self.add_district_selection()
def reverse_district_selection(self):
# check the state of each item and change it
for i in range(self.listWidget.count()):
if self.listWidget.item(i).checkState():
self.listWidget.item(i).setCheckState(QtCore.Qt.Unchecked)
else:
self.listWidget.item(i).setCheckState(QtCore.Qt.Checked)
def loadScenario(self):
fname = self.layerPath2.filePath()
with open(fname, "rb") as fichero:
load_data = pickle.load(fichero)
self.pname.setText(load_data.scenarioName)
self.pname.show()
self.label_6.show()
self.areaStudy.setText(str(load_data.scenarioVersion))
self.areaStudy.show()
self.label_7.show()
self.country.setText(load_data.country)
self.country.show()
self.label_8.show()
def fill_buildings_table(self):
layer_list = QgsProject.instance().mapLayersByName("baseline scenario")
if len(layer_list) > 0:
layer = layer_list[0]
else:
layer = None
if layer is not None:
features = layer.getFeatures()
building_item_list = []
fields = ["BuildingID", "AHeatDem", "AHeatDemM2", "ACoolDem", "ACoolDemM2", "ADHWDem",
"ADHWDemM2", "MaxHeatDem", "MaxCoolDem", "MaxDHWDem", "Use", "GrossFA"]
missing_fields = set()
for feature in features:
if len(feature.attributes()) > 13:
# Field names
# BuildingID AHeatDem AHeatDemM2 ACoolDem ACoolDemM2 ADHWDem ADHWDemDM2 MaxHeatDem MaxCoolDem MaxDHWDem USE, GrossFloor area
#string_list = [str(feature.attribute(3)), str(feature.attribute(5)), str(feature.attribute(6)),
# str(feature.attribute(7)), str(feature.attribute(8)), str(feature.attribute(9)),
# str(feature.attribute(10)), str(feature.attribute(11)), str(feature.attribute(12)),
# str(feature.attribute(13)), str(feature.attribute(14)), str(feature.attribute(18))]
string_list = []
for f in fields:
try:
string_list.append(str(feature[f]))
except:
missing_fields.add(f)
string_list.append('')
building_item_list.append(QTreeWidgetItem(string_list))
self.pmTree.addTopLevelItems(building_item_list)
if len(missing_fields) > 0:
self.iface.messageBar().pushMessage("Field {0} is missing in ths baseline shape file".format(missing_fields), level=1, duration=0)
def fill_buildings_table_future(self):
layer_name = "future scenario"
layer_list2 = QgsProject.instance().mapLayersByName(layer_name)
flag = True
if len(layer_list2) > 0:
layer2 = layer_list2[0]
else:
layer2 = None
if layer2 is not None:
features2 = layer2.getFeatures()
building_item_list2 = []
fields = ["BuildingID", "AHeatDem", "AHeatDemM2", "ACoolDem", "ACoolDemM2", "ADHWDem",
"ADHWDemM2", "MaxHeatDem", "MaxCoolDem", "MaxDHWDem", "Use", "GrossFA"]
missing_fields = set()
for feature in features2:
if len(feature.attributes()) > 13:
# Field names
# BuildingID AHeatDem AHeatDemM2 ACoolDem ACoolDemM2 ADHWDem ADHWDemDM2 MaxHeatDem MaxCoolDem MaxDHWDem USE, GrossFloor area
#string_list = [str(feature.attribute(3)), str(feature.attribute(5)), str(feature.attribute(6)),
# str(feature.attribute(7)), str(feature.attribute(8)), str(feature.attribute(9)),
# str(feature.attribute(10)), str(feature.attribute(11)), str(feature.attribute(12)),
# str(feature.attribute(13)), str(feature.attribute(14)), str(feature.attribute(18))]
string_list2 = []
for f in fields:
try:
string_list2.append(str(feature[f]))
except:
missing_fields.add(f)
string_list2.append('')
building_item_list2.append(QTreeWidgetItem(string_list2))
self.pmTree2.addTopLevelItems(building_item_list2)
if len(missing_fields) > 0:
self.iface.messageBar().pushMessage("Field {0} is missing in ths future shape file".format(missing_fields), level=1, duration=0)
    def load_csv_file(self):
        """Load the hourly-sources CSV into the table for the checked districts
        (delegates to list_district_select)."""
        self.list_district_select()
        # self.csv_loaded.emit(self.sources_available)
    # compares the selected districts against the CSV and fills the table
    def list_district_select(self):
        """Rebuild the sources_available table for the checked districts.

        Reads the tab-separated CSV selected in layerPath3; its first row is a
        header containing district codes. One table column is created per
        checked district, plus a leading "Source Description" column and a
        trailing "Total" column whose cells come from somma().
        """
        lista = self.list_district()  # texts of the checked district entries
        csvFile = self.layerPath3.filePath()
        if not os.path.exists(csvFile):
            return
        data = []
        self.sources_available.clear()
        self.sources_available.insertRow(0)
        # Load the whole tab-separated file into memory (header + data rows).
        with open(csvFile)as inputFile:
            csvReader = csv.reader(inputFile, delimiter='\t')
            for row in csvReader:
                data.append(row)
        # For each checked district, remember its column index in the CSV header.
        index_list = []
        for i in range(len(lista)):
            if lista[i] in data[0][:]:
                index_list.append(data[0][:].index(lista[i]))
        self.sources_available.clear()
        totalColumns = self.sources_available.columnCount()
        # NOTE(review): headerList aliases lista, so the insert/append below
        # also grow lista by two entries; the later range(len(lista)) loops
        # rely on that to cover the description and total columns as well.
        headerList = lista
        headerList.insert(0, "Source Description")
        headerList.append("Total")
        self.sources_available.setRowCount(0)
        # Drop every pre-existing column, then create one per header entry.
        for i in range(totalColumns - 1, -1, -1):
            self.sources_available.removeColumn(i)
        for j in range(len(lista)):
            self.sources_available.insertColumn(j)
        self.sources_available.setHorizontalHeaderLabels(headerList)
        # data[row][column]
        for k in range(len(data) - 1):
            self.sources_available.insertRow(k)
            for z in range(len(lista)):
                if z == 0:
                    # First column: source description from the CSV row.
                    self.sources_available.setItem(k, z, QTableWidgetItem(str(data[k + 1][0])))
                else:
                    if z < len(index_list) + 1:
                        # District columns, in the order they were matched.
                        self.sources_available.setItem(k, z, QTableWidgetItem(str(data[k + 1][index_list[z - 1]])))
                    else:
                        # Trailing column: row total over the district cells.
                        self.sources_available.setItem(k, z, QTableWidgetItem(self.somma(k)))
def somma(self, q):
total = 0
try:
column = self.sources_available.columnCount() - 2
for p in range(column):
num = float(self.sources_available.item(q, p + 1).text())
total = total + num
except:
pass
return str(total)
def list_district(self):
# return list of selected district
distretti = []
for i in range(self.listWidget.count()):
if self.listWidget.item(i).checkState():
# self.listWidget.item(i).setCheckState(list.addItem())
distretti.append(self.listWidget.item(i).text())
return distretti
def reset_function(self):
self.pmTree.clear()
self.pmTree2.clear()
    def closeEvent(self, event):
        """Qt close handler: run the step-0 shutdown hook, then accept the close."""
        self.closeStep0()
        event.accept()
    def closeStep0(self):
        """Hide the dock widget and tell listeners that step 0 is closing."""
        self.hide()
        self.step0_closing_signal.emit()
def activate_visualization_tabs(self):
self.listWidget.clear()
self.pushButton_2.setEnabled(False)
self.pushButton_3.setEnabled(False)
self.pushButton.setEnabled(False)
# Districts layer it's expected to be called projection_helper.shp
layer_list = QgsProject.instance().mapLayersByName("projection_helper.shp")
if len(layer_list) > 0:
layer = layer_list[0]
else:
layer = None
if layer is not None and self.baseline_scenario_layer is not None:
if not layer.crs() == self.baseline_scenario_layer.crs():
parameter = {'INPUT': layer, 'TARGET_CRS': self.baseline_scenario_layer.crs().authid(),
'OUTPUT': 'memory:'}
p = processing.run('qgis:reprojectlayer', parameter)
layer = p['OUTPUT']
# do this only if all_import_completed flag it's true
if self.all_import_completed:
# setting progress bar
self.progressBar.setMaximum(self.baseline_scenario_layer.featureCount())
self.progressBar.setMinimum(0)
self.progressBar.setValue(0)
self.progressBar.show()
| |
to connect to the Nuage VSD/SDK host', dest='nuage_enterprise', type=str)
parser.add_argument('--nuage-host', required=True, help='The Nuage VSD/SDK endpoint to connect to', dest='nuage_host', type=str)
parser.add_argument('--nuage-port', required=False, help='The Nuage VSD/SDK server port to connect to (default = 8443)', dest='nuage_port', type=int, default=8443)
parser.add_argument('--nuage-password', required=False, help='The password with which to connect to the Nuage VSD/SDK host. If not specified, the user is prompted at runtime for a password', dest='nuage_password', type=str)
parser.add_argument('--nuage-user', required=True, help='The username with which to connect to the Nuage VSD/SDK host', dest='nuage_username', type=str)
parser.add_argument('--nuage-vrs-ovf', required=False, help='The URL of the VRS OVF file', dest='nuage_vrs_ovf', type=str)
parser.add_argument('-S', '--disable-SSL-certificate-verification', required=False, help='Disable SSL certificate verification on connect', dest='nosslcheck', action='store_true')
parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')
parser.add_argument('--vcenter-host', required=True, help='The vCenter server to connect to, use the IP', dest='vcenter_host', type=str)
parser.add_argument('--vcenter-name', required=False, help='The name of the vCenter you want in the vCenter Deployment Tool', dest='vcenter_name', type=str)
parser.add_argument('--vcenter-http-port', required=False, help='The vCenter server HTTP port to connect to (default = 80)', dest='vcenter_http_port', type=int, default=80)
parser.add_argument('--vcenter-https-port', required=False, help='The vCenter server HTTPS port to connect to (default = 443)', dest='vcenter_https_port', type=int, default=443)
parser.add_argument('--vcenter-password', required=False, help='The password with which to connect to the vCenter host. If not specified, the user is prompted at runtime for a password', dest='vcenter_password', type=str)
parser.add_argument('--vcenter-user', required=True, help='The username with which to connect to the vCenter host', dest='vcenter_username', type=str)
args = parser.parse_args()
return args
def handle_vdt_datacenter(logger, nc, vc, nuage_vcenter, vc_dc, nc_dc_list, vcenter_name, all_clusters, all_hosts, clusters, hosts, hosts_list, hv_username, hv_password, hv_management_network, hv_data_network, hv_vm_network, hv_mc_network, host_configure_agent, allow_fqdn):
    """Ensure vCenter Datacenter *vc_dc* exists in the Nuage vCenter Deployment
    Tool (creating it if absent), then sync each of its clusters that is either
    in *clusters* or covered by *all_clusters* via handle_vdt_cluster.
    """
    # Checking if the Datacenter exists in the Nuage vCenter Deployment Tool
    logger.debug('Checking vCenter Datacenter %s in Nuage vCenter Deployment Tool' % vc_dc.name)
    active_nc_dc = None
    for nc_dc in nc_dc_list:
        if vc_dc.name == nc_dc.name:
            active_nc_dc = nc_dc
            logger.debug('Found Datacenter %s in Nuage vCenter Deployment Tool' % vc_dc.name)
            break
    # If the Datacenter does not exist in Nuage vCenter Deployment Tool, create it
    if not active_nc_dc:
        logger.debug('Datacenter %s not found in the vCenter %s in the Nuage vCenter Deployment Tool, creating' % (vc_dc.name, vcenter_name))
        active_nc_dc = vsdk.NUVCenterDataCenter(name=vc_dc.name)
        nuage_vcenter.create_child(active_nc_dc)
        logger.info('Created Datacenter %s from the vCenter %s in the Nuage vCenter Deployment Tool' % (vc_dc.name, vcenter_name))
    # Getting clusters in the current vCenter Datacenter
    logger.debug('Gathering all Clusters from the vCenter Datacenter %s' % vc_dc.name)
    content = vc.content
    obj_view = content.viewManager.CreateContainerView(vc_dc, [vim.ClusterComputeResource], True)
    vc_cl_list = obj_view.view
    obj_view.Destroy()
    # Getting clusters in current Nuage Datacenter
    logger.debug('Gathering all Clusters from the Nuage Datacenter %s' % vc_dc.name)
    nc_cl_list = active_nc_dc.vcenter_clusters.get()
    for vc_cl in vc_cl_list:
        if all_clusters or vc_cl.name in clusters:
            logger.debug('vCenter Cluster %s is in list that has to be present in the Nuage vCenter Deployment Tool, checking if it already exists.' % vc_cl.name)
            # BUG FIX: the hv_password argument had been replaced by an invalid
            # <PASSWORD> placeholder (a syntax error); forward the real value.
            handle_vdt_cluster(logger=logger, nc=nc, vc=vc, vc_dc=vc_dc, vc_cl=vc_cl, nuage_dc=active_nc_dc, nc_cl_list=nc_cl_list, all_hosts=all_hosts, hosts=hosts, hosts_list=hosts_list, hv_username=hv_username, hv_password=hv_password, hv_management_network=hv_management_network, hv_data_network=hv_data_network, hv_vm_network=hv_vm_network, hv_mc_network=hv_mc_network, host_configure_agent=host_configure_agent, allow_fqdn=allow_fqdn)
def handle_vdt_cluster(logger, nc, vc, vc_dc, vc_cl, nuage_dc, nc_cl_list, all_hosts, hosts, hosts_list, hv_username, hv_password, hv_management_network, hv_data_network, hv_vm_network, hv_mc_network, host_configure_agent, allow_fqdn):
    """Ensure vCenter Cluster *vc_cl* exists in the Nuage vCenter Deployment
    Tool (creating it if absent), then sync every host of the cluster that is
    selected by *all_hosts* / *hosts* via handle_vdt_host, resolving each
    host's management IP (or FQDN when *allow_fqdn* is set).
    """
    # Checking if the Cluster exists in the Nuage vCenter Deployment Tool
    logger.debug('Checking vCenter Cluster %s in Nuage vCenter Deployment Tool' % vc_cl.name)
    active_nc_cl = None
    for nc_cl in nc_cl_list:
        if vc_cl.name == nc_cl.name:
            active_nc_cl = nc_cl
            logger.debug('Found Cluster %s in Nuage vCenter Deployment Tool' % vc_cl.name)
            break
    if not active_nc_cl:
        logger.debug('Cluster %s not found in the vCenter Datacenter %s in the Nuage vCenter Deployment Tool, creating' % (vc_cl.name, vc_dc.name))
        active_nc_cl = vsdk.NUVCenterCluster(name=vc_cl.name)
        nuage_dc.create_child(active_nc_cl)
        logger.info('Created Cluster %s from the vCenter Datacenter %s in the Nuage vCenter Deployment Tool' % (vc_cl.name, vc_dc.name))
    # Getting hosts in the current vCenter Cluster
    logger.debug('Gathering all Hosts from the vCenter Cluster %s' % vc_cl.name)
    content = vc.content
    obj_view = content.viewManager.CreateContainerView(vc_cl, [vim.HostSystem], True)
    vc_host_list = obj_view.view
    obj_view.Destroy()
    # Getting hosts in current Nuage Cluster
    logger.debug('Gathering all Hosts from the Nuage Cluster %s' % vc_cl.name)
    nc_host_list = active_nc_cl.vcenter_hypervisors.get()
    for vc_host in vc_host_list:
        if all_hosts:
            # Determining Host management IP
            vc_host_ip = None
            if allow_fqdn:
                vc_host_ip = vc_host.name
            else:
                # Determine management IP based on 'management' property
                vnic_mgmtIP_list = []
                for vc_host_NicManager in vc_host.config.virtualNicManagerInfo.netConfig:
                    if vc_host_NicManager.nicType == 'management':
                        if(len(vc_host_NicManager.selectedVnic) > 0):
                            for vnic in vc_host_NicManager.candidateVnic:
                                if vnic.key in vc_host_NicManager.selectedVnic:
                                    if ip_address_is_valid(vnic.spec.ip.ipAddress):
                                        vnic_mgmtIP_list.append(vnic.spec.ip.ipAddress)
                            break
                if len(vnic_mgmtIP_list) > 0:
                    for vnic_ip in vnic_mgmtIP_list:
                        if ip_address_is_valid(vnic_ip):
                            # typo fix: message said 'managenent'
                            logger.debug('Found management IP %s for vCenter Host %s' % (vnic_ip, vc_host.name))
                            vc_host_ip = vnic_ip
                            break
                else:
                    # Did not find any Management IP, use first IP
                    for vnic in vc_host.config.network.vnic:
                        logger.debug('Checking vnic for Host %s in vCenter Cluster %s' % (vc_host.name, vc_cl.name))
                        if ip_address_is_valid(vnic.spec.ip.ipAddress):
                            logger.debug('Found management IP %s for vCenter Host %s' % (vnic.spec.ip.ipAddress, vc_host.name))
                            vc_host_ip = vnic.spec.ip.ipAddress
                            break
            # BUG FIX (all three calls below): the hv_password argument had been
            # replaced by an invalid <PASSWORD> placeholder (a syntax error);
            # forward the real value.
            handle_vdt_host(logger=logger, nc=nc, vc=vc, vc_cl=vc_cl, vc_host=vc_host, vc_host_ip=vc_host_ip, nuage_cl=active_nc_cl, nc_host_list=nc_host_list, hosts_list=hosts_list, hv_username=hv_username, hv_password=hv_password, hv_management_network=hv_management_network, hv_data_network=hv_data_network, hv_vm_network=hv_vm_network, hv_mc_network=hv_mc_network, host_configure_agent=host_configure_agent, allow_fqdn=allow_fqdn)
        elif allow_fqdn and vc_host.name in hosts:
            logger.debug('vCenter Host %s is in list that has to be present in the Nuage vCenter Deployment Tool, checking if it already exists.' % vc_host.name)
            handle_vdt_host(logger=logger, nc=nc, vc=vc, vc_cl=vc_cl, vc_host=vc_host, vc_host_ip=vc_host.name, nuage_cl=active_nc_cl, nc_host_list=nc_host_list, hosts_list=hosts_list, hv_username=hv_username, hv_password=hv_password, hv_management_network=hv_management_network, hv_data_network=hv_data_network, hv_vm_network=hv_vm_network, hv_mc_network=hv_mc_network, host_configure_agent=host_configure_agent, allow_fqdn=allow_fqdn)
        else:
            # Get all IPs in a list for this host to check if the IP is present in the hosts to add
            for vnic in vc_host.config.network.vnic:
                logger.debug('Found IP %s for vCenter Host %s' % (vnic.spec.ip.ipAddress, vc_host.name))
                if vnic.spec.ip.ipAddress in hosts:
                    logger.debug('vCenter Host %s with IP %s is in list that has to be present in the Nuage vCenter Deployment Tool, checking if it already exists.' % (vc_host.name, vnic.spec.ip.ipAddress))
                    handle_vdt_host(logger=logger, nc=nc, vc=vc, vc_cl=vc_cl, vc_host=vc_host, vc_host_ip=vnic.spec.ip.ipAddress, nuage_cl=active_nc_cl, nc_host_list=nc_host_list, hosts_list=hosts_list, hv_username=hv_username, hv_password=hv_password, hv_management_network=hv_management_network, hv_data_network=hv_data_network, hv_vm_network=hv_vm_network, hv_mc_network=hv_mc_network, host_configure_agent=host_configure_agent, allow_fqdn=allow_fqdn)
                    break
def handle_vdt_host(logger, nc, vc, vc_cl, vc_host, vc_host_ip, nuage_cl, nc_host_list, hosts_list, hv_username, hv_password, hv_management_network, hv_data_network, hv_vm_network, hv_mc_network, host_configure_agent, allow_fqdn):
logger.debug('Checking vCenter Host %s in the Nuage vCenter Deployment Tool' % vc_host.name)
active_nc_host = None
for nc_host in nc_host_list:
if vc_host_ip == nc_host.hypervisor_ip:
logger.debug('Found Host with IP %s in the Nuage vCenter Deployment Tool' % vc_host_ip)
active_nc_host = nc_host
break
if not active_nc_host:
logger.debug('Host %s with IP %s not found in the vCenter Cluster %s in the Nuage vCenter Deployment Tool, creating' % (vc_host.name, vc_host_ip, vc_cl.name))
active_nc_host = vsdk.NUVCenterHypervisor(name=vc_host.name, hypervisor_ip=vc_host_ip, hypervisor_user=hv_username, hypervisor_password=<PASSWORD>, mgmt_network_portgroup=hv_management_network, data_network_portgroup=hv_data_network, vm_network_portgroup=hv_vm_network, multicast_source_portgroup=hv_mc_network)
nuage_cl.create_child(active_nc_host)
logger.info('Created Host %s with IP %s from the vCenter Cluster %s in the Nuage vCenter Deployment Tool' % (vc_host.name, vc_host_ip, vc_cl.name))
# Once we come here, we can update the host (circumventing a known issue with the creation of a host not setting its networks)
active_nc_host.mgmt_network_portgroup = hv_management_network
active_nc_host.data_network_portgroup = hv_data_network
active_nc_host.vm_network_portgroup = hv_vm_network
active_nc_host.multicast_source_portgroup = hv_mc_network
# Setting base values for vCenter Host VM Agent configuration in case they are needed
agent_portgroup_name = hv_management_network
agent_datastore_name = None
# if hosts_list is not empty, use those values if they are set, if it is, use the general ones
if hosts_list:
if vc_host_ip in hosts_list:
logger.debug('Host %s with IP %s from the vCenter Cluster %s found in the hosts file. Updating its information from the file' % (vc_host.name, vc_host_ip, vc_cl.name))
row = hosts_list[vc_host_ip]
# 0 - "<IP>" - hypervisor_ip
# 1 - "[name]" - name
if row[1]:
active_nc_host.name = row[1]
# 2 - "[hypervisor user]" - hypervisor_user
if row[2]:
active_nc_host.hypervisor_user = row[2]
# 3 - "[hypervisor password]" - hypervisor_password
if row[3]:
active_nc_host.hypervisor_password = row[3]
# 4 - "[management network portgroup]" - mgmt_network_portgroup
if row[4]:
active_nc_host.mgmt_network_portgroup = row[4]
# 5 - "[data network portgroup]" - data_network_portgroup
if row[5]:
active_nc_host.data_network_portgroup = row[5]
# 6 - "[vm network portgroup]" - vm_network_portgroup
if row[6]:
active_nc_host.vm_network_portgroup = row[6]
# 7 - "[multicast sourece portgroup]" - multicast_source_portgroup
if row[7]:
active_nc_host.multicast_source_portgroup = row[7]
# 8 - "[use management DHCP (True|False)]" - allow_mgmt_dhcp
if row[8].lower() == 'true':
active_nc_host.allow_mgmt_dhcp = True
else:
active_nc_host.allow_mgmt_dhcp = False
# 9 - "[management IP]" - mgmt_ip_address
if row[9] and ip_address_is_valid(row[9]):
active_nc_host.mgmt_ip_address = row[9]
# 10 - "[management netmask (octet structure)]" - mgmt_netmask
if row[10]:
active_nc_host.mgmt_netmask = row[10]
# 11 - "[management gateway]" - mgmt_gateway
if row[11] and ip_address_is_valid(row[11]):
active_nc_host.mgmt_gateway = row[11]
# 12 - "[management DNS 1]" - mgmt_dns1
if row[12] and ip_address_is_valid(row[12]):
active_nc_host.mgmt_dns1 = row[12]
# 13 - "[management DNS 2]" - mgmt_dns2
if row[13] and ip_address_is_valid(row[13]):
| |
describing how dynamic usernames are generated.
"""
return pulumi.get(self, "username_template")
@pulumi.output_type
class SecretBackendConnectionMongodbatlas(dict):
    """Output type holding the MongoDB Atlas connection credentials."""

    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase wire names onto their snake_case property getters.
        aliases = {
            "privateKey": "private_key",
            "projectId": "project_id",
            "publicKey": "public_key",
        }
        suggest = aliases.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretBackendConnectionMongodbatlas. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SecretBackendConnectionMongodbatlas.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SecretBackendConnectionMongodbatlas.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 private_key: str,
                 project_id: str,
                 public_key: str):
        """
        :param str private_key: The Private Programmatic API Key used to connect with MongoDB Atlas API.
        :param str project_id: The Project ID the Database User should be created within.
        :param str public_key: The Public Programmatic API Key used to authenticate with the MongoDB Atlas API.
        """
        pulumi.set(__self__, "private_key", private_key)
        pulumi.set(__self__, "project_id", project_id)
        pulumi.set(__self__, "public_key", public_key)

    @property
    @pulumi.getter(name="privateKey")
    def private_key(self) -> str:
        """The Private Programmatic API Key used to connect with MongoDB Atlas API."""
        return pulumi.get(self, "private_key")

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> str:
        """The Project ID the Database User should be created within."""
        return pulumi.get(self, "project_id")

    @property
    @pulumi.getter(name="publicKey")
    def public_key(self) -> str:
        """The Public Programmatic API Key used to authenticate with the MongoDB Atlas API."""
        return pulumi.get(self, "public_key")
@pulumi.output_type
class SecretBackendConnectionMssql(dict):
    """Output type describing an MSSQL connection for the database secret backend."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a raw camelCase dict key is used instead of the
        # corresponding snake_case property getter.
        suggest = {
            "connectionUrl": "connection_url",
            "maxConnectionLifetime": "max_connection_lifetime",
            "maxIdleConnections": "max_idle_connections",
            "maxOpenConnections": "max_open_connections",
            "usernameTemplate": "username_template",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretBackendConnectionMssql. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SecretBackendConnectionMssql.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SecretBackendConnectionMssql.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 connection_url: Optional[str] = None,
                 max_connection_lifetime: Optional[int] = None,
                 max_idle_connections: Optional[int] = None,
                 max_open_connections: Optional[int] = None,
                 username_template: Optional[str] = None):
        """
        :param str connection_url: A URL containing connection information. See
               the [Vault
               docs](https://www.vaultproject.io/api-docs/secret/databases/snowflake#sample-payload)
               for an example.
        :param int max_connection_lifetime: The maximum number of seconds to keep
               a connection alive for.
        :param int max_idle_connections: The maximum number of idle connections to
               maintain.
        :param int max_open_connections: The maximum number of open connections to
               use.
        :param str username_template: - [Template](https://www.vaultproject.io/docs/concepts/username-templating) describing how dynamic usernames are generated.
        """
        # All fields are optional; only set the keys that were provided.
        for prop_name, prop_value in (
            ("connection_url", connection_url),
            ("max_connection_lifetime", max_connection_lifetime),
            ("max_idle_connections", max_idle_connections),
            ("max_open_connections", max_open_connections),
            ("username_template", username_template),
        ):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="connectionUrl")
    def connection_url(self) -> Optional[str]:
        """
        A URL containing connection information; see the Vault docs for an example.
        """
        return pulumi.get(self, "connection_url")

    @property
    @pulumi.getter(name="maxConnectionLifetime")
    def max_connection_lifetime(self) -> Optional[int]:
        """
        The maximum number of seconds to keep a connection alive for.
        """
        return pulumi.get(self, "max_connection_lifetime")

    @property
    @pulumi.getter(name="maxIdleConnections")
    def max_idle_connections(self) -> Optional[int]:
        """
        The maximum number of idle connections to maintain.
        """
        return pulumi.get(self, "max_idle_connections")

    @property
    @pulumi.getter(name="maxOpenConnections")
    def max_open_connections(self) -> Optional[int]:
        """
        The maximum number of open connections to use.
        """
        return pulumi.get(self, "max_open_connections")

    @property
    @pulumi.getter(name="usernameTemplate")
    def username_template(self) -> Optional[str]:
        """
        - [Template](https://www.vaultproject.io/docs/concepts/username-templating) describing how dynamic usernames are generated.
        """
        return pulumi.get(self, "username_template")
@pulumi.output_type
class SecretBackendConnectionMysql(dict):
    """Output type describing a MySQL connection for the database secret backend."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a raw camelCase dict key is used instead of the
        # corresponding snake_case property getter.
        suggest = {
            "connectionUrl": "connection_url",
            "maxConnectionLifetime": "max_connection_lifetime",
            "maxIdleConnections": "max_idle_connections",
            "maxOpenConnections": "max_open_connections",
            "tlsCa": "tls_ca",
            "tlsCertificateKey": "tls_certificate_key",
            "usernameTemplate": "username_template",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretBackendConnectionMysql. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SecretBackendConnectionMysql.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SecretBackendConnectionMysql.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 connection_url: Optional[str] = None,
                 max_connection_lifetime: Optional[int] = None,
                 max_idle_connections: Optional[int] = None,
                 max_open_connections: Optional[int] = None,
                 tls_ca: Optional[str] = None,
                 tls_certificate_key: Optional[str] = None,
                 username_template: Optional[str] = None):
        """
        :param str connection_url: A URL containing connection information. See
               the [Vault
               docs](https://www.vaultproject.io/api-docs/secret/databases/snowflake#sample-payload)
               for an example.
        :param int max_connection_lifetime: The maximum number of seconds to keep
               a connection alive for.
        :param int max_idle_connections: The maximum number of idle connections to
               maintain.
        :param int max_open_connections: The maximum number of open connections to
               use.
        :param str tls_ca: x509 CA file for validating the certificate presented by the MySQL server. Must be PEM encoded.
        :param str tls_certificate_key: x509 certificate for connecting to the database. This must be a PEM encoded version of the private key and the certificate combined.
        :param str username_template: - [Template](https://www.vaultproject.io/docs/concepts/username-templating) describing how dynamic usernames are generated.
        """
        # All fields are optional; only set the keys that were provided.
        for prop_name, prop_value in (
            ("connection_url", connection_url),
            ("max_connection_lifetime", max_connection_lifetime),
            ("max_idle_connections", max_idle_connections),
            ("max_open_connections", max_open_connections),
            ("tls_ca", tls_ca),
            ("tls_certificate_key", tls_certificate_key),
            ("username_template", username_template),
        ):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="connectionUrl")
    def connection_url(self) -> Optional[str]:
        """
        A URL containing connection information; see the Vault docs for an example.
        """
        return pulumi.get(self, "connection_url")

    @property
    @pulumi.getter(name="maxConnectionLifetime")
    def max_connection_lifetime(self) -> Optional[int]:
        """
        The maximum number of seconds to keep a connection alive for.
        """
        return pulumi.get(self, "max_connection_lifetime")

    @property
    @pulumi.getter(name="maxIdleConnections")
    def max_idle_connections(self) -> Optional[int]:
        """
        The maximum number of idle connections to maintain.
        """
        return pulumi.get(self, "max_idle_connections")

    @property
    @pulumi.getter(name="maxOpenConnections")
    def max_open_connections(self) -> Optional[int]:
        """
        The maximum number of open connections to use.
        """
        return pulumi.get(self, "max_open_connections")

    @property
    @pulumi.getter(name="tlsCa")
    def tls_ca(self) -> Optional[str]:
        """
        x509 CA file for validating the certificate presented by the MySQL server. Must be PEM encoded.
        """
        return pulumi.get(self, "tls_ca")

    @property
    @pulumi.getter(name="tlsCertificateKey")
    def tls_certificate_key(self) -> Optional[str]:
        """
        x509 certificate for connecting to the database. This must be a PEM encoded version of the private key and the certificate combined.
        """
        return pulumi.get(self, "tls_certificate_key")

    @property
    @pulumi.getter(name="usernameTemplate")
    def username_template(self) -> Optional[str]:
        """
        - [Template](https://www.vaultproject.io/docs/concepts/username-templating) describing how dynamic usernames are generated.
        """
        return pulumi.get(self, "username_template")
@pulumi.output_type
class SecretBackendConnectionMysqlAurora(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "connectionUrl":
suggest = "connection_url"
elif key == "maxConnectionLifetime":
suggest = "max_connection_lifetime"
elif key == "maxIdleConnections":
suggest = "max_idle_connections"
elif key == "maxOpenConnections":
suggest = "max_open_connections"
elif key == "usernameTemplate":
suggest = "username_template"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SecretBackendConnectionMysqlAurora. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SecretBackendConnectionMysqlAurora.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SecretBackendConnectionMysqlAurora.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
connection_url: Optional[str] = None,
max_connection_lifetime: Optional[int] = None,
max_idle_connections: Optional[int] = None,
max_open_connections: Optional[int] = None,
username_template: Optional[str] = None):
"""
:param str connection_url: A URL containing connection information. See
the [Vault
docs](https://www.vaultproject.io/api-docs/secret/databases/snowflake#sample-payload)
for an example.
:param int max_connection_lifetime: The maximum number of seconds to keep
a connection alive for.
:param int max_idle_connections: The maximum number of idle connections to
maintain.
:param int max_open_connections: The maximum number of open connections to
use.
:param str username_template: - [Template](https://www.vaultproject.io/docs/concepts/username-templating) describing how dynamic usernames are generated.
"""
if connection_url is not None:
pulumi.set(__self__, "connection_url", connection_url)
if max_connection_lifetime is not None:
pulumi.set(__self__, "max_connection_lifetime", max_connection_lifetime)
if max_idle_connections is not None:
pulumi.set(__self__, "max_idle_connections", max_idle_connections)
if max_open_connections is not None:
pulumi.set(__self__, "max_open_connections", max_open_connections)
if username_template is not None:
pulumi.set(__self__, "username_template", username_template)
@property
@pulumi.getter(name="connectionUrl")
def connection_url(self) -> Optional[str]:
"""
A URL containing connection information. See
the [Vault
docs](https://www.vaultproject.io/api-docs/secret/databases/snowflake#sample-payload)
for an example.
"""
return pulumi.get(self, "connection_url")
@property
@pulumi.getter(name="maxConnectionLifetime")
def max_connection_lifetime(self) -> Optional[int]:
"""
The maximum number of seconds to keep
a connection alive for.
"""
return pulumi.get(self, "max_connection_lifetime")
@property
@pulumi.getter(name="maxIdleConnections")
def max_idle_connections(self) -> Optional[int]:
"""
The maximum number of idle connections to
maintain.
"""
return pulumi.get(self, "max_idle_connections")
@property
@pulumi.getter(name="maxOpenConnections")
def max_open_connections(self) -> Optional[int]:
"""
The maximum number of open connections to
use.
"""
return pulumi.get(self, "max_open_connections")
@property
@pulumi.getter(name="usernameTemplate")
def username_template(self) -> Optional[str]:
"""
- [Template](https://www.vaultproject.io/docs/concepts/username-templating) describing how dynamic usernames are | |
# Repository: SX-Aurora/nlcpy
import functools
import unittest
import pytest # NOQA
import numpy as np
from numpy.testing import assert_allclose
import nlcpy # NOQA
from nlcpy import testing
# Integer dtypes exercised by the FFT tests; used to decide when a numpy
# input must be cast before comparison (see _numpy_fftn_correct_dtype).
signed_int_types = [np.int32, np.int64]
unsigned_int_types = [np.uint32, np.uint64]
int_types = signed_int_types + unsigned_int_types

# Module-level switch toggled by the nd_planning_states decorator.
# (The previous bare ``global`` statement at module scope was a no-op.)
enable_nd_planning = True


def nd_planning_states(states=(True, False), name='enable_nd'):
    """Decorator for parameterized tests with and without nd planning.

    The wrapped test runs once per element of ``states``, with the
    module-level ``enable_nd_planning`` flag set accordingly and restored
    afterwards (even on failure).

    Args:
        states(tuple of bool): The boolean cases to test.
            (A tuple default avoids the mutable-default-argument pitfall.)
        name(str): Keyword-argument name under which the current planning
            state is passed to the test.

    This decorator adds a keyword argument specified by ``name``
    to the test fixture and invokes the fixture once per state.
    """
    def decorator(impl):
        @functools.wraps(impl)
        def test_func(self, *args, **kw):
            # remember the original global planning state
            global enable_nd_planning
            planning_state = enable_nd_planning
            try:
                for nd_planning in states:
                    try:
                        # enable or disable nd planning for this run
                        enable_nd_planning = nd_planning
                        kw[name] = nd_planning
                        impl(self, *args, **kw)
                    except Exception:
                        # report which state was active before propagating
                        print(name, 'is', nd_planning)
                        raise
            finally:
                # restore original global planning state
                enable_nd_planning = planning_state
        return test_func
    return decorator
def _numpy_fftn_correct_dtype(xp, a):
if xp == np and a.dtype in int_types + [np.bool]:
a = xp.asarray(a, dtype=np.float64)
return a
def _size_last_transform_axis(shape, s, axes):
if s is not None:
if s[-1] is not None:
return s[-1]
elif axes is not None:
return shape[axes[-1]]
return shape[-1]
@testing.parameterize(
    {'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
    {'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
    {'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
    {'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
    {'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
    {'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
    {'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
    {'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.with_requires('numpy>=1.10.0')
class TestFft2(unittest.TestCase):
    """Compare nlcpy.fft.fft2/ifft2 against numpy over parameterized cases."""

    @nd_planning_states()
    @testing.for_all_dtypes()
    @testing.for_orders("CF")
    @testing.numpy_nlcpy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                  contiguous_check=False)
    def test_fft2(self, xp, dtype, order, enable_nd):
        """Forward 2-D FFT must agree between numpy and nlcpy."""
        global enable_nd_planning
        assert enable_nd_planning == enable_nd
        a = testing.shaped_random(self.shape, xp, dtype)
        a = xp.asarray(a, order=order)
        a = _numpy_fftn_correct_dtype(xp, a)
        # Fix: forward the parameterized axes.  ``self.axes`` is declared in
        # the parameter sets above but was never passed, so the axes cases
        # silently exercised only the default (-2, -1).
        out = xp.fft.fft2(a, s=self.s, axes=self.axes, norm=self.norm)
        if xp == np and dtype in [np.float16, np.float32, np.complex64]:
            # cast the numpy reference down so precisions match nlcpy's
            out = out.astype(np.complex64)
        return out

    @nd_planning_states()
    @testing.for_all_dtypes()
    @testing.for_orders("CF")
    @testing.numpy_nlcpy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                  contiguous_check=False)
    def test_ifft2(self, xp, dtype, order, enable_nd):
        """Inverse 2-D FFT must agree between numpy and nlcpy."""
        global enable_nd_planning
        assert enable_nd_planning == enable_nd
        a = testing.shaped_random(self.shape, xp, dtype)
        a = xp.asarray(a, order=order)
        a = _numpy_fftn_correct_dtype(xp, a)
        # Fix: forward the parameterized axes (see test_fft2 above).
        out = xp.fft.ifft2(a, s=self.s, axes=self.axes, norm=self.norm)
        if xp == np and dtype in [np.float16, np.float32, np.complex64]:
            out = out.astype(np.complex64)
        return out
class TestFft2DInvalidParam(object):
    """Error-path tests for nlcpy.fft.fft2/ifft2.

    Each case checks that malformed inputs (bad arrays, out-of-range or
    duplicated axes, invalid ``s``) raise the expected exception type.
    """

    # Scalars, complex scalars, string elements and empty containers are
    # rejected with ValueError.
    @pytest.mark.parametrize('a', (1, 1 + 2j,
                                   ["aaa"], [],
                                   ("aaa",), (),
                                   ))
    def test_fft2_param_array(self, a):
        with pytest.raises(ValueError):
            nlcpy.fft.fft2(a)

    # Mixed numeric/string rows coerce to a unicode (U21) array; the branch
    # below encodes that the expected outcome depends on the numpy version:
    # pre-1.19 a ValueError is expected, otherwise nlcpy's result is compared
    # against numpy's directly.
    @pytest.mark.parametrize('a', (
        [[1, 2], [3, "4"]],
        ((1, 2), (3, "4")),
        ([1, 2], [3, "4"]), [(1, 2), (3, "4")],
        [[1, 2], (3, "4")], ((1, 2), [3, "4"]),))
    def test_fft2_param_array_U21(self, a):
        if np.__version__ < np.lib.NumpyVersion('1.19.0'):
            with pytest.raises(ValueError):
                nlcpy.fft.fft2(a)
        else:
            assert_allclose(nlcpy.fft.fft2(a), np.fft.fft2(a))

    # Same invalid-array cases, inverse transform.
    @pytest.mark.parametrize('a', (1, 1 + 2j,
                                   ["aaa"], [],
                                   ("aaa",), (),
                                   ))
    def test_ifft2_param_array(self, a):
        with pytest.raises(ValueError):
            nlcpy.fft.ifft2(a)

    # Same version-dependent unicode-coercion cases, inverse transform.
    @pytest.mark.parametrize('a', (
        [[1, 2], [3, "4"]],
        ((1, 2), (3, "4")),
        ([1, 2], [3, "4"]), [(1, 2), (3, "4")],
        [[1, 2], (3, "4")], ((1, 2), [3, "4"]),))
    def test_ifft2_param_array_U21(self, a):
        if np.__version__ < np.lib.NumpyVersion('1.19.0'):
            with pytest.raises(ValueError):
                nlcpy.fft.ifft2(a)
        else:
            assert_allclose(nlcpy.fft.ifft2(a), np.fft.ifft2(a))

    # Axes that are out of range or of invalid length raise ValueError.
    @pytest.mark.parametrize('param', (
        ([[1, 2, 3], [4, 5, 6]], (-1, -3)),
        ([[1, 2, 3], [4, 5, 6]], (0, 2)),
        ([[1, 2, 3], [4, 5, 6]], (0, 0, 0, 0, 5)),
        ([[1, 2, 3], [4, 5, 6]], (5, 0, 0, 0, 0)),
    ))
    def test_fft2_param_axes(self, param):
        with pytest.raises(ValueError):
            nlcpy.fft.fft2(param[0], axes=param[1])

    @pytest.mark.parametrize('param', (
        ([[1, 2, 3], [4, 5, 6]], (-1, -3)),
        ([[1, 2, 3], [4, 5, 6]], (0, 2)),
        ([[1, 2, 3], [4, 5, 6]], (0, 0, 0, 0, 5)),
        ([[1, 2, 3], [4, 5, 6]], (5, 0, 0, 0, 0)),
    ))
    def test_ifft2_param_axes(self, param):
        with pytest.raises(ValueError):
            nlcpy.fft.ifft2(param[0], axes=param[1])

    # Non-sequence ``s`` values raise TypeError ...
    @pytest.mark.parametrize('s', (1, 1 + 2j, ""))
    def test_fft2_param_s_TypeError(self, s):
        with pytest.raises(TypeError):
            nlcpy.fft.fft2([[1, 2, 3], [4, 5, 6]], s=s)

    # ... while sequences with invalid lengths/values raise ValueError.
    @pytest.mark.parametrize('s', ([0, 1], [], [-1], [""], (0, 1), (), (-1, ), ("",)))
    def test_fft2_param_s_ValueError(self, s):
        with pytest.raises(ValueError):
            nlcpy.fft.fft2([[1, 2, 3], [4, 5, 6]], s=s)

    @pytest.mark.parametrize('s', (1, 1 + 2j, ""))
    def test_ifft2_param_s_TypeError(self, s):
        with pytest.raises(TypeError):
            nlcpy.fft.ifft2([[1, 2, 3], [4, 5, 6]], s=s)

    @pytest.mark.parametrize('s', ([0, 1], [], [-1], [""], (0, 1), (), (-1, ), ("",)))
    def test_ifft2_param_s_ValueError(self, s):
        with pytest.raises(ValueError):
            nlcpy.fft.ifft2([[1, 2, 3], [4, 5, 6]], s=s)

    # ``s`` and ``axes`` with mismatched lengths raise ValueError for either norm.
    @pytest.mark.parametrize('norm', (None, 'ortho'))
    @pytest.mark.parametrize('param', (
        ((2, 3), (0, 1, 2)),
        ((2,), (0, 1, 2)),
        ((2,), (0, 1))
    ))
    def test_fft2_invalid_axes_s(self, param, norm):
        a = nlcpy.arange(24).reshape(2, 3, 4)
        with pytest.raises(ValueError):
            nlcpy.fft.fft2(a, s=param[0], axes=param[1], norm=norm)

    @pytest.mark.parametrize('norm', (None, 'ortho'))
    @pytest.mark.parametrize('param', (
        ((2, 3), (0, 1, 2)),
        ((2,), (0, 1, 2)),
        ((2,), (0, 1))
    ))
    def test_ifft2_invalid_axes_s(self, param, norm):
        a = nlcpy.arange(24).reshape(2, 3, 4)
        with pytest.raises(ValueError):
            nlcpy.fft.ifft2(a, s=param[0], axes=param[1], norm=norm)
@testing.parameterize(
    {'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
    {'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
    {'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
    {'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
    {'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
    {'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
    {'shape': (2, 3, 4), 's': (1, 10, 4), 'axes': None, 'norm': None},
    {'shape': (2, 3, 4), 's': (4, 1, 10), 'axes': None, 'norm': None},
    {'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': (1, 10, 4), 'axes': None, 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': (4, 1, 10), 'axes': None, 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': (-3, -1, -2), 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': (-1, -3, -2), 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': None, 'axes': (-3, -1, -2), 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': None, 'axes': (-1, -3, -2), 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': 'ortho'},
    {'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
    {'shape': (2, 3, 4, 5, 6), 's': None, 'axes': (0, 1, 2, 3), 'norm': None},
    {'shape': (2, 3, 4, 5, 6), 's': None, 'axes': (3, 2, 1, 0), 'norm': None},
    {'shape': (2, 3, 4, 5, 6), 's': None, 'axes': (4, 3, 2, 1), 'norm': None},
    {'shape': (2, 3, 4, 5, 6), 's': None, 'axes': (1, 2, 3, 4), 'norm': None},
    {'shape': (2, 3, 4, 5, 6), 's': None, 'axes': (0, 2, 1, 3), 'norm': None},
    {'shape': (2, 3, 4, 5, 6), 's': None, 'axes': (4, 2, 3, 1), 'norm': None},
)
@testing.with_requires('numpy>=1.10.0')
class TestFftn(unittest.TestCase):
    """Compare nlcpy.fft.fftn/ifftn against numpy over parameterized cases."""

    @nd_planning_states()
    @testing.for_all_dtypes()
    @testing.for_orders("CF")
    @testing.numpy_nlcpy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                  contiguous_check=False)
    def test_fftn(self, xp, dtype, order, enable_nd):
        """N-dimensional forward FFT must agree between numpy and nlcpy."""
        global enable_nd_planning
        assert enable_nd_planning == enable_nd
        data = xp.asarray(testing.shaped_random(self.shape, xp, dtype), order=order)
        data = _numpy_fftn_correct_dtype(xp, data)
        result = xp.fft.fftn(data, s=self.s, axes=self.axes, norm=self.norm)
        if xp == np and dtype in (np.float16, np.float32, np.complex64):
            # cast the numpy reference down so precisions match nlcpy's
            result = result.astype(np.complex64)
        return result

    @nd_planning_states()
    @testing.for_all_dtypes()
    @testing.for_orders("CF")
    @testing.numpy_nlcpy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                  contiguous_check=False)
    def test_ifftn(self, xp, dtype, order, enable_nd):
        """N-dimensional inverse FFT must agree between numpy and nlcpy."""
        global enable_nd_planning
        assert enable_nd_planning == enable_nd
        data = xp.asarray(testing.shaped_random(self.shape, xp, dtype), order=order)
        data = _numpy_fftn_correct_dtype(xp, data)
        result = xp.fft.ifftn(data, s=self.s, axes=self.axes, norm=self.norm)
        if xp == np and dtype in (np.float16, np.float32, np.complex64):
            result = result.astype(np.complex64)
        return result
@testing.parameterize(
{'shape': (3, 4), 's': | |
from sklearn.linear_model import LinearRegression
import xgboost
import shap
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn import preprocessing, impute
import numpy as np
from sklearn.decomposition import PCA
class ProcessingBlock:
    """Pre-processing stage of the pipeline.

    Uses ``sklearn.preprocessing.StandardScaler`` for standardization of
    inputs and ``sklearn.impute.SimpleImputer`` (most-frequent strategy)
    for imputing missing values.
    """

    def __init__(self):
        print("Processing Block Constructed")
        self.X_scaler = preprocessing.StandardScaler()
        self.y_scaler = preprocessing.StandardScaler()
        # most_frequent works for both numeric and categorical columns
        self.imputer = impute.SimpleImputer(
            missing_values=np.nan, strategy="most_frequent"
        )

    def fit(self, X, y):
        """Store the given X, y data on the object (sklearn-style no-op fit).

        Args:
            X (np.array or pd.DataFrame): Input data.
            y (np.array or pd.DataFrame): Input labels.

        Returns:
            self: The object itself.
        """
        self.X = X
        self.y = y
        return self

    def split_data(self, X, y=None, test_split=0.2, scale=False):
        """Split the data into training and validation sets.

        Records the resulting row indices in ``self.train_idx`` /
        ``self.val_idx`` so later stages can align cluster labels.

        Args:
            X (np.array or pd.DataFrame): Input data.
            y (np.array or pd.DataFrame, optional): Input labels. Defaults to None.
            test_split (float, optional): Test split fraction. Defaults to 0.2.
            scale (bool, optional): Standardize the data first (fits the
                scaler). Defaults to False.

        Returns:
            If ``y`` is given: ``X_train, X_val, y_train, y_val``;
            otherwise ``X_train, X_val``.
        """
        if scale:
            X = self.X_scaler.fit_transform(X)
        X_df = pd.DataFrame(X)
        if y is not None:
            X_train, X_val, y_train, y_val = train_test_split(
                X_df, y, test_size=test_split, random_state=0
            )
            self.train_idx = X_train.index
            self.val_idx = X_val.index
            return X_train, X_val, y_train, y_val
        X_train, X_val = train_test_split(
            X_df, test_size=test_split, random_state=0
        )
        self.train_idx = X_train.index
        self.val_idx = X_val.index
        return X_train, X_val

    def impute_data(self, X, y=None, scale=False):
        """Impute missing entries in the given data.

        Args:
            X (np.array or pd.DataFrame): Input data.
            y (np.array or pd.DataFrame, optional): Input labels; when given,
                they are imputed as well. Defaults to None.
            scale (bool, optional): Standardize the data with the already-
                fitted scaler first. Defaults to False.

        Returns:
            Imputed ``X`` (and imputed ``y`` when provided) as DataFrames.
        """
        if scale:
            # transform (not fit_transform): reuse statistics learned in split_data
            X = self.X_scaler.transform(X)
        if y is None:
            return pd.DataFrame(self.imputer.fit_transform(X))
        X_imputed = pd.DataFrame(self.imputer.fit_transform(X))
        # NOTE(review): refitting the shared imputer on y overwrites the
        # statistics just learned on X — presumably intentional here since
        # the imputer is never reused via transform(), but confirm.
        y_imputed = self.imputer.fit_transform(y.reshape(-1, 1))
        return X_imputed, y_imputed
class ExplainerBlock:
    """Explainer stage: trains a base model and wraps it in a SHAP explainer."""

    def __init__(self, explainer_type, params=None, kwargs=None):
        """Instantiate the explainer block.

        Args:
            explainer_type (str): Explainer model type; ``"Linear"`` and
                ``"XGBoost"`` are supported.
            params (dict, optional): XGBoost training parameters. Defaults to None.
            kwargs (dict, optional): XGBoost keyword arguments. Defaults to None.
        """
        print("Shapley Explainer Constructed")
        self.explainer_type = explainer_type
        self.eval_results = {}
        self.base_model = None
        if params is None:
            self.explainer_params = {
                "eta": 0.05,
                "max_depth": 3,
                "objective": "reg:squarederror",
                "subsample": 0.7,
                "eval_metric": "rmse",
                "lambda": 0.1,
            }
        else:
            self.explainer_params = params
        if kwargs is None:
            self.keyword_args = {
                "num_boost_round": 5000,
                "verbose_eval": 0,
                "evals_result": {},
                "early_stopping_rounds": 200,
            }
        else:
            self.keyword_args = kwargs

    def fit(self, X_exp, y_exp, X_train, y_train, X_val, y_val):
        """Train the base model and build the matching SHAP explainer.

        Args:
            X_exp (np.array or pd.DataFrame): Data whose Shapley values are computed.
            y_exp (np.array or pd.DataFrame): Labels for ``X_exp``.
            X_train (np.array or pd.DataFrame): Train partition (XGBoost only).
            y_train (np.array or pd.DataFrame): Train labels (XGBoost only).
            X_val (np.array or pd.DataFrame): Validation partition (XGBoost only).
            y_val (np.array or pd.DataFrame): Validation labels (XGBoost only).

        Returns:
            self: With ``base_model`` and ``explainer`` populated.
        """
        # Single dispatch on the explainer type (was two separate if-chains).
        if self.explainer_type == "Linear":
            self.base_model = LinearRegression().fit(X_exp, y_exp)
            self.explainer = shap.LinearExplainer(
                self.base_model, X_exp, feature_dependence="independent"
            )
        else:
            # eval_set (renamed from `eval`, which shadowed the builtin)
            eval_set = [
                (xgboost.DMatrix(X_train, label=y_train), "train"),
                (xgboost.DMatrix(X_val, label=y_val), "val"),
            ]
            self.base_model = xgboost.train(
                self.explainer_params,
                xgboost.DMatrix(X_train, label=y_train),
                evals=eval_set,
                **self.keyword_args
            )
            self.explainer = shap.TreeExplainer(self.base_model)
        return self

    def transform(self, X):
        """Transform input features into their Shapley values.

        Args:
            X (np.array or pd.DataFrame): Input features.

        Returns:
            np.array: Shapley values of the input features.
        """
        return self.explainer.shap_values(X)

    def fit_transform(self, X, y, X_train, y_train, X_val, y_val):
        """Convenience shortcut: ``fit`` followed by ``transform`` on ``X``."""
        self.fit(X, y, X_train, y_train, X_val, y_val)
        return self.transform(X)

    def predict(self, X):
        """Predict with the trained base model.

        Args:
            X (np.array or pd.DataFrame): Input features.

        Returns:
            np.array: Predictions.

        Raises:
            ValueError: If ``explainer_type`` is unsupported (previously this
                surfaced as an UnboundLocalError).
        """
        if self.explainer_type == "Linear":
            return self.base_model.predict(X)
        if self.explainer_type == "XGBoost":
            return self.base_model.predict(xgboost.DMatrix(X))
        raise ValueError(
            "Unsupported explainer_type: {!r}".format(self.explainer_type)
        )
class ClusterBlock:
    """Clustering stage: groups training instances with an unsupervised model
    and assigns unseen instances to those groups with a supervised classifier."""

    def __init__(self, nClusters, training_set_model, test_set_model):
        """Instantiate the cluster block.

        Args:
            nClusters (int): Number of clusters.
            training_set_model: Unsupervised clustering algorithm
                (K-Means suggested); must expose ``fit`` and ``labels_``.
            test_set_model: Supervised classification algorithm
                (K-NN suggested); must expose ``fit`` and ``predict``.
        """
        self.n_clusters = nClusters
        self.training_set_model = training_set_model
        self.test_set_model = test_set_model

    def fit(self, X, y):
        """Store the data on the object and return self. (Currently unused.)"""
        self.X = X
        self.y = y
        return self

    def transform(self):
        """Placeholder for pipeline compatibility; does nothing. (Currently unused.)"""
        pass

    def cluster_training_instances(self, X):
        """Fit the unsupervised model on ``X`` and return its cluster labels.

        Args:
            X (np.array or pd.DataFrame): Input features.

        Returns:
            np.array: Cluster label per instance.
        """
        clusterer = self.training_set_model
        clusterer.fit(X)
        return clusterer.labels_

    def cluster_test_instances(self, X, X_test):
        """Fit the classifier on ``X`` with the training cluster labels, then
        predict cluster membership for ``X_test``.

        Args:
            X (np.array or pd.DataFrame): Features used for clustering.
            X_test (np.array or pd.DataFrame): Test features to classify.

        Returns:
            np.array: Predicted cluster label per test instance.
        """
        classifier = self.test_set_model
        classifier.fit(X, self.training_set_model.labels_)
        return classifier.predict(X_test)
class EnsembleBlock:
    """Ensemble stage: trains one regressor per cluster and dispatches
    predictions by cluster label."""

    def __init__(self, model_type, params=None, keyword_args=None):
        """Instantiate the ensemble block.

        Args:
            model_type (str): Currently ``"Linear"`` or ``"XGBoost"``.
            params (dict, optional): Parameters for custom XGBoost models.
                Defaults to None.
            keyword_args (dict, optional): Keyword arguments for custom
                XGBoost models. Defaults to None.
        """
        self.eval_dict = {}
        self.model_dict = {}
        self.model_type = model_type
        if params is None:
            self.ensemble_params = {
                "eta": 0.05,
                "max_depth": 3,
                "objective": "reg:squarederror",
                "subsample": 0.7,
                "eval_metric": "rmse",
                "lambda": 0.1,
            }
        else:
            self.ensemble_params = params
        if keyword_args is None:
            self.keyword_args = {
                "num_boost_round": 5000,
                "verbose_eval": 0,
                "evals_result": {},
                "early_stopping_rounds": 200,
            }
        else:
            self.keyword_args = keyword_args

    def fit(self):
        """Placeholder for pipeline compatibility; does nothing. (Currently unused.)"""
        pass

    def train(self, X_train, X_val, y_train, y_val, cluster_labels):
        """Train one model per cluster over the training data.

        Cluster labels are assumed to be contiguous integers 0..k-1 aligned
        with the DataFrame indices of ``X_train``/``X_val``.

        Args:
            X_train (pd.DataFrame): Training set.
            X_val (pd.DataFrame): Validation set.
            y_train (np.array or pd.DataFrame): Training labels.
            y_val (np.array or pd.DataFrame): Validation labels.
            cluster_labels (np.array): Cluster labels assigned by ClusterBlock.
        """
        n_clusters = len(np.unique(cluster_labels))
        if self.model_type == "Linear":
            for i in range(n_clusters):
                c_idx = cluster_labels == i
                X_train_cluster = X_train[c_idx[X_train.index]]
                y_train_cluster = y_train[c_idx[X_train.index]]
                self.model_dict["model{0}".format(i)] = LinearRegression().fit(
                    X_train_cluster, y_train_cluster
                )
        if self.model_type == "XGBoost":
            for i in range(n_clusters):
                c_idx = cluster_labels == i
                X_train_cluster = X_train[c_idx[X_train.index]]
                y_train_cluster = y_train[c_idx[X_train.index]]
                X_val_cluster = X_val[c_idx[X_val.index]]
                y_val_cluster = y_val[c_idx[X_val.index]]
                dtrain = xgboost.DMatrix(X_train_cluster, label=y_train_cluster)
                # eval_set (renamed from `eval`, which shadowed the builtin);
                # only add the val fold when this cluster has validation rows.
                eval_set = [
                    (
                        xgboost.DMatrix(X_train_cluster, label=y_train_cluster),
                        "train",
                    )
                ]
                if not y_val_cluster.size == 0:
                    eval_set.append(
                        (xgboost.DMatrix(X_val_cluster, label=y_val_cluster), "val")
                    )
                # Fix: give every per-cluster model its own evals_result dict.
                # Previously one shared dict was reset once and stored for
                # every cluster, so all eval_dict entries aliased the same
                # object and ended up holding only the last model's history.
                evals_result = {}
                self.keyword_args["evals_result"] = evals_result
                self.model_dict["model{0}".format(i)] = xgboost.train(
                    self.ensemble_params, dtrain, evals=eval_set, **self.keyword_args
                )
                self.eval_dict["eval{0}".format(i)] = evals_result

    def predict(self, X_test, cluster_labels):
        """Predict target values of test instances with the per-cluster models.

        Args:
            X_test (np.array or pd.DataFrame): Test set.
            cluster_labels (np.array): Cluster labels of the test instances
                (assigned by ClusterBlock).

        Returns:
            np.array: Predictions of the ensemble model.
        """
        y_pred = np.zeros(shape=(X_test.shape[0],))
        for i in range(len(np.unique(cluster_labels))):
            mask = cluster_labels == i
            if not mask.any():
                # no test instance fell into this cluster
                continue
            model = self.model_dict["model{0}".format(i)]
            if self.model_type == "Linear":
                y_pred[mask] = model.predict(X_test[mask]).reshape(-1,)
            else:
                y_pred[mask] = model.predict(xgboost.DMatrix(X_test[mask]))
        return y_pred
class ReduceBlock:
def __init__(self, reduce_model):
"""[Instantiates the ReduceBlock object]
Args:
reduce_model ([Model]): [Dimensionality reduction algorithm. Currently unused and this block uses PCA as default algorithm.]
"""
# print('Dimensionality Reduction Block Constructed')
self.reduce_model = reduce_model
def fit(self, X):
"""[Fits the dimensionality reduction model to input data.]
Args:
X ([np.array or pd.DataFrame]): [Input features]
"""
explained_var_ratio = 0
k = 1
n_features = X.shape[1]
while explained_var_ratio < 0.95:
k += 1
pca = PCA(n_components=min(k, n_features))
pca.fit(X)
explained_var_ratio | |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
# Number of n-gram orders tracked when aggregating BLEU sufficient
# statistics (counts/totals for 1- through 4-grams).
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
    data_path,
    split,
    src,
    src_dict,
    tgt,
    tgt_dict,
    combine,
    dataset_impl,
    upsample_primary,
    left_pad_source,
    left_pad_target,
    max_source_positions,
    max_target_positions,
    prepend_bos=False,
    load_alignments=False,
    truncate_source=False,
    append_source_id=False,
    num_buckets=0,
    shuffle=True,
    pad_to_multiple=1,
):
    """Load a parallel (source/target) split as a LanguagePairDataset.

    Looks for binarized files named ``<split>.<src>-<tgt>.<lang>`` (or with
    the language pair reversed) under `data_path`.  When `combine` is True,
    extra shards named ``<split>1``, ``<split>2``, ... are loaded and
    concatenated, with the primary shard sampled `upsample_primary` times.

    Args:
        data_path: directory containing the binarized data files.
        split: split name (e.g. 'train', 'valid', 'test').
        src, tgt: source/target language codes.
        src_dict, tgt_dict: dictionaries for each side.
        combine: if True, also load and concatenate extra shards.
        dataset_impl: indexed-dataset implementation name.
        upsample_primary: sample ratio for the first (primary) shard.
        left_pad_source, left_pad_target: padding side per language.
        max_source_positions, max_target_positions: length limits.
        prepend_bos: prepend <bos> to both sides.
        load_alignments: also load word alignments when present.
        truncate_source: truncate source sentences to the length limit.
        append_source_id: append a '[lang]' token to each side.
        num_buckets: if >0, bucket lengths (useful on TPUs).
        shuffle: whether the dataset should be shuffled.
        pad_to_multiple: pad lengths to a multiple of this value.

    Raises:
        FileNotFoundError: if no shard for `split` exists in `data_path`.
    """
    def split_exists(split, src, tgt, lang, data_path):
        # True iff the binarized file '<split>.<src>-<tgt>.<lang>' exists.
        filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
    src_datasets = []
    tgt_datasets = []
    for k in itertools.count():
        # Shard 0 is plain '<split>'; later shards are '<split>1', ...
        split_k = split + (str(k) if k > 0 else "")
        # infer langcode
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
        else:
            if k > 0:
                # No more shards to load.
                break
            else:
                raise FileNotFoundError(
                    "Dataset not found: {} ({})".format(split, data_path)
                )
        src_dataset = data_utils.load_indexed_dataset(
            prefix + src, src_dict, dataset_impl
        )
        if truncate_source:
            # Strip <eos>, truncate to max_source_positions - 1, then
            # re-append <eos> so truncated sentences still end properly.
            src_dataset = AppendTokenDataset(
                TruncateDataset(
                    StripTokenDataset(src_dataset, src_dict.eos()),
                    max_source_positions - 1,
                ),
                src_dict.eos(),
            )
        src_datasets.append(src_dataset)
        tgt_dataset = data_utils.load_indexed_dataset(
            prefix + tgt, tgt_dict, dataset_impl
        )
        if tgt_dataset is not None:
            tgt_datasets.append(tgt_dataset)
        logger.info(
            "{} {} {}-{} {} examples".format(
                data_path, split_k, src, tgt, len(src_datasets[-1])
            )
        )
        if not combine:
            break
    # Either every shard has targets, or none does (source-only data).
    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
    else:
        # Concatenate shards; the primary shard may be upsampled.
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if len(tgt_datasets) > 0:
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None
    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        if tgt_dataset is not None:
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
    eos = None
    if append_source_id:
        # Append a '[lang]' token to each side; the target-side token is
        # also passed to LanguagePairDataset as its eos.
        src_dataset = AppendTokenDataset(
            src_dataset, src_dict.index("[{}]".format(src))
        )
        if tgt_dataset is not None:
            tgt_dataset = AppendTokenDataset(
                tgt_dataset, tgt_dict.index("[{}]".format(tgt))
            )
        eos = tgt_dict.index("[{}]".format(tgt))
    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(
                align_path, None, dataset_impl
            )
    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    return LanguagePairDataset(
        src_dataset,
        src_dataset.sizes,
        src_dict,
        tgt_dataset,
        tgt_dataset_sizes,
        tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        align_dataset=align_dataset,
        eos=eos,
        num_buckets=num_buckets,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
    )
@register_task("translation")
class TranslationTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
    """Add task-specific arguments to the parser."""
    # fmt: off
    parser.add_argument('data', help='colon separated path to data directories list, \
                        will be iterated upon during epochs in round-robin manner; \
                        however, valid and test data are always in the first directory to \
                        avoid the need for repeating them in all directories')
    parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                        help='source language')
    parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                        help='target language')
    parser.add_argument('--load-alignments', action='store_true',
                        help='load the binarized alignments')
    parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                        help='pad the source on the left')
    parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                        help='pad the target on the left')
    parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                        help='max number of tokens in the source sequence')
    parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                        help='max number of tokens in the target sequence')
    parser.add_argument('--upsample-primary', default=1, type=int,
                        help='amount to upsample primary dataset')
    parser.add_argument('--truncate-source', action='store_true', default=False,
                        help='truncate source to max-source-positions')
    parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
                        help='if >0, then bucket source and target lengths into N '
                             'buckets and pad accordingly; this is useful on TPUs '
                             'to minimize the number of compilations')
    # options for reporting BLEU during validation
    parser.add_argument('--eval-bleu', action='store_true',
                        help='evaluation with BLEU scores')
    parser.add_argument('--eval-bleu-detok', type=str, default="space",
                        help='detokenize before computing BLEU (e.g., "moses"); '
                             'required if using --eval-bleu; use "space" to '
                             'disable detokenization; see fairseq.data.encoders '
                             'for other options')
    parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
                        help='args for building the tokenizer, if needed')
    parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
                        help='compute tokenized BLEU instead of sacrebleu')
    parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
                        help='remove BPE before computing BLEU')
    parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                        # Typo fix: help text previously said "BLUE scoring".
                        help='generation args for BLEU scoring, '
                             'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
    parser.add_argument('--eval-bleu-print-samples', action='store_true',
                        help='print sample generations during validation')
    # fmt: on
def __init__(self, args, src_dict, tgt_dict):
    """Initialize the translation task.

    Args:
        args (argparse.Namespace): parsed command-line arguments.
        src_dict (~fairseq.data.Dictionary): source-language dictionary.
        tgt_dict (~fairseq.data.Dictionary): target-language dictionary.
    """
    super().__init__(args)
    self.src_dict = src_dict
    self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
    """Setup the task (e.g., load dictionaries).

    Args:
        args (argparse.Namespace): parsed command-line arguments

    Returns:
        TranslationTask: a task instance built from `args` and the loaded
        source/target dictionaries.
    """
    # Convert the 'True'/'False' CLI string values into real booleans.
    args.left_pad_source = utils.eval_bool(args.left_pad_source)
    args.left_pad_target = utils.eval_bool(args.left_pad_target)
    paths = utils.split_paths(args.data)
    assert len(paths) > 0
    # find language pair automatically
    if args.source_lang is None or args.target_lang is None:
        args.source_lang, args.target_lang = data_utils.infer_language_pair(
            paths[0]
        )
    if args.source_lang is None or args.target_lang is None:
        raise Exception(
            "Could not infer language pair, please provide it explicitly"
        )
    # load dictionaries (always from the first data directory)
    src_dict = cls.load_dictionary(
        os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
    )
    tgt_dict = cls.load_dictionary(
        os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
    )
    # Both dictionaries must agree on the special-symbol indices.
    assert src_dict.pad() == tgt_dict.pad()
    assert src_dict.eos() == tgt_dict.eos()
    assert src_dict.unk() == tgt_dict.unk()
    logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
    logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
    return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
    """Load a given dataset split.

    Args:
        split (str): name of the split (e.g., train, valid, test)
        epoch (int): epoch number; selects the data shard round-robin.
        combine (bool): concatenate extra shards ('<split>1', ...) if any.
    """
    paths = utils.split_paths(self.args.data)
    assert len(paths) > 0
    if split != getattr(self.args, "train_subset", None):
        # if not training data set, use the first shard for valid and test
        paths = paths[:1]
    # Rotate through the data directories by epoch.
    data_path = paths[(epoch - 1) % len(paths)]
    # infer langcode
    src, tgt = self.args.source_lang, self.args.target_lang
    self.datasets[split] = load_langpair_dataset(
        data_path,
        split,
        src,
        self.src_dict,
        tgt,
        self.tgt_dict,
        combine=combine,
        dataset_impl=self.args.dataset_impl,
        upsample_primary=self.args.upsample_primary,
        left_pad_source=self.args.left_pad_source,
        left_pad_target=self.args.left_pad_target,
        max_source_positions=self.args.max_source_positions,
        max_target_positions=self.args.max_target_positions,
        load_alignments=self.args.load_alignments,
        truncate_source=self.args.truncate_source,
        num_buckets=self.args.num_batch_buckets,
        shuffle=(split != "test"),
        pad_to_multiple=self.args.required_seq_len_multiple,
    )
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
    """Wrap raw source tokens/lengths in a LanguagePairDataset for generation."""
    dataset = LanguagePairDataset(
        src_tokens,
        src_lengths,
        self.source_dictionary,
        tgt_dict=self.target_dictionary,
        constraints=constraints,
    )
    return dataset
def build_model(self, args):
    """Build the model; when --eval-bleu is set, also build the detokenizer
    and the sequence generator used for validation-time BLEU scoring."""
    model = super().build_model(args)
    if getattr(args, "eval_bleu", False):
        assert getattr(args, "eval_bleu_detok", None) is not None, (
            "--eval-bleu-detok is required if using --eval-bleu; "
            "try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
            "to disable detokenization, e.g., when using sentencepiece)"
        )
        # Tokenizer settings come from the --eval-bleu-detok-args JSON blob.
        detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
        self.tokenizer = encoders.build_tokenizer(
            Namespace(
                tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
            )
        )
        # Generator settings come from the --eval-bleu-args JSON blob.
        gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
        self.sequence_generator = self.build_generator(
            [model], Namespace(**gen_args)
        )
    return model
def valid_step(self, sample, model, criterion):
    """Run a validation step; when --eval-bleu is set, also compute BLEU on
    the sample and stash its sufficient statistics in `logging_output`."""
    loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
    if self.args.eval_bleu:
        bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
        logging_output["_bleu_sys_len"] = bleu.sys_len
        logging_output["_bleu_ref_len"] = bleu.ref_len
        # we split counts into separate entries so that they can be
        # summed efficiently across workers using fast-stat-sync
        assert len(bleu.counts) == EVAL_BLEU_ORDER
        for i in range(EVAL_BLEU_ORDER):
            logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
            logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
    return loss, sample_size, logging_output
def forward_and_get_hidden_state_step(self, sample, model):
    """Forward `sample` through `model` and return the decoder hidden
    states (features only), e.g. for building a kNN datastore."""
    net_input = sample['net_input']
    decoder_output, extra = model(
        src_tokens=net_input['src_tokens'],
        src_lengths=net_input['src_lengths'],
        prev_output_tokens=net_input['prev_output_tokens'],
        return_all_hiddens=False,
        features_only=True,
    )
    return decoder_output
def reduce_metrics(self, logging_outputs, criterion):
    """Aggregate logging outputs across workers; when --eval-bleu is set,
    re-assemble the per-order n-gram statistics and log a derived corpus
    BLEU score."""
    super().reduce_metrics(logging_outputs, criterion)
    if self.args.eval_bleu:

        def sum_logs(key):
            # Sum one statistic over all workers' logging outputs.
            return sum(log.get(key, 0) for log in logging_outputs)

        counts, totals = [], []
        for i in range(EVAL_BLEU_ORDER):
            counts.append(sum_logs("_bleu_counts_" + str(i)))
            totals.append(sum_logs("_bleu_totals_" + str(i)))
        if max(totals) > 0:
            # log counts as numpy arrays -- log_scalar will sum them correctly
            metrics.log_scalar("_bleu_counts", np.array(counts))
            metrics.log_scalar("_bleu_totals", np.array(totals))
            metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
            metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))

            def compute_bleu(meters):
                import inspect
                import sacrebleu

                # Newer sacrebleu renamed 'smooth' to 'smooth_method';
                # inspect the signature to stay compatible with both.
                fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
                if "smooth_method" in fn_sig:
                    smooth = {"smooth_method": "exp"}
                else:
                    smooth = {"smooth": "exp"}
                bleu = sacrebleu.compute_bleu(
                    correct=meters["_bleu_counts"].sum,
                    total=meters["_bleu_totals"].sum,
                    sys_len=meters["_bleu_sys_len"].sum,
                    ref_len=meters["_bleu_ref_len"].sum,
                    **smooth
                )
                return round(bleu.score, 2)

            metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
    """Return the max sentence length allowed by the task."""
    args = self.args
    return (args.max_source_positions, args.max_target_positions)
@property
def source_dictionary(self):
    """Return the source :class:`~fairseq.data.Dictionary` (set in
    ``__init__`` via ``setup_task``)."""
    return self.src_dict
@property
def target_dictionary(self):
    """Return the target :class:`~fairseq.data.Dictionary` (set in
    ``__init__`` via ``setup_task``)."""
    return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, | |
# -*- coding: utf-8 -*-
import base64
import hashlib
import json
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import traceback
import urlparse
import uuid
import zipfile
from django import forms
from django.conf import settings
import requests
from appvalidator import validate_app, validate_packaged_app
from celery import task
from django_statsd.clients import statsd
from PIL import Image
from tower import ugettext as _
import mkt
from lib.post_request_task.task import task as post_request_task
from mkt.constants import APP_PREVIEW_SIZES
from mkt.constants.regions import REGIONS_CHOICES_ID_DICT
from mkt.files.models import File, FileUpload, FileValidation
from mkt.files.utils import SafeUnzip
from mkt.site.decorators import set_modified_on, use_master
from mkt.site.helpers import absolutify
from mkt.site.mail import send_mail_jinja
from mkt.site.storage_utils import (copy_stored_file, local_storage,
private_storage, public_storage)
from mkt.site.utils import (remove_icons, remove_promo_imgs, resize_image,
strip_bom)
from mkt.webapps.models import AddonExcludedRegion, Preview, Webapp
from mkt.webapps.utils import iarc_get_app_info
log = logging.getLogger('z.mkt.developers.task')
# MDN page on serving app manifests; not referenced elsewhere in this
# module -- presumably linked from validation messages (TODO confirm).
CT_URL = (
    'https://developer.mozilla.org/docs/Web/Apps/Manifest#Serving_manifests'
)
# Firefox OS user agent used for outbound fetches (see _fetch_content).
REQUESTS_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Mobile; rv:18.0) Gecko/18.0 Firefox/18.0'
}
@post_request_task
@use_master
def validator(upload_id, **kw):
    """Validate the app belonging to a FileUpload.

    Runs the app validator on the upload, merges any preliminary
    validation result already stored on the upload, and saves the combined
    JSON back onto the FileUpload.  On failure the traceback is stored in
    ``task_error`` (and re-raised unless Celery runs eagerly).

    Args:
        upload_id: primary key of the FileUpload to validate.
        kw: may contain ``url``, forwarded to the validator as the
            manifest URL.
    """
    # Site-wide switch to disable validation entirely.
    if not settings.VALIDATE_ADDONS:
        return None
    log.info(u'[FileUpload:%s] Validating app.' % upload_id)
    try:
        upload = FileUpload.objects.get(pk=upload_id)
    except FileUpload.DoesNotExist:
        log.info(u'[FileUpload:%s] Does not exist.' % upload_id)
        return
    try:
        validation_result = run_validator(upload.path, url=kw.get('url'))
        if upload.validation:
            # If there's any preliminary validation result, merge it with the
            # actual validation result.
            dec_prelim_result = json.loads(upload.validation)
            if 'prelim' in dec_prelim_result:
                dec_validation_result = json.loads(validation_result)
                # Merge the messages.
                dec_validation_result['messages'] += (
                    dec_prelim_result['messages'])
                # Merge the success value.
                if dec_validation_result['success']:
                    dec_validation_result['success'] = (
                        dec_prelim_result['success'])
                # Merge the error count (we only raise errors, not warnings).
                dec_validation_result['errors'] += dec_prelim_result['errors']
                # Put the validation result back into JSON.
                validation_result = json.dumps(dec_validation_result)
        upload.validation = validation_result
        upload.save()  # We want to hit the custom save().
    except Exception:
        # Store the error with the FileUpload job, then raise
        # it for normal logging.
        tb = traceback.format_exception(*sys.exc_info())
        upload.update(task_error=''.join(tb))
        # Don't raise if we're being eager, setting the error is enough.
        if not settings.CELERY_ALWAYS_EAGER:
            raise
@task
@use_master
def file_validator(file_id, **kw):
    """Validate a single File and persist the result as a FileValidation.

    Unlike upload validation, any exception raised by the validator is
    allowed to propagate.
    """
    if not settings.VALIDATE_ADDONS:
        return None
    log.info(u'[File:%s] Validating file.' % file_id)
    try:
        file_obj = File.objects.get(pk=file_id)
    except File.DoesNotExist:
        log.info(u'[File:%s] Does not exist.' % file_id)
        return
    # Unlike upload validation, let the validator raise an exception if there
    # is one.
    result = run_validator(file_obj.file_path,
                           url=file_obj.version.addon.manifest_url)
    return FileValidation.from_json(file_obj, result)
def run_validator(file_path, url=None):
    """A pre-configured wrapper around the app validator.

    Copies the (possibly remote) stored file to a local temporary path,
    runs the packaged-app or hosted-app validator on it, and always removes
    the temporary copy afterwards.

    Args:
        file_path: storage path of the file to validate.
        url: manifest URL, forwarded to the hosted-app validator.

    Returns:
        The validator's JSON result string.
    """
    # Make a copy of the file since we can't assume the
    # uploaded file is on the local filesystem.
    temp_path = tempfile.mktemp()
    copy_stored_file(
        file_path, temp_path,
        src_storage=private_storage, dst_storage=local_storage)
    try:
        with statsd.timer('mkt.developers.validator'):
            is_packaged = zipfile.is_zipfile(temp_path)
            if is_packaged:
                log.info(u'Running `validate_packaged_app` for path: %s'
                         % (file_path))
                with statsd.timer('mkt.developers.validate_packaged_app'):
                    return validate_packaged_app(
                        temp_path,
                        market_urls=settings.VALIDATOR_IAF_URLS,
                        timeout=settings.VALIDATOR_TIMEOUT,
                        spidermonkey=settings.SPIDERMONKEY)
            else:
                log.info(u'Running `validate_app` for path: %s' % (file_path))
                with statsd.timer('mkt.developers.validate_app'):
                    # Close the file handle explicitly via `with` instead of
                    # leaking it as the old open().read() did.
                    with open(temp_path) as fd:
                        return validate_app(
                            fd.read(),
                            market_urls=settings.VALIDATOR_IAF_URLS,
                            url=url)
    finally:
        # Clean up the copied file.  The original code placed this after the
        # returns above, so it never ran and the temp copy leaked; try/finally
        # guarantees deletion on every path.
        os.unlink(temp_path)
def _hash_file(fd):
    """Return the first 8 hex characters of the MD5 digest of `fd`'s contents."""
    digest = hashlib.md5(fd.read()).hexdigest()
    return digest[:8]
@post_request_task
@use_master
@set_modified_on
def resize_icon(src, dst, sizes, src_storage=private_storage,
                dst_storage=public_storage, **kw):
    """Resizes addon/websites icons.

    For each size in `sizes`, writes a ``<dst>-<size>.png`` resized copy,
    queues a pngcrush optimization pass for it, then hashes and deletes
    the source file.

    Returns:
        dict with ``icon_hash`` (short MD5 of the original file) on
        success; None if resizing failed (the error is logged).
    """
    log.info('[1@None] Resizing icon: %s' % dst)
    try:
        for s in sizes:
            size_dst = '%s-%s.png' % (dst, s)
            resize_image(src, size_dst, (s, s), remove_src=False,
                         src_storage=src_storage, dst_storage=dst_storage)
            pngcrush_image.delay(size_dst, storage=dst_storage, **kw)
        # Hash the original source before deleting it so callers can detect
        # icon changes.
        with src_storage.open(src) as fd:
            icon_hash = _hash_file(fd)
        src_storage.delete(src)
        log.info('Icon resizing completed for: %s' % dst)
        return {'icon_hash': icon_hash}
    except Exception, e:
        log.error("Error resizing icon: %s; %s" % (e, dst))
@post_request_task
@use_master
@set_modified_on
def resize_promo_imgs(src, dst, sizes, **kw):
    """Resizes webapp/website promo imgs.

    Writes a ``<dst>-<size>.png`` copy per size, queues pngcrush for each,
    then hashes and deletes the source.

    Returns:
        dict with ``promo_img_hash`` on success; None on failure (logged).
    """
    log.info('[1@None] Resizing promo imgs: %s' % dst)
    try:
        for s in sizes:
            size_dst = '%s-%s.png' % (dst, s)
            # Crop only to the width, keeping the aspect ratio.
            resize_image(src, size_dst, (s, 0), remove_src=False)
            pngcrush_image.delay(size_dst, **kw)
        with private_storage.open(src) as fd:
            promo_img_hash = _hash_file(fd)
        private_storage.delete(src)
        log.info('Promo img hash resizing completed for: %s' % dst)
        return {'promo_img_hash': promo_img_hash}
    except Exception, e:
        log.error("Error resizing promo img hash: %s; %s" % (e, dst))
@task
@use_master
@set_modified_on
def pngcrush_image(src, hash_field='image_hash', storage=public_storage, **kw):
    """
    Optimizes a PNG image by running it through Pngcrush. Returns hash.

    src -- filesystem image path
    hash_field -- field name to save the new hash on instance if passing
                  instance through set_modified_on
    """
    log.info('[1@None] Optimizing image: %s' % src)
    # Copy the stored image into a local temp file for pngcrush to work on.
    tmp_src = tempfile.NamedTemporaryFile(suffix='.png')
    with storage.open(src) as srcf:
        shutil.copyfileobj(srcf, tmp_src)
    tmp_src.seek(0)
    try:
        # pngcrush -ow has some issues, use a temporary file and do the final
        # renaming ourselves.
        suffix = '.opti.png'
        tmp_path = '%s%s' % (os.path.splitext(tmp_src.name)[0], suffix)
        cmd = [settings.PNGCRUSH_BIN, '-q', '-rem', 'alla', '-brute',
               '-reduce', '-e', suffix, tmp_src.name]
        sp = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = sp.communicate()
        if sp.returncode != 0:
            # pngcrush failed: log and retry the task (up to 3 attempts),
            # preserving the storage backend for the retry.
            log.error('Error optimizing image: %s; %s' % (src, stderr.strip()))
            kw['storage'] = storage
            pngcrush_image.retry(args=[src], kwargs=kw, max_retries=3)
            return False
        # Return hash for set_modified_on.
        with open(tmp_path) as fd:
            image_hash = _hash_file(fd)
        # Overwrite the stored original with the optimized copy.
        copy_stored_file(tmp_path, src, src_storage=local_storage,
                         dst_storage=storage)
        log.info('Image optimization completed for: %s' % src)
        os.remove(tmp_path)
        tmp_src.close()
        return {
            hash_field: image_hash
        }
    except Exception, e:
        log.error('Error optimizing image: %s; %s' % (src, e))
        return {}
@post_request_task
@use_master
@set_modified_on
def resize_preview(src, pk, **kw):
    """Resizes preview images and stores the sizes on the preview.

    Generates a thumbnail and/or full-size image for the Preview with
    primary key `pk`, records the resulting dimensions in
    ``instance.sizes`` and deletes the source file.  Returns True on
    success; on failure the error is logged and None is returned.
    """
    instance = Preview.objects.get(pk=pk)
    thumb_dst, full_dst = instance.thumbnail_path, instance.image_path
    sizes = instance.sizes or {}
    log.info('[1@None] Resizing preview and storing size: %s' % thumb_dst)
    try:
        thumbnail_size = APP_PREVIEW_SIZES[0][:2]
        image_size = APP_PREVIEW_SIZES[1][:2]
        with private_storage.open(src, 'rb') as fp:
            size = Image.open(fp).size
        if size[0] > size[1]:
            # If the image is wider than tall, then reverse the wanted size
            # to keep the original aspect ratio while still resizing to
            # the correct dimensions.
            thumbnail_size = thumbnail_size[::-1]
            image_size = image_size[::-1]
        if kw.get('generate_thumbnail', True):
            sizes['thumbnail'] = resize_image(src, thumb_dst,
                                              thumbnail_size,
                                              remove_src=False)
        if kw.get('generate_image', True):
            sizes['image'] = resize_image(src, full_dst,
                                          image_size,
                                          remove_src=False)
        instance.sizes = sizes
        instance.save()
        log.info('Preview resized to: %s' % thumb_dst)
        # Remove src file now that it has been processed.
        private_storage.delete(src)
        return True
    except Exception, e:
        log.error("Error saving preview: %s; %s" % (e, thumb_dst))
def _fetch_content(url):
    """GET `url` (streaming, 30s timeout) and return the response.

    Raises a plain Exception on connection failure, a non-2xx status, or a
    response without headers; statsd success/error counters are bumped
    accordingly.  (The two validation raises below are plain Exceptions and
    were never caught by the RequestException handler, so hoisting them out
    of the try block is behavior-preserving.)
    """
    with statsd.timer('developers.tasks.fetch_content'):
        try:
            response = requests.get(url, timeout=30, stream=True,
                                    headers=REQUESTS_HEADERS)
        except requests.RequestException as e:
            statsd.incr('developers.tasks.fetch_content.error')
            log.error('fetch_content connection error: %s' % e)
            raise Exception('The file could not be retrieved.')
        if not 200 <= response.status_code < 300:
            statsd.incr('developers.tasks.fetch_content.error')
            raise Exception('An invalid HTTP status code was returned.')
        if not response.headers.keys():
            statsd.incr('developers.tasks.fetch_content.error')
            raise Exception('The HTTP server did not return headers.')
        statsd.incr('developers.tasks.fetch_content.success')
        return response
class ResponseTooLargeException(Exception):
    """Raised by get_content_and_check_size() when a fetched response body
    exceeds the allowed maximum size."""
    pass
def get_content_and_check_size(response, max_size):
    """Return the body of `response`, enforcing a size limit.

    Reads up to ``max_size + 1`` bytes in one chunk; reading one extra byte
    lets us detect oversized responses without downloading huge files.

    Args:
        response: a streaming response object exposing ``iter_content``.
        max_size: maximum allowed body size in bytes.

    Raises:
        ResponseTooLargeException: if the body exceeds `max_size`.
    """
    # The builtin next() works on Python 2.6+ and 3.x, unlike the old
    # Python 2-only `.next()` method call.
    content = next(response.iter_content(chunk_size=max_size + 1))
    if len(content) > max_size:
        raise ResponseTooLargeException('Too much data.')
    return content
def save_icon(obj, icon_content):
    """
    Saves the icon for `obj` to its final destination. `obj` can be an app or a
    website.
    """
    # Stage the raw bytes in a temp location, then hand off to resize_icon.
    tmp_dst = os.path.join(settings.TMP_PATH, 'icon', uuid.uuid4().hex)
    with public_storage.open(tmp_dst, 'wb') as fd:
        fd.write(icon_content)
    dirname = obj.get_icon_dir()
    destination = os.path.join(dirname, '%s' % obj.pk)
    # Drop any previous icons before generating the new sizes.
    remove_icons(destination)
    icon_hash = resize_icon(tmp_dst, destination, mkt.CONTENT_ICON_SIZES,
                            set_modified_on=[obj], src_storage=public_storage,
                            dst_storage=public_storage)
    # Need to set icon type so .get_icon_url() works normally
    # submit step 4 does it through AppFormMedia, but we want to beat them to
    # the punch. resize_icon outputs pngs so we know it's 'image/png'.
    obj.icon_hash = icon_hash['icon_hash']  # In case, we're running not async.
    try:
        obj.icon_type = 'image/png'
    except AttributeError:
        # icon_type can be just a @property on models that only implement png.
        pass
    obj.save()
def save_promo_imgs(obj, img_content):
    """Write `img_content` to a temp location and kick off promo-image
    resizing for `obj` (an app or a website).
    """
    temp_path = os.path.join(settings.TMP_PATH, 'promo_imgs', uuid.uuid4().hex)
    with private_storage.open(temp_path, 'wb') as fd:
        fd.write(img_content)
    final_path = os.path.join(obj.get_promo_img_dir(), '%s' % obj.pk)
    # Drop any previous promo images before generating the new sizes.
    remove_promo_imgs(final_path)
    resize_promo_imgs(temp_path, final_path, mkt.PROMO_IMG_SIZES,
                      set_modified_on=[obj])
@post_request_task
@use_master
def fetch_icon(pk, file_pk=None, **kw):
"""
Downloads a webapp icon from the location specified in the manifest.
Returns False if icon was not able to be retrieved
If `file_pk` is not provided it will use the file from the app's
`current_version`.
"""
webapp = Webapp.objects.get(pk=pk)
log.info(u'[1@None] Fetching icon for webapp %s.' % webapp.name)
if file_pk:
file_obj = File.objects.get(pk=file_pk)
else:
file_obj = (webapp.current_version and
webapp.current_version.all_files[0])
manifest = webapp.get_manifest_json(file_obj)
if not manifest or 'icons' not in manifest:
# Set the icon type to empty.
webapp.update(icon_type='')
return
try:
biggest = max(int(size) for size in manifest['icons'])
except ValueError:
log.error('No icon to fetch for webapp "%s"' % webapp.name)
return False
icon_url = manifest['icons'][str(biggest)]
if icon_url.startswith('data:image'):
image_string = icon_url.split('base64,')[1]
content = base64.decodestring(image_string)
else:
if webapp.is_packaged:
# Get icons from package.
if icon_url.startswith('/'):
icon_url = icon_url[1:]
try:
zf = SafeUnzip(private_storage.open(file_obj.file_path))
zf.is_valid()
content = zf.extract_path(icon_url)
except (KeyError, forms.ValidationError): # Not found in archive.
log.error(u'[Webapp:%s] Icon %s not found in archive'
% (webapp, icon_url))
return False
else:
if not urlparse.urlparse(icon_url).scheme:
icon_url = webapp.origin + icon_url
try:
response = _fetch_content(icon_url)
except Exception, e:
log.error(u'[Webapp:%s] Failed to fetch icon for webapp: %s'
% (webapp, e))
# Set | |
# networkx/algorithms/euler.py
"""
Eulerian circuits and graphs.
"""
from itertools import combinations
import networkx as nx
from ..utils import arbitrary_element, not_implemented_for
__all__ = [
"is_eulerian",
"eulerian_circuit",
"eulerize",
"is_semieulerian",
"has_eulerian_path",
"eulerian_path",
]
def is_eulerian(G):
    """Returns True if and only if `G` is Eulerian.

    A graph is *Eulerian* if it has an Eulerian circuit: a closed walk
    that traverses every edge of the graph exactly once.

    Parameters
    ----------
    G : NetworkX graph
        A graph, either directed or undirected.

    Examples
    --------
    >>> nx.is_eulerian(nx.DiGraph({0: [3], 1: [2], 2: [3], 3: [0, 1]}))
    True
    >>> nx.is_eulerian(nx.complete_graph(5))
    True
    >>> nx.is_eulerian(nx.petersen_graph())
    False

    Notes
    -----
    If the graph is not connected (or not strongly connected, for
    directed graphs), this function returns False.
    """
    if G.is_directed():
        # Directed case: every node balanced (in-degree == out-degree)
        # and the graph strongly connected.
        balanced = all(G.in_degree(n) == G.out_degree(n) for n in G)
        return balanced and nx.is_strongly_connected(G)
    # Undirected case: every degree even and the graph connected.
    all_even = all(deg % 2 == 0 for _, deg in G.degree())
    return all_even and nx.is_connected(G)
def is_semieulerian(G):
    """Return True iff `G` is semi-Eulerian.

    G is semi-Eulerian if it has an Eulerian path but no Eulerian circuit.
    """
    if not has_eulerian_path(G):
        return False
    return not is_eulerian(G)
def _find_path_start(G):
    """Return a suitable starting vertex for an Eulerian path.

    If no path exists, return None.
    """
    if not has_eulerian_path(G):
        return None
    if is_eulerian(G):
        # Any vertex works when a full circuit exists.
        return arbitrary_element(G)
    if G.is_directed():
        # Exactly two nodes are unbalanced; the start is the one with an
        # out-degree surplus.
        first, second = [v for v in G if G.in_degree(v) != G.out_degree(v)]
        if G.out_degree(first) > G.in_degree(first):
            return first
        return second
    # Undirected: start from the first odd-degree vertex encountered.
    for v in G:
        if G.degree(v) % 2 != 0:
            return v
def _simplegraph_eulerian_circuit(G, source):
    """Yield the edges of an Eulerian circuit in non-multigraph `G`,
    starting from `source`.

    NOTE: destructive -- edges are removed from `G` as they are consumed,
    so callers must pass a copy (see eulerian_circuit()).
    """
    if G.is_directed():
        degree = G.out_degree
        edges = G.out_edges
    else:
        degree = G.degree
        edges = G.edges
    # Stack-based walk: follow unused edges until stuck, then unwind the
    # stack, emitting edges in reverse order of vertex completion.
    vertex_stack = [source]
    last_vertex = None
    while vertex_stack:
        current_vertex = vertex_stack[-1]
        if degree(current_vertex) == 0:
            # No unused edges left here: this vertex is finished; emit the
            # edge to the previously finished vertex.
            if last_vertex is not None:
                yield (last_vertex, current_vertex)
            last_vertex = current_vertex
            vertex_stack.pop()
        else:
            # Follow an arbitrary unused edge and consume it.
            _, next_vertex = arbitrary_element(edges(current_vertex))
            vertex_stack.append(next_vertex)
            G.remove_edge(current_vertex, next_vertex)
def _multigraph_eulerian_circuit(G, source):
    """Yield the edges (with keys) of an Eulerian circuit in multigraph
    `G`, starting from `source`.

    NOTE: destructive -- edges are removed from `G` as they are consumed,
    so callers must pass a copy (see eulerian_circuit()).
    """
    if G.is_directed():
        degree = G.out_degree
        edges = G.out_edges
    else:
        degree = G.degree
        edges = G.edges
    # Same stack-based walk as the simple-graph helper, but each stack
    # entry also carries the multi-edge key used to reach the vertex.
    vertex_stack = [(source, None)]
    last_vertex = None
    last_key = None
    while vertex_stack:
        current_vertex, current_key = vertex_stack[-1]
        if degree(current_vertex) == 0:
            # Vertex finished: emit the edge (with key) to the previously
            # finished vertex.
            if last_vertex is not None:
                yield (last_vertex, current_vertex, last_key)
            last_vertex, last_key = current_vertex, current_key
            vertex_stack.pop()
        else:
            # Follow an arbitrary unused (u, v, key) edge and consume it.
            triple = arbitrary_element(edges(current_vertex, keys=True))
            _, next_vertex, next_key = triple
            vertex_stack.append((next_vertex, next_key))
            G.remove_edge(current_vertex, next_vertex, next_key)
def eulerian_circuit(G, source=None, keys=False):
    """Returns an iterator over the edges of an Eulerian circuit in `G`.

    An *Eulerian circuit* is a closed walk that includes each edge of a
    graph exactly once.

    Parameters
    ----------
    G : NetworkX graph
        A graph, either directed or undirected.

    source : node, optional
        Starting node for circuit.

    keys : bool
        If False, edges generated by this function will be of the form
        ``(u, v)``. Otherwise, edges will be of the form ``(u, v, k)``.
        This option is ignored unless `G` is a multigraph.

    Returns
    -------
    edges : iterator
        An iterator over edges in the Eulerian circuit.

    Raises
    ------
    NetworkXError
        If the graph is not Eulerian.

    See Also
    --------
    is_eulerian

    Notes
    -----
    This is a linear time implementation of an algorithm adapted from [1]_.

    For general information about Euler tours, see [2]_.

    References
    ----------
    .. [1] J. Edmonds, E. L. Johnson.
       Matching, Euler tours and the Chinese postman.
       Mathematical programming, Volume 5, Issue 1 (1973), 111-114.
    .. [2] https://en.wikipedia.org/wiki/Eulerian_path

    Examples
    --------
    To get an Eulerian circuit in an undirected graph::

        >>> G = nx.complete_graph(3)
        >>> list(nx.eulerian_circuit(G))
        [(0, 2), (2, 1), (1, 0)]
        >>> list(nx.eulerian_circuit(G, source=1))
        [(1, 2), (2, 0), (0, 1)]

    To get the sequence of vertices in an Eulerian circuit::

        >>> [u for u, v in nx.eulerian_circuit(G)]
        [0, 2, 1]
    """
    if not is_eulerian(G):
        raise nx.NetworkXError("G is not Eulerian.")
    if G.is_directed():
        # The helpers emit edges while unwinding their stack; reversing a
        # directed graph first yields edges in the correct direction.
        G = G.reverse()
    else:
        # Work on a copy: the helpers consume (remove) edges as they go.
        G = G.copy()
    if source is None:
        source = arbitrary_element(G)
    if G.is_multigraph():
        for u, v, k in _multigraph_eulerian_circuit(G, source):
            if keys:
                yield u, v, k
            else:
                yield u, v
    else:
        yield from _simplegraph_eulerian_circuit(G, source)
def has_eulerian_path(G, source=None):
    """Return True iff `G` has an Eulerian path.

    An Eulerian path is a path in a graph which uses each edge of a graph
    exactly once.

    A directed graph has an Eulerian path iff:
        - at most one vertex has out_degree - in_degree = 1,
        - at most one vertex has in_degree - out_degree = 1,
        - every other vertex has equal in_degree and out_degree,
        - and all of its vertices with nonzero degree belong to a
          single connected component of the underlying undirected graph.

    An undirected graph has an Eulerian path iff:
        - exactly zero or two vertices have odd degree,
        - and all of its vertices with nonzero degree belong to a
          single connected component.

    Parameters
    ----------
    G : NetworkX Graph
        The graph to find an euler path in.
    source : node, optional
        Starting node for circuit.

    Returns
    -------
    Bool : True if G has an eulerian path.

    See Also
    --------
    is_eulerian
    eulerian_path
    """
    # An Eulerian circuit is in particular an Eulerian path.
    if nx.is_eulerian(G):
        return True

    if not G.is_directed():
        # Graph is not Eulerian, so a path must start at one of the two
        # odd-degree vertices.
        if source is not None and G.degree[source] % 2 != 1:
            return False
        # Exactly two odd vertices (zero would mean Eulerian, already
        # ruled out above) plus connectivity.
        odd_count = sum(1 for _, degree in G.degree() if degree % 2 == 1)
        return odd_count == 2 and nx.is_connected(G)

    # Directed case.  Drop isolated nodes -- without altering the input
    # graph -- so they do not break the weak-connectivity test.
    isolated = [n for n in G if G.in_degree[n] == 0 and G.out_degree[n] == 0]
    if isolated:
        G = G.copy()
        G.remove_nodes_from(isolated)
    in_degree = G.in_degree
    out_degree = G.out_degree
    # Since we know G is not Eulerian, a valid start must have exactly
    # one excess outgoing edge.
    if source is not None and out_degree[source] - in_degree[source] != 1:
        return False
    starts = 0  # vertices with out_degree - in_degree == 1
    ends = 0    # vertices with in_degree - out_degree == 1
    for node in G:
        delta = out_degree[node] - in_degree[node]
        if delta == 1:
            starts += 1
        elif delta == -1:
            ends += 1
        elif delta != 0:
            return False
    return starts <= 1 and ends <= 1 and nx.is_weakly_connected(G)
def eulerian_path(G, source=None, keys=False):
    """Return an iterator over the edges of an Eulerian path in `G`.

    Parameters
    ----------
    G : NetworkX Graph
        The graph in which to look for an eulerian path.
    source : node or None (default: None)
        The node at which to start the search. None means search over all
        starting nodes.
    keys : Bool (default: False)
        Indicates whether to yield edge 3-tuples (u, v, edge_key).
        The default yields edge 2-tuples.

    Yields
    ------
    Edge tuples along the eulerian path.

    Warning: If `source` provided is not the start node of an Euler path
    will raise error even if an Euler Path exists.
    """
    if not has_eulerian_path(G, source):
        raise nx.NetworkXError("Graph has no Eulerian paths.")

    if G.is_directed():
        # The circuit helpers walk the reversed graph for the directed
        # case.
        G = G.reverse()
        # A non-Eulerian directed graph has a unique valid start node;
        # recompute it unless an Eulerian G makes any source usable.
        if source is None or not nx.is_eulerian(G):
            source = _find_path_start(G)
        if G.is_multigraph():
            for u, v, k in _multigraph_eulerian_circuit(G, source):
                yield (u, v, k) if keys else (u, v)
        else:
            yield from _simplegraph_eulerian_circuit(G, source)
        return

    # Undirected: the helpers hand the walk back with flipped endpoints,
    # so collect it, flip each edge and emit in reverse order.
    G = G.copy()
    if source is None:
        source = _find_path_start(G)
    if G.is_multigraph():
        walk = [(v, u, k) if keys else (v, u)
                for u, v, k in _multigraph_eulerian_circuit(G, source)]
    else:
        walk = [(v, u) for u, v in _simplegraph_eulerian_circuit(G, source)]
    yield from reversed(walk)
@not_implemented_for("directed")
def eulerize(G):
"""Transforms a graph into an Eulerian graph
Parameters
----------
G : NetworkX graph
An undirected graph
Returns
-------
G : NetworkX multigraph
Raises
------
NetworkXError
If the graph is not connected.
See Also
--------
is_eulerian
eulerian_circuit
References
----------
.. [1] <NAME>, <NAME>.
Matching, Euler tours and the Chinese postman.
Mathematical programming, Volume 5, Issue 1 (1973), 111-114.
[2] https://en.wikipedia.org/wiki/Eulerian_path
.. [3] http://web.math.princeton.edu/math_alive/5/Notes1.pdf
Examples
--------
>>> G = nx.complete_graph(10)
>>> | |
<reponame>inakypg/tcf
#! /usr/bin/python3
#
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# FIXME:
#
# - if I release targets from a reservation, they need to be removed
# from the group list, so it is not confusing in the listing when I
# get/alloc-ls
# - reject messages to carry a 40x code?
# - each target allocation carries a max TTL per policy
# - starvation control missing
# - forbid fsdb writing to alloc fields
# - check lock order taking, always target or allocid,target
# * LRU caches needs being able to invalidate to avoid data
# contamination, consider https://pastebin.com/LDwMwtp8
#
# This is a simple priority queue allocator; the resource(s) to be
# allocated are groups of one or more targets.
#
# A user places a request for any of multiple targets by calling
# request().
#
# _run() implements the actual scheduler runs by calling _run_target()
#
# _run() is triggered by:
#
# - a new request()
#
# - an allocation deleted [allocdb.delete()]
#
# - periodically by the maintenance() process, which is called from
# the system's cleanup thread
#
"""
Dynamic preemptable queue multi-resource allocator
Highest priority is 0, lowest priority is > 0
Preemption use cases
^^^^^^^^^^^^^^^^^^^^
- Use case 1: queue for target with no preemption enabled
queue: N O:500 1:450 2:400 3:350
new waiter is added, 500P, whole thing is set to have preemption on
queue: P O:500 1:500P 2:450 3:400 4:350
- Use case 2: (builds on 1)
during a maintenance run (or other reason), prio of 1 is boosted by
50; preemption kicks in, kicks out O
queue: O:550 2:450 3:400 4:350
"""
import bisect
import collections
import datetime
import errno
import json
import logging
import numbers
import pprint
import os
import re
import shutil
import tempfile
import time
import uuid
import werkzeug
import commonl
import ttbl
import ttbl.config
import ttbl.user_control
# Directory under which each allocation's on-disk state lives; set by
# the daemon during initialization, before any allocation is created.
path = None

# Allocation IDs are short tokens of word characters only.
_allocid_valid_regex = re.compile(r"^[_a-zA-Z0-9]+$")
# note this matches the valid characters that tmpfile.mkdtemp() will use
_allocid_valid = set("_0123456789"
                     "abcdefghijklmnopqrstuvwxyz"
                     "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
# Characters allowed in a queue sequence number.
_queue_number_valid = set("0123456789")

# FIXME: consider defining these as constants so the state set can
# track missing stuff and make it harder to mess up, plus it'll do
# static checks
#
# Valid allocation states -> human-readable description; state_set()
# asserts against the keys of this dict.
states = {
    "invalid": "allocation is not valid",
    "queued": "allocation is queued",
    "busy": "targets cannot be allocated right now and queuing not allowed",
    "removed": "allocation has been removed by the user",
    "rejected": "user has no privilege for this operation",
    "active": "allocation is being actively used",
    "overtime": "maximum time-to-live exceeded",
    # one of your targets was kicked out and another one assigned on
    # its place, so go call GET /PREFIX/allocation/ALLOCATIONID to get
    # the new targets and restart your run
    "restart-needed": "allocation has been changed by a higher priority allocator",
    "timedout": "allocation was idle for too long",
}
import collections
import time
class lru_aged_c(object):
    """Memoizing wrapper: an LRU cache of bounded size whose entries
    also expire after a time-to-live.

    Very basic: only positional arguments are supported (no kwargs) and
    argument types are not normalized.

    :param callable fn: function whose results are cached, keyed by its
      positional arguments
    :param ttl: maximum age, in seconds, for a cached entry to be
      considered valid
    :param int maxsize: maximum number of entries kept; the least
      recently used entry is evicted when exceeded
    """
    def __init__(self, fn, ttl, maxsize):
        # maps args tuple -> ( result, timestamp-of-computation )
        self.cache = collections.OrderedDict()
        self.fn = fn
        self.ttl = ttl
        self.maxsize = maxsize

    def __call__(self, *args):	# FIXME: support kwargs
        timestamp = time.time()
        if args in self.cache:
            result, result_timestamp = self.cache[args]
            # Entry is fresh if its age (now minus when it was cached)
            # is within the TTL.  BUGFIX: the original compared
            # result_timestamp - timestamp, which is always <= 0 and
            # thus never expired anything.
            if timestamp - result_timestamp <= self.ttl:
                # mark as most recently used
                self.cache.move_to_end(args)
                return result
            # fallthrough, item is too old, refresh below
        result = self.fn(*args)
        self.cache[args] = (result, timestamp)
        if len(self.cache) > self.maxsize:
            # evict the least recently used entry
            self.cache.popitem(last = False)
        return result

    def cache_hit(self, args):
        """Return *True* if *args* (a tuple) is currently cached."""
        return args in self.cache

    def invalidate(self, entry = None):
        """Drop one cached *entry* (an args tuple) or, with *None*,
        the whole cache."""
        if entry is None:
            self.cache = collections.OrderedDict()
        elif entry in self.cache:
            del self.cache[entry]
class allocation_c(commonl.fsdb_symlink_c):
    """
    A target-group allocation, backed by state in disk.

    Each allocation is a directory *path/ALLOCID* whose fields are kept
    as fsdb symlinks.

    Move backend symlink_set_c -> impl so it can be transitioned to a
    Mongo or whatever
    """
    def __init__(self, allocid):
        dirname = os.path.join(path, allocid)
        commonl.fsdb_symlink_c.__init__(self, dirname, concept = "allocid")
        self.allocid = allocid
        # protects writing to most fields
        # - group
        # - state
        self.lock = ttbl.process_posix_file_lock_c(
            os.path.join(dirname, "lockfile"))
        # caches of group/target info; filled by target_info_reload()
        self.targets_all = None
        self.groups = None
        self.target_info_reload()

    @staticmethod
    def __init_from_cache__(allocid):
        # entry point used by the allocation LRU cache
        # yeah, yeah, I could make lru_aged_c be smarter and know how
        # to call methods, but it's late
        return allocation_c(allocid)

    def target_info_reload(self):
        """Reload the group and target maps from the on-disk database."""
        # Note the information about the targets and groups doesn't
        # change once it is commited to the fsdb, so it is safe to
        # load it just once
        self.groups = {}
        self.targets_all = {}
        target_names_all = set()
        # fields are stored as "group.NAME" -> "TARGET1,TARGET2,..."
        for group_name, val in self.get_as_slist("group.*"):
            target_names_group = set(val.split(','))
            # strip the "group." prefix to get the bare group name
            self.groups[group_name[6:]] = target_names_group
            target_names_all |= target_names_group
        for target_name in target_names_all:
            try:
                self.targets_all[target_name] = ttbl.test_target.get(target_name)
            except KeyError:
                raise self.invalid_e(
                    "%s: target no longer available" % target_name)

    def delete(self, _state = "removed"):
        """Remove this allocation, releasing any targets it holds.

        :param str _state: (optional) reason for the removal; see
          :data:`states`.
        """
        try:
            # if the reservation DB is messed up, this might fail --
            # it is fine, we will then just wipe it
            with self.lock:
                if self.state_get() == 'active':
                    targets = {}
                    for target_name in self.get("group_allocated").split(","):
                        target = ttbl.test_target.get(target_name)
                        targets[target_name] = target
                        # cleanup each of the involved targets when
                        # active; this is a sum up of
                        # ttbl.test_target._deallocate()+
                        # _deallocate_simple(), since we know the
                        # steps are the same
                        target._state_cleanup(True)
                        target._allocid_wipe()
                else:
                    targets = self.targets_all
        finally:
            # wipe the whole tree--this will render all the records that point
            # to it invalid and the next _run() call will clean them
            shutil.rmtree(self.location, True)
            lru_aged_cache_allocation_c.invalidate(self.allocid)
        # FIXME: implement a DB of recently deleted reservations so anyone
        # trying to use it gets a state invalid/timedout/overtime/removed
        # release all queueing/owning targets to it
        if targets:
            _run(targets.values(), False)

    def set(self, key, value, force = True):
        # override the base implementation only to flip the default of
        # *force* to True
        return commonl.fsdb_symlink_c.set(self, key, value, force = force)

    def state_set(self, new_state):
        """
        Set the allocation's state; *new_state* must be a key of
        :data:`states`.

        NOTE(review): the original docstring claimed this returns
        *True*/*False*, but the code returns *None* unconditionally --
        confirm no caller relies on a return value.
        """
        assert new_state in states
        self.set('state', new_state, force = True)

    def state_get(self):
        # current state, one of the keys of :data:`states`
        return self.get('state')

    def timestamp(self):
        """Refresh the keepalive timestamp to *now* and return it."""
        # 20200323113030 is more readable than seconds since the epoch
        # and we still can do easy arithmetic with it.
        ts = time.strftime("%Y%m%d%H%M%S")
        self.set('timestamp', ts, force = True)
        return ts

    def timestamp_get(self):
        # if there is no timestamp, forge the Epoch
        return self.get('timestamp', "19700101000000")

    def maintenance(self, ts_now):
        """Periodic upkeep: delete the allocation if it idled out or
        exceeded its time-to-live.

        :param datetime.datetime ts_now: current time
        """
        #logging.error("DEBUG: %s: maint %s", self.allocid, ts_now)

        # Check if it has been idle for too long
        ts_last_keepalive = datetime.datetime.strptime(self.timestamp_get(),
                                                       "%Y%m%d%H%M%S")
        ts_idle = ts_now - ts_last_keepalive
        seconds_idle = ts_idle.seconds
        # days might be < 0 when the maintenance process has started
        # and before we got here somebody timestamped the target, thus
        # ts_last_keepalive > ts_now -> in this case, we are good, it
        # is fresh
        if ts_idle.days >= 0 and seconds_idle > ttbl.config.target_max_idle:
            logging.info(
                "ALLOC: allocation %s timedout (idle %s/%s), deleting",
                self.allocid, seconds_idle, ttbl.config.target_max_idle)
            self.delete('timedout')
            return

        # Check if it has been alive too long
        # FIXME: define well how are we going to define the TTL
        ttl = self.get("ttl", 0)
        if ttl > 0:
            # NOTE(review): ts_now is a datetime while ts_start is an
            # int (epoch seconds), so this subtraction looks like it
            # would raise TypeError; also self.get() may yield ttl as
            # a string.  Verify before relying on TTL enforcement.
            ts_start = int(self.get('_alloc.timestamp_start'))
            if ts_now - ts_start > ttl:
                self.delete('overtime')
                return

    def calculate_stuff(self):
        """Re-evaluate the allocation after target ownership changed:
        if any requested group is now fully allocated, mark the
        allocation active; otherwise compute starvation scores for the
        targets still missing."""
        # lock so we don't have two processes doing the same
        # processing after acquiring different targets of our group at
        # the same time
        with self.lock:
            # We need to know if we have completely allocated all of
            # the targets of any of the groups of targets this
            # allocid requested

            # Lookup all the targets this allocid has allocated
            targets_allocated = set()
            for target_name, target in self.targets_all.items():
                allocid = target.allocid_get_bare()
                if allocid == self.allocid:
                    targets_allocated.add(target_name)

            # Iterate all the groups, see which are incomplete; for
            # each target, collect their max boot score
            targets_to_boost = collections.defaultdict(int)
            for group_name, group in self.groups.items():
                not_yet_allocated = group - targets_allocated
                if not_yet_allocated:
                    # this group has still targets not allocated, will
                    # have to do starvation recalculation later
                    score = float(len(targets_allocated)) / len(group)
                    #logging.error(
                    #    "DEBUG: group %s incomplete, score %f [%d/%d]",
                    #    group_name, score, len(targets_allocated), len(group))
                    # NOTE(review): with defaultdict(int) the stored
                    # default is 0 and score >= 0, so min() always
                    # yields 0; the comment above says "max boot
                    # score" -- verify whether max() was intended.
                    for target_name in not_yet_allocated:
                        targets_to_boost[target_name] = \
                            min(targets_to_boost[target_name], score)
                else:
                    # This group is complete, so we don't need the
                    # other targets tentatively allocated, so return
                    # the list so they can be released

                    # all targets needed for this group have been
                    # allocated, let's then use it--if we set the "group"
                    # value, then we have it allocated
                    # Sort here because everywhere else we need a set
                    self.set("group_allocated", ",".join(sorted(group)))
                    self.set("timestamp_start", time.time())
                    self.set("ts_start", time.time())	# COMPAT
                    self.state_set("active")
                    #logging.error("DEBUG: %s: group %s complete, state %s",
                    #              self.allocid, group_name,
                    #              self.state_get())
                    return
<gh_stars>1000+
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssl
import re
import urllib.request
import json
import collections
import sys
import getopt
import external_error_pb2
from html.parser import HTMLParser
def parsing(externalErrorDesc):
#*********************************************************************************************#
#*********************************** CUDA Error Message **************************************#
print("start crawling errorMessage for nvidia CUDA API--->")
url = 'https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html#group__CUDART__TYPES_1g3f51e3575c2178246db0a94a430e0038'
allMessageDesc = externalErrorDesc.errors.add()
allMessageDesc.type = external_error_pb2.CUDA
ssl._create_default_https_context = ssl._create_unverified_context
html = urllib.request.urlopen(url).read().decode('utf-8')
res_div = r'<div class="section">.*?<p>CUDA error types </p>.*?</div>.*?<div class="enum-members">(.*?)</div>'
m_div = re.findall(res_div, html, re.S | re.M)[0]
res_dt = r'<dt>(.*?)</dt>.*?<dd>(.*?)</dd>'
m_dt = re.findall(res_dt, m_div, re.S | re.M)
for error in m_dt:
res_type = r'<span class="enum-member-name-def">(.*?) = <span class="ph ph apiData">(.*?)</span></span>'
m_type = re.findall(res_type, error[0], re.S | re.M)[0]
m_message = error[1]
m_message = m_message.replace('\n', '')
res_a = r'(<a class=.*?</a>)'
res_shape = r'<a class=.*?>(.*?)</a>'
list_a = re.findall(res_a, m_message, re.S | re.M)
list_shape = re.findall(res_shape, m_message, re.S | re.M)
assert len(list_a) == len(list_shape)
for idx in range(len(list_a)):
m_message = m_message.replace(list_a[idx], list_shape[idx])
m_message = m_message.replace(
'<h6 class=\"deprecated_header\">Deprecated</h6>', '')
res_span = r'(<span class=.*?</span>)'
res_span_detail = r'<span class=.*?>(.*?)</span>'
list_span = re.findall(res_span, m_message, re.S | re.M)
list_span_detail = re.findall(res_span_detail, m_message, re.S | re.M)
assert len(list_span) == len(list_span_detail)
for idx in range(len(list_span)):
m_message = m_message.replace(list_span[idx], list_span_detail[idx])
res_p = r'(<p>.*?</p>)'
res_p_detail = r'<p>(.*?)</p>'
list_p = re.findall(res_p, m_message, re.S | re.M)
list_p_detail = re.findall(res_p_detail, m_message, re.S | re.M)
assert len(list_p) == len(list_p_detail)
for idx in range(len(list_p)):
m_message = m_message.replace(list_p[idx], list_p_detail[idx])
m_message = m_message.replace(' ', '')
_Messages = allMessageDesc.messages.add()
try:
_Messages.code = int(m_type[1])
except ValueError:
if re.match('0x', m_type[1]):
_Messages.code = int(m_type[1], 16)
else:
raise ValueError
_Messages.message = "'%s'. %s" % (m_type[0], m_message)
print("End crawling errorMessage for nvidia CUDA API!\n")
#***********************************************************************************************#
#*********************************** CURAND Error Message **************************************#
print("start crawling errorMessage for nvidia CURAND API--->")
url = 'https://docs.nvidia.com/cuda/curand/group__HOST.html#group__HOST_1gb94a31d5c165858c96b6c18b70644437'
allMessageDesc = externalErrorDesc.errors.add()
allMessageDesc.type = external_error_pb2.CURAND
html = urllib.request.urlopen(url).read().decode('utf-8')
res_div = r'<div class="section">.*?<p>CURAND function call status types </p>.*?</div>.*?<div class="enum-members">(.*?)</div>'
m_div = re.findall(res_div, html, re.S | re.M)[0]
res_dt = r'<dt>(.*?)</dt>.*?<dd>(.*?)</dd>'
m_dt = re.findall(res_dt, m_div, re.S | re.M)
for error in m_dt:
res_type = r'<span class="enum-member-name-def">(.*?) = <span class="ph ph apiData">(.*?)</span></span>'
m_type = re.findall(res_type, error[0], re.S | re.M)[0]
m_message = error[1]
_Messages = allMessageDesc.messages.add()
try:
_Messages.code = int(m_type[1])
except ValueError:
if re.match('0x', m_type[1]):
_Messages.code = int(m_type[1], 16)
else:
raise ValueError
_Messages.message = "'%s'. %s" % (m_type[0], m_message)
print("End crawling errorMessage for nvidia CURAND API!\n")
#**************************************************************************************************#
#*********************************** CUDNN Error Message ******************************************#
cudnnStatus_t = {
"CUDNN_STATUS_SUCCESS": 0,
"CUDNN_STATUS_NOT_INITIALIZED": 1,
"CUDNN_STATUS_ALLOC_FAILED": 2,
"CUDNN_STATUS_BAD_PARAM": 3,
"CUDNN_STATUS_INTERNAL_ERROR": 4,
"CUDNN_STATUS_INVALID_VALUE": 5,
"CUDNN_STATUS_ARCH_MISMATCH": 6,
"CUDNN_STATUS_MAPPING_ERROR": 7,
"CUDNN_STATUS_EXECUTION_FAILED": 8,
"CUDNN_STATUS_NOT_SUPPORTED": 9,
"CUDNN_STATUS_LICENSE_ERROR": 10,
"CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING": 11,
"CUDNN_STATUS_RUNTIME_IN_PROGRESS": 12,
"CUDNN_STATUS_RUNTIME_FP_OVERFLOW": 13,
}
print("start crawling errorMessage for nvidia CUDNN API--->")
url = 'https://docs.nvidia.com/deeplearning/cudnn/api/index.html#cudnnStatus_t'
allMessageDesc = externalErrorDesc.errors.add()
allMessageDesc.type = external_error_pb2.CUDNN
html = urllib.request.urlopen(url).read().decode('utf-8')
f = open('1.txt', 'w')
f.write(html)
res_div = r'<div class="section" id="cudnnStatus_t__section_lmp_dgr_2jb"><a name="cudnnStatus_t__section_lmp_dgr_2jb" shape="rect">(.*?)</div>'
m_div = re.findall(res_div, html, re.S | re.M)[0]
res_dt = r'<dt class="dt dlterm"><samp class="ph codeph">(.*?)</samp></dt>.*?<dd class="dd">(.*?)</dd>'
m_dt = re.findall(res_dt, m_div, re.S | re.M)
for error in m_dt:
m_message = error[1]
res_class = r'<p class="p">.*?</p>'
res_class_detail = r'<p class="p">(.*?)</p>'
list_class = re.findall(res_class, m_message, re.S | re.M)
list_class_detail = re.findall(res_class_detail, m_message, re.S | re.M)
assert len(list_class) == len(list_class_detail)
for idx in range(len(list_class)):
m_message = m_message.replace(list_class[idx],
list_class_detail[idx])
res_a = r'(<a class="xref".*?</a>)'
res_shape = r'<a class="xref".*?>(.*?)</a>'
list_a = re.findall(res_a, m_message, re.S | re.M)
list_shape = re.findall(res_shape, m_message, re.S | re.M)
assert len(list_a) == len(list_shape)
for idx in range(len(list_a)):
m_message = m_message.replace(list_a[idx], list_shape[idx])
res_span = r'(<span class="ph">.*?</span>)'
res_span_detail = r'<span class="ph">(.*?)</span>'
list_span = re.findall(res_span, m_message, re.S | re.M)
list_span_detail = re.findall(res_span_detail, m_message, re.S | re.M)
assert len(list_span) == len(list_span_detail)
for idx in range(len(list_span)):
m_message = m_message.replace(list_span[idx], list_span_detail[idx])
res_samp = r'(<samp class="ph codeph">.*?</samp>)'
res_samp_detail = r'<samp class="ph codeph">(.*?)</samp>'
list_samp = re.findall(res_samp, m_message, re.S | re.M)
list_samp_detail = re.findall(res_samp_detail, m_message, re.S | re.M)
assert len(list_samp) == len(list_samp_detail)
for idx in range(len(list_samp)):
m_message = m_message.replace(list_samp[idx], list_samp_detail[idx])
m_message = re.sub(r'\n +', ' ', m_message)
_Messages = allMessageDesc.messages.add()
_Messages.code = int(cudnnStatus_t[error[0]])
_Messages.message = "'%s'. %s" % (error[0], m_message)
print("End crawling errorMessage for nvidia CUDNN API!\n")
#*************************************************************************************************#
#*********************************** CUBLAS Error Message ****************************************#
cublasStatus_t = {
"CUBLAS_STATUS_SUCCESS": 0,
"CUBLAS_STATUS_NOT_INITIALIZED": 1,
"CUBLAS_STATUS_ALLOC_FAILED": 3,
"CUBLAS_STATUS_INVALID_VALUE": 7,
"CUBLAS_STATUS_ARCH_MISMATCH": 8,
"CUBLAS_STATUS_MAPPING_ERROR": 11,
"CUBLAS_STATUS_EXECUTION_FAILED": 13,
"CUBLAS_STATUS_INTERNAL_ERROR": 14,
"CUBLAS_STATUS_NOT_SUPPORTED": 15,
"CUBLAS_STATUS_LICENSE_ERROR": 16
}
print("start crawling errorMessage for nvidia CUBLAS API--->")
url = 'https://docs.nvidia.com/cuda/cublas/index.html#cublasstatus_t'
allMessageDesc = externalErrorDesc.errors.add()
allMessageDesc.type = external_error_pb2.CUBLAS
html = urllib.request.urlopen(url).read().decode('utf-8')
res_div = r'<p class="p">The type is used for function status returns. All cuBLAS library.*?<div class="tablenoborder">(.*?)</div>'
m_div = re.findall(res_div, html, re.S | re.M)[0]
res_dt = r'<p class="p"><samp class="ph codeph">(.*?)</samp></p>.*?colspan="1">(.*?)</td>'
m_dt = re.findall(res_dt, m_div, re.S | re.M)
for error in m_dt:
m_message = error[1]
m_message = re.sub(r'\n +', ' ', m_message)
res_p = r'<p class="p">.*?</p>'
res_p_detail = r'<p class="p">(.*?)</p>'
list_p = re.findall(res_p, m_message, re.S | re.M)
list_p_detail = re.findall(res_p_detail, m_message, re.S | re.M)
assert len(list_p) == len(list_p_detail)
for idx in range(len(list_p)):
m_message = m_message.replace(list_p[idx], list_p_detail[idx])
res_samp = r'<samp class="ph codeph">.*?</samp>'
res_samp_detail = r'<samp class="ph codeph">(.*?)</samp>'
list_samp = re.findall(res_samp, m_message, re.S | re.M)
list_samp_detail = re.findall(res_samp_detail, m_message, re.S | re.M)
assert len(list_samp) == len(list_samp_detail)
for idx in range(len(list_samp)):
m_message = m_message.replace(list_samp[idx], list_samp_detail[idx])
_Messages = allMessageDesc.messages.add()
_Messages.code = int(cublasStatus_t[error[0]])
_Messages.message = "'%s'. %s" % (error[0], m_message)
print("End crawling errorMessage for nvidia CUBLAS API!\n")
#*************************************************************************************************#
#*********************************** CUSOLVER Error Message **************************************#
cusolverStatus_t = {
"CUSOLVER_STATUS_SUCCESS": 0,
"CUSOLVER_STATUS_NOT_INITIALIZED": 1,
"CUSOLVER_STATUS_ALLOC_FAILED": 2,
"CUSOLVER_STATUS_INVALID_VALUE": 3,
"CUSOLVER_STATUS_ARCH_MISMATCH": 4,
"CUSOLVER_STATUS_MAPPING_ERROR": 5,
"CUSOLVER_STATUS_EXECUTION_FAILED": 6,
"CUSOLVER_STATUS_INTERNAL_ERROR": 7,
"CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED": 8,
"CUSOLVER_STATUS_NOT_SUPPORTED": 9,
"CUSOLVER_STATUS_ZERO_PIVOT": 10,
"CUSOLVER_STATUS_INVALID_LICENSE": 11,
"CUSOLVER_STATUS_IRS_PARAMS_NOT_INITIALIZED": 12,
"CUSOLVER_STATUS_IRS_PARAMS_INVALID": 13,
"CUSOLVER_STATUS_IRS_INTERNAL_ERROR": 14,
"CUSOLVER_STATUS_IRS_NOT_SUPPORTED": 15,
"CUSOLVER_STATUS_IRS_OUT_OF_RANGE": 16,
"CUSOLVER_STATUS_IRS_NRHS_NOT_SUPPORTED_FOR_REFINE_GMRES": 17,
"CUSOLVER_STATUS_IRS_INFOS_NOT_INITIALIZED": 18
}
print("start crawling errorMessage for nvidia CUSOLVER API--->")
url = 'https://docs.nvidia.com/cuda/cusolver/index.html#cuSolverSPstatus'
allMessageDesc = externalErrorDesc.errors.add()
allMessageDesc.type = external_error_pb2.CUSOLVER
html = urllib.request.urlopen(url).read().decode('utf-8')
res_div = r'This is a status type returned by the library functions and.*?<div class="tablenoborder">(.*?)</div>'
m_div = re.findall(res_div, html, re.S | re.M)[0]
res_dt = r'<samp class="ph codeph">(.*?)</samp></td>.*?colspan="1">(.*?)</td>'
m_dt = re.findall(res_dt, m_div, re.S | re.M)
for error in m_dt:
m_message = error[1]
m_message = re.sub(r'\n +', '', m_message)
m_message = re.sub(r'<p class="p"></p>', '', m_message)
res_p = r'<p class="p">.*?</p>'
res_p_detail = r'<p class="p">(.*?)</p>'
list_p = re.findall(res_p, m_message, re.S | re.M)
list_p_detail = re.findall(res_p_detail, m_message, re.S | re.M)
assert len(list_p) == len(list_p_detail)
for idx in range(len(list_p)):
m_message = m_message.replace(list_p[idx], list_p_detail[idx])
res_samp = r'<samp class="ph codeph">.*?</samp>'
res_samp_detail = r'<samp class="ph codeph">(.*?)</samp>'
list_samp = re.findall(res_samp, m_message, re.S | re.M)
list_samp_detail = re.findall(res_samp_detail, m_message, re.S | re.M)
assert len(list_samp) == len(list_samp_detail)
for idx in range(len(list_samp)):
m_message = m_message.replace(list_samp[idx], list_samp_detail[idx])
res_strong = r'<strong class="ph b">.*?</strong>'
res_strong_detail = r'<strong class="ph b">(.*?)</strong>'
list_strong = re.findall(res_strong, m_message, re.S | re.M)
list_strong_detail = re.findall(res_strong_detail, m_message, re.S |
re.M)
assert len(list_strong) == len(list_strong_detail)
for idx in range(len(list_strong)):
m_message = m_message.replace(list_strong[idx],
list_strong_detail[idx])
_Messages = allMessageDesc.messages.add()
_Messages.code = int(cusolverStatus_t[error[0]])
_Messages.message = "'%s'. %s" % (error[0], m_message)
print("End crawling errorMessage for nvidia CUSOLVER API!\n")
#**********************************************************************************************#
#*************************************** NCCL error *******************************************#
print("start crawling errorMessage for nvidia NCCL API--->")
url = 'https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/api/types.html#ncclresult-t'
allMessageDesc = externalErrorDesc.errors.add()
allMessageDesc.type = external_error_pb2.NCCL
html = urllib.request.urlopen(url).read().decode('utf-8')
res_div = r'<code class="descname">ncclResult_t</code>(.*?)</div>'
m_div = re.findall(res_div, html, re.S | re.M)[0]
res_dt = r'<code class="descname">(.*?)</code>.*?<span class="pre">(.*?)</span></code>\)(.*?)</p>\n</dd></dl>'
m_dt = re.findall(res_dt, m_div, re.S | re.M)
for error in m_dt:
m_message = re.sub(r'\n', '', error[2])
_Messages = allMessageDesc.messages.add()
_Messages.code = int(error[1])
_Messages.message = "'%s'. %s" % (error[0], m_message)
print("End crawling errorMessage for nvidia NCCL API!\n")
#*************************************************************************************************#
#*********************************** CUFFT Error Message **************************************#
print("start crawling errorMessage for nvidia CUFFT API--->")
url = 'https://docs.nvidia.com/cuda/cufft/index.html#cufftresult'
allMessageDesc = externalErrorDesc.errors.add()
allMessageDesc.type = external_error_pb2.CUFFT
html = urllib.request.urlopen(url).read().decode('utf-8')
class CUFFTHTMLParser(HTMLParser):
'''CUFFTHTML Parser
'''
def handle_data(self, data):
if 'typedef enum cufftResult_t' in data:
for line in data.strip().splitlines()[1:-1]:
status, code, desc = re.split('=|//', line.strip())
_Messages = allMessageDesc.messages.add()
_Messages.code = int(code.strip(' ,'))
_Messages.message = "'%s'. %s" % (status.strip(),
desc.strip())
CUFFTHTMLParser().feed(html)
def main(argv):
try:
opts, _ = getopt.getopt(argv, "h", ["help"])
except getopt.GetoptError:
print('python spider.py')
sys.exit(2)
| |
averaged between 0 .. 500m'
if table2Version == 203 and indicatorOfParameter == 245:
return 'Height of LCL in hPa, source data is averaged between 0 .. 500m'
if table2Version == 203 and indicatorOfParameter == 244:
return 'Convective inhibition (cin)'
if table2Version == 203 and indicatorOfParameter == 243:
return 'Convective available potential energy, value of parameter when -40C < T < -10C'
if table2Version == 203 and indicatorOfParameter == 242:
return 'Convective available potential energy, source data is LCL-500 and EL-500'
if table2Version == 203 and indicatorOfParameter == 241:
return 'Convective available potential energy'
if table2Version == 203 and indicatorOfParameter == 240:
return 'Height of EL in meters'
if table2Version == 203 and indicatorOfParameter == 239:
return 'Height of LFC in meters'
if table2Version == 203 and indicatorOfParameter == 238:
return 'Height of LCL in meters'
if table2Version == 203 and indicatorOfParameter == 237:
return 'Height of EL in hPa'
if table2Version == 203 and indicatorOfParameter == 236:
return 'Height of LFC in hPa'
if table2Version == 203 and indicatorOfParameter == 235:
return 'Height of LCL in hPa'
if table2Version == 203 and indicatorOfParameter == 234:
return '100th fractal wind speed in EPS'
if table2Version == 203 and indicatorOfParameter == 233:
return '90th fractal wind speed in EPS'
if table2Version == 203 and indicatorOfParameter == 232:
return '75th fractal wind speed in EPS'
if table2Version == 203 and indicatorOfParameter == 231:
return '50th fractal wind speed in EPS'
if table2Version == 203 and indicatorOfParameter == 230:
return '25th fractal wind speed in EPS'
if table2Version == 203 and indicatorOfParameter == 229:
return '10th fractal wind speed in EPS'
if table2Version == 203 and indicatorOfParameter == 228:
return '0th fractal wind speed in EPS'
if table2Version == 203 and indicatorOfParameter == 227:
return '100th fractal wind gust speed in EPS'
if table2Version == 203 and indicatorOfParameter == 226:
return '90th fractal wind gust speed in EPS'
if table2Version == 203 and indicatorOfParameter == 225:
return '75th fractal wind gust speed in EPS'
if table2Version == 203 and indicatorOfParameter == 224:
return '50th fractal wind gust speed in EPS'
if table2Version == 203 and indicatorOfParameter == 223:
return '25th fractal wind gust speed in EPS'
if table2Version == 203 and indicatorOfParameter == 222:
return '10th fractal wind gust speed in EPS'
if table2Version == 203 and indicatorOfParameter == 221:
return '0th fractal wind gust speed in EPS'
if table2Version == 203 and indicatorOfParameter == 220:
return '100th fractal cloudiness in EPS'
if table2Version == 203 and indicatorOfParameter == 219:
return '90th fractal cloudiness in EPS'
if table2Version == 203 and indicatorOfParameter == 218:
return '75th fractal cloudiness in EPS'
if table2Version == 203 and indicatorOfParameter == 217:
return '50th fractal cloudiness in EPS'
if table2Version == 203 and indicatorOfParameter == 216:
return '25th fractal cloudiness in EPS'
if table2Version == 203 and indicatorOfParameter == 215:
return '10th fractal cloudiness in EPS'
if table2Version == 203 and indicatorOfParameter == 214:
return '0th fractal cloudiness in EPS'
if table2Version == 203 and indicatorOfParameter == 213:
return 'Solid precipitation rate (f.ex. snow+graupel)'
if table2Version == 203 and indicatorOfParameter == 212:
return 'Graupel rate in mm/h'
if table2Version == 203 and indicatorOfParameter == 211:
return 'Vegetation type'
if table2Version == 203 and indicatorOfParameter == 210:
return 'V-component of momentum flux in N m-2'
if table2Version == 203 and indicatorOfParameter == 209:
return 'U-component of momentum flux in N m-2'
if table2Version == 203 and indicatorOfParameter == 208:
return 'Kinetic energy of turbulence in J kg-1'
if table2Version == 203 and indicatorOfParameter == 207:
return 'Soil type'
if table2Version == 203 and indicatorOfParameter == 206:
return 'FMIWEATHERSYMBOL1'
if table2Version == 203 and indicatorOfParameter == 205:
return 'CAPE, source data is most unstable, value of parameter when -40C < T < -10C'
if table2Version == 203 and indicatorOfParameter == 204:
return 'CAPE, source data is most unstable, value of CAPE between 0 .. 3km'
if table2Version == 203 and indicatorOfParameter == 203:
return 'Scalar momentum flux in Pa'
if table2Version == 203 and indicatorOfParameter == 202:
return 'Sensible heat flux'
if table2Version == 203 and indicatorOfParameter == 201:
return 'Latent heat flux'
if table2Version == 203 and indicatorOfParameter == 200:
return 'Canopy water'
if table2Version == 203 and indicatorOfParameter == 199:
return 'Convective inhibition, source data is most unstable'
if table2Version == 203 and indicatorOfParameter == 198:
return 'Ozone anomaly'
if table2Version == 203 and indicatorOfParameter == 197:
return 'UV index'
if table2Version == 203 and indicatorOfParameter == 196:
return 'UV index maximum'
if table2Version == 203 and indicatorOfParameter == 195:
return 'Convective available potential energy, source data is most unstable'
if table2Version == 203 and indicatorOfParameter == 194:
return 'Height of EL in meters, source data is found from most unstable level'
if table2Version == 203 and indicatorOfParameter == 193:
return '100th fractal precipitation in EPS'
if table2Version == 203 and indicatorOfParameter == 192:
return '90th fractal precipitation in EPS'
if table2Version == 203 and indicatorOfParameter == 191:
return '75th fractal precipitation in EPS'
if table2Version == 203 and indicatorOfParameter == 190:
return '50th fractal precipitation in EPS'
if table2Version == 203 and indicatorOfParameter == 189:
return '25th fractal precipitation in EPS'
if table2Version == 203 and indicatorOfParameter == 188:
return '10th fractal precipitation in EPS'
if table2Version == 203 and indicatorOfParameter == 187:
return '0th fractal precipitation in EPS'
if table2Version == 203 and indicatorOfParameter == 186:
return 'Soil Moisture Content in Kg per square meter'
if table2Version == 203 and indicatorOfParameter == 185:
return 'Height of LFC in meters, source data is found from most unstable level'
if table2Version == 203 and indicatorOfParameter == 184:
return 'Height of LCL in meters, source data is found from most unstable level'
if table2Version == 203 and indicatorOfParameter == 183:
return 'Surface Roughness in Meters'
if table2Version == 203 and indicatorOfParameter == 182:
return 'Height of EL in hPa, source data is found from most unstable level'
if table2Version == 203 and indicatorOfParameter == 181:
return 'Height of LFC in hPa, source data is found from most unstable level'
if table2Version == 203 and indicatorOfParameter == 180:
return 'Height of LCL in hPa, source data is found from most unstable level'
if table2Version == 203 and indicatorOfParameter == 179:
return '100th fractal (ie. maximum) temperature in EPS'
if table2Version == 203 and indicatorOfParameter == 178:
return '90th fractal temperature in EPS'
if table2Version == 203 and indicatorOfParameter == 177:
return '75th fractal temperature in EPS'
if table2Version == 203 and indicatorOfParameter == 176:
return '50th fractal temperature in EPS'
if table2Version == 203 and indicatorOfParameter == 175:
return '25th fractal temperature in EPS'
if table2Version == 203 and indicatorOfParameter == 174:
return '10th fractal temperature in EPS'
if table2Version == 203 and indicatorOfParameter == 173:
return '0th fractal (ie. minimum) temperature in EPS'
if table2Version == 203 and indicatorOfParameter == 172:
return 'Total totals index'
if table2Version == 203 and indicatorOfParameter == 171:
return 'Vertical totals index'
if table2Version == 203 and indicatorOfParameter == 170:
return 'Cross totals index'
if table2Version == 203 and indicatorOfParameter == 169:
return 'Lifted index'
if table2Version == 203 and indicatorOfParameter == 168:
return 'Showalter index'
if table2Version == 203 and indicatorOfParameter == 167:
return 'Mixed layer height in m'
if table2Version == 203 and indicatorOfParameter == 166:
return 'Instant solid precipitation (snow+graupel) in kg/m2'
if table2Version == 203 and indicatorOfParameter == 165:
return 'Instant rain in kg/m2'
if table2Version == 203 and indicatorOfParameter == 164:
return 'Surface Roughness (momentum) in meters'
if table2Version == 203 and indicatorOfParameter == 163:
return 'Inverse of Monin-Obukhov length, i.e. 1/L in m-1'
if table2Version == 203 and indicatorOfParameter == 161:
return 'Probability of snow'
if table2Version == 203 and indicatorOfParameter == 159:
return 'Extreme forecast index for | |
# Source repository: MichaelDoron/imaginaire
# Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from imaginaire.generators.fs_vid2vid import LabelEmbedder
from imaginaire.layers import Conv2dBlock, LinearBlock, Res2dBlock
from imaginaire.model_utils.fs_vid2vid import (extract_valid_pose_labels,
resample)
from imaginaire.utils.data import (get_paired_input_image_channel_number,
get_paired_input_label_channel_number)
from imaginaire.utils.init_weight import weights_init
class BaseNetwork(nn.Module):
    r"""Common base for the vid2vid generator networks.

    Provides the filter-count schedule shared by the generator's
    down/upsampling pyramids.
    """
    def __init__(self):
        super(BaseNetwork, self).__init__()

    def get_num_filters(self, num_downsamples):
        r"""Get the number of filters at current layer.

        The channel count doubles with every downsampling step, clamped
        at ``self.max_num_filters``.

        Args:
            num_downsamples (int) : How many downsamples at current layer.
        Returns:
            output (int) : Number of filters.
        """
        doubled = self.num_filters * (2 ** num_downsamples)
        return min(doubled, self.max_num_filters)
class Generator(BaseNetwork):
r"""vid2vid generator constructor.
Args:
gen_cfg (obj): Generator definition part of the yaml config file.
data_cfg (obj): Data definition part of the yaml config file.
"""
    def __init__(self, gen_cfg, data_cfg):
        r"""Build the vid2vid generator from the yaml configuration.

        Args:
            gen_cfg (obj): Generator definition part of the yaml config file.
            data_cfg (obj): Data definition part of the yaml config file.
        """
        super().__init__()
        self.gen_cfg = gen_cfg
        self.data_cfg = data_cfg
        self.num_frames_G = data_cfg.num_frames_G
        # Number of residual blocks in generator.
        self.num_layers = num_layers = getattr(gen_cfg, 'num_layers', 7)
        # Number of downsamplings for previous frame.
        self.num_downsamples_img = getattr(gen_cfg, 'num_downsamples_img', 4)
        # Number of filters in the first layer.
        self.num_filters = num_filters = getattr(gen_cfg, 'num_filters', 32)
        self.max_num_filters = getattr(gen_cfg, 'max_num_filters', 1024)
        self.kernel_size = kernel_size = getattr(gen_cfg, 'kernel_size', 3)
        padding = kernel_size // 2
        # For pose dataset.
        self.is_pose_data = hasattr(data_cfg, 'for_pose_dataset')
        if self.is_pose_data:
            pose_cfg = data_cfg.for_pose_dataset
            self.pose_type = getattr(pose_cfg, 'pose_type', 'both')
            self.remove_face_labels = getattr(pose_cfg, 'remove_face_labels',
                                              False)
        # Input data params.
        num_input_channels = get_paired_input_label_channel_number(data_cfg)
        num_img_channels = get_paired_input_image_channel_number(data_cfg)
        # The output spatial size is taken from the validation augmentations.
        aug_cfg = data_cfg.val.augmentations
        if hasattr(aug_cfg, 'center_crop_h_w'):
            crop_h_w = aug_cfg.center_crop_h_w
        elif hasattr(aug_cfg, 'resize_h_w'):
            crop_h_w = aug_cfg.resize_h_w
        else:
            raise ValueError('Need to specify output size.')
        # crop_h_w is an 'H,W' string, e.g. '256,256'.
        crop_h, crop_w = crop_h_w.split(',')
        crop_h, crop_w = int(crop_h), int(crop_w)
        # Spatial size at the bottle neck of generator.
        self.sh = crop_h // (2 ** num_layers)
        self.sw = crop_w // (2 ** num_layers)
        # Noise vector dimension.
        self.z_dim = getattr(gen_cfg, 'style_dims', 256)
        self.use_segmap_as_input = \
            getattr(gen_cfg, 'use_segmap_as_input', False)
        # Label / image embedding network.
        self.emb_cfg = emb_cfg = getattr(gen_cfg, 'embed', None)
        # NOTE(review): the default here is the *string* 'True' (always
        # truthy), not the bool True. Behavior is the same for the default
        # case, but the type is inconsistent -- confirm intent.
        self.use_embed = getattr(emb_cfg, 'use_embed', 'True')
        self.num_downsamples_embed = getattr(emb_cfg, 'num_downsamples', 5)
        if self.use_embed:
            self.label_embedding = LabelEmbedder(emb_cfg, num_input_channels)
        # Flow network.
        self.flow_cfg = flow_cfg = gen_cfg.flow
        # Use SPADE to combine warped and hallucinated frames instead of
        # linear combination.
        self.spade_combine = getattr(flow_cfg, 'multi_spade_combine', True)
        # Number of layers to perform multi-spade combine.
        self.num_multi_spade_layers = getattr(flow_cfg.multi_spade_combine,
                                              'num_layers', 3)
        # At beginning of training, only train an image generator.
        self.temporal_initialized = False
        # Whether to output hallucinated frame (when training temporal network)
        # for additional loss.
        self.generate_raw_output = False
        # Image generation network.
        weight_norm_type = getattr(gen_cfg, 'weight_norm_type', 'spectral')
        activation_norm_type = gen_cfg.activation_norm_type
        activation_norm_params = gen_cfg.activation_norm_params
        if self.use_embed and \
                not hasattr(activation_norm_params, 'num_filters'):
            activation_norm_params.num_filters = 0
        nonlinearity = 'leakyrelu'
        # Residual-block factory shared by the up/downsampling pyramids.
        self.base_res_block = base_res_block = partial(
            Res2dBlock, kernel_size=kernel_size, padding=padding,
            weight_norm_type=weight_norm_type,
            activation_norm_type=activation_norm_type,
            activation_norm_params=activation_norm_params,
            nonlinearity=nonlinearity, order='NACNAC')
        # Upsampling residual blocks (up_<num_layers> ... up_0).
        for i in range(num_layers, -1, -1):
            activation_norm_params.cond_dims = self.get_cond_dims(i)
            activation_norm_params.partial = self.get_partial(
                i) if hasattr(self, 'get_partial') else False
            layer = base_res_block(self.get_num_filters(i + 1),
                                   self.get_num_filters(i))
            setattr(self, 'up_%d' % i, layer)
        # Final conv layer.
        self.conv_img = Conv2dBlock(num_filters, num_img_channels,
                                    kernel_size, padding=padding,
                                    nonlinearity=nonlinearity, order='AC')
        # Channel count at the generator bottleneck; used as the fc output
        # size below.
        num_filters = min(self.max_num_filters,
                          num_filters * (2 ** (self.num_layers + 1)))
        if self.use_segmap_as_input:
            self.fc = Conv2dBlock(num_input_channels, num_filters,
                                  kernel_size=3, padding=1)
        else:
            self.fc = LinearBlock(self.z_dim, num_filters * self.sh * self.sw)
        # Misc.
        self.downsample = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        self.upsample = partial(F.interpolate, scale_factor=2)
        self.init_temporal_network()
def forward(self, data):
r"""vid2vid generator forward.
Args:
data (dict) : Dictionary of input data.
Returns:
output (dict) : Dictionary of output data.
"""
label = data['label']
label_prev, img_prev = data['prev_labels'], data['prev_images']
is_first_frame = img_prev is None
z = getattr(data, 'z', None)
bs, _, h, w = label.size()
if self.is_pose_data:
label, label_prev = extract_valid_pose_labels(
[label, label_prev], self.pose_type, self.remove_face_labels)
# Get SPADE conditional maps by embedding current label input.
cond_maps_now = self.get_cond_maps(label, self.label_embedding)
# Input to the generator will either be noise/segmentation map (for
# first frame) or encoded previous frame (for subsequent frames).
if is_first_frame:
# First frame in the sequence, start from scratch.
if self.use_segmap_as_input:
x_img = F.interpolate(label, size=(self.sh, self.sw))
x_img = self.fc(x_img)
else:
if z is None:
z = torch.randn(bs, self.z_dim, dtype=label.dtype,
device=label.get_device()).fill_(0)
x_img = self.fc(z).view(bs, -1, self.sh, self.sw)
# Upsampling layers.
for i in range(self.num_layers, self.num_downsamples_img, -1):
j = min(self.num_downsamples_embed, i)
x_img = getattr(self, 'up_' + str(i))(x_img, *cond_maps_now[j])
x_img = self.upsample(x_img)
else:
# Not the first frame, will encode the previous frame and feed to
# the generator.
x_img = self.down_first(img_prev[:, -1])
# Get label embedding for the previous frame.
cond_maps_prev = self.get_cond_maps(label_prev[:, -1],
self.label_embedding)
# Downsampling layers.
for i in range(self.num_downsamples_img + 1):
j = min(self.num_downsamples_embed, i)
x_img = getattr(self, 'down_' + str(i))(x_img,
*cond_maps_prev[j])
if i != self.num_downsamples_img:
x_img = self.downsample(x_img)
# Resnet blocks.
j = min(self.num_downsamples_embed, self.num_downsamples_img + 1)
for i in range(self.num_res_blocks):
cond_maps = cond_maps_prev[j] if i < self.num_res_blocks // 2 \
else cond_maps_now[j]
x_img = getattr(self, 'res_' + str(i))(x_img, *cond_maps)
flow = mask = img_warp = None
num_frames_G = self.num_frames_G
# Whether to warp the previous frame or not.
warp_prev = self.temporal_initialized and not is_first_frame and \
label_prev.shape[1] == num_frames_G - 1
if warp_prev:
# Estimate flow & mask.
label_concat = torch.cat([label_prev.view(bs, -1, h, w),
label], dim=1)
img_prev_concat = img_prev.view(bs, -1, h, w)
flow, mask = self.flow_network_temp(label_concat, img_prev_concat)
img_warp = resample(img_prev[:, -1], flow)
if self.spade_combine:
# if using SPADE combine, integrate the warped image (and
# occlusion mask) into conditional inputs for SPADE.
img_embed = torch.cat([img_warp, mask], dim=1)
cond_maps_img = self.get_cond_maps(img_embed,
self.img_prev_embedding)
x_raw_img = None
# Main image generation branch.
for i in range(self.num_downsamples_img, -1, -1):
# Get SPADE conditional inputs.
j = min(i, self.num_downsamples_embed)
cond_maps = cond_maps_now[j]
# For raw output generation.
if self.generate_raw_output:
if i >= self.num_multi_spade_layers - 1:
x_raw_img = x_img
if i < self.num_multi_spade_layers:
x_raw_img = self.one_up_conv_layer(x_raw_img, cond_maps, i)
# For final output.
if warp_prev and i < self.num_multi_spade_layers:
cond_maps += cond_maps_img[j]
x_img = self.one_up_conv_layer(x_img, cond_maps, i)
# Final conv layer.
img_final = torch.tanh(self.conv_img(x_img))
img_raw = None
if self.spade_combine and self.generate_raw_output:
img_raw = torch.tanh(self.conv_img(x_raw_img))
if warp_prev and not self.spade_combine:
img_raw = img_final
img_final = img_final * mask + img_warp * (1 - mask)
output = dict()
output['fake_images'] = img_final
output['fake_flow_maps'] = flow
output['fake_occlusion_masks'] = mask
output['fake_raw_images'] = img_raw
output['warped_images'] = img_warp
return output
def one_up_conv_layer(self, x, encoded_label, i):
r"""One residual block layer in the main branch.
Args:
x (4D tensor) : Current feature map.
encoded_label (list of tensors) : Encoded input label maps.
i (int) : Layer index.
Returns:
x (4D tensor) : Output feature map.
"""
layer = getattr(self, 'up_' + str(i))
x = layer(x, *encoded_label)
if i != 0:
x = self.upsample(x)
return x
    def init_temporal_network(self, cfg_init=None):
        r"""When starting training multiple frames, initialize the
        downsampling network and flow network.

        Args:
            cfg_init (dict) : Weight initialization config.
        """
        # Number of image downsamplings for the previous frame.
        num_downsamples_img = self.num_downsamples_img
        # Number of residual blocks for the previous frame (rounded up to an
        # even count so they split evenly between prev/current conditioning).
        self.num_res_blocks = int(
            np.ceil((self.num_layers - num_downsamples_img) / 2.0) * 2)
        # First conv layer.
        num_img_channels = get_paired_input_image_channel_number(self.data_cfg)
        self.down_first = \
            Conv2dBlock(num_img_channels,
                        self.num_filters, self.kernel_size,
                        padding=self.kernel_size // 2)
        if cfg_init is not None:
            self.down_first.apply(weights_init(cfg_init.type, cfg_init.gain))
        # Downsampling residual blocks.
        activation_norm_params = self.gen_cfg.activation_norm_params
        for i in range(num_downsamples_img + 1):
            activation_norm_params.cond_dims = self.get_cond_dims(i)
            layer = self.base_res_block(self.get_num_filters(i),
                                        self.get_num_filters(i + 1))
            if cfg_init is not None:
                layer.apply(weights_init(cfg_init.type, cfg_init.gain))
            setattr(self, 'down_%d' % i, layer)
        # Additional residual blocks (constant channel count).
        res_ch = self.get_num_filters(num_downsamples_img + 1)
        activation_norm_params.cond_dims = \
            self.get_cond_dims(num_downsamples_img + 1)
        for i in range(self.num_res_blocks):
            layer = self.base_res_block(res_ch, res_ch)
            if cfg_init is not None:
                layer.apply(weights_init(cfg_init.type, cfg_init.gain))
            setattr(self, 'res_%d' % i, layer)
        # Flow network.
        flow_cfg = self.flow_cfg
        self.temporal_initialized = True
        # Raw (non-warped) output is only produced when SPADE-combining.
        self.generate_raw_output = getattr(flow_cfg, 'generate_raw_output',
                                           False) and self.spade_combine
        # NOTE(review): FlowGenerator is defined elsewhere in this module.
        self.flow_network_temp = FlowGenerator(flow_cfg, self.data_cfg)
        if cfg_init is not None:
            self.flow_network_temp.apply(weights_init(cfg_init.type,
                                                      cfg_init.gain))
        self.spade_combine = getattr(flow_cfg, 'multi_spade_combine', True)
        if self.spade_combine:
            # Embedding network for the warped previous frame plus its
            # occlusion mask (hence the extra +1 input channel).
            emb_cfg = flow_cfg.multi_spade_combine.embed
            num_img_channels = get_paired_input_image_channel_number(
                self.data_cfg)
            self.img_prev_embedding = LabelEmbedder(emb_cfg,
                                                    num_img_channels + 1)
            if cfg_init is not None:
                self.img_prev_embedding.apply(weights_init(cfg_init.type,
                                                           cfg_init.gain))
def get_cond_dims(self, num_downs=0):
r"""Get the dimensions of conditional inputs.
Args:
num_downs (int) : How many downsamples at current layer.
Returns:
ch (list) : List of | |
# File: turdshovel/_stubs/Microsoft/Diagnostics/Runtime/Utilities.py
# encoding: utf-8
# module Microsoft.Diagnostics.Runtime.Utilities calls itself Utilities
# from Microsoft.Diagnostics.Runtime, Version=2.0.5.1201, Culture=neutral, PublicKeyToken=31bf3856ad364e35
# by generator 1.145
# no doc
# no imports
# no functions
# classes
# Auto-generated IronPython stub: method bodies are placeholders (`pass`);
# the real implementations live in the Microsoft.Diagnostics.Runtime assembly.
class COMHelper(object):
    """ Base class for COM related objects in ClrMD. """
    @staticmethod
    def QueryInterface(pUnk, riid, result):
        """ QueryInterface(pUnk: IntPtr, riid: Guid) -> (HResult, Guid, IntPtr) """
        pass
    @staticmethod
    def Release(pUnk):
        """
        Release(pUnk: IntPtr) -> int
        Release an IUnknown pointer.
        pUnk: A pointer to the IUnknown interface to release.
        Returns: The result of pUnk->Release().
        """
        pass
    # Delegate/GUID placeholders; populated by the CLR at runtime.
    AddRefDelegate = None
    IUnknownGuid = None
    QueryInterfaceDelegate = None
    ReleaseDelegate = None
# Auto-generated IronPython stub for ClrMD's CallableCOMWrapper (a disposable
# wrapper over a COM IUnknown); bodies are placeholders resolved by the CLR.
class CallableCOMWrapper(COMHelper, IDisposable):
    # no doc
    def AddRef(self):
        """ AddRef(self: CallableCOMWrapper) -> int """
        pass
    def Dispose(self):
        """ Dispose(self: CallableCOMWrapper) """
        pass
    def QueryInterface(self, riid):
        """ QueryInterface(self: CallableCOMWrapper, riid: Guid) -> (IntPtr, Guid) """
        pass
    def Release(self):
        """ Release(self: CallableCOMWrapper) -> int """
        pass
    def SuppressRelease(self):
        """ SuppressRelease(self: CallableCOMWrapper) """
        pass
    def __enter__(self, *args): #cannot find CLR method
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self, *args): #cannot find CLR method
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, *args): #cannot find CLR constructor
        """
        __new__(cls: type, toClone: CallableCOMWrapper)
        __new__(cls: type, library: RefCountedFreeLibrary, desiredInterface: Guid, pUnknown: IntPtr) -> Guid
        """
        pass
    Self = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    _vtable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
# Auto-generated IronPython stub; bodies are placeholders resolved by the CLR.
class COMCallableIUnknown(COMHelper):
    """
    A class that allows you to build a custom IUnknown based interface to pass as a COM object.
    This class is public to allow others to use this code and not duplicate it, but it is not
    intended for general use.
    COMCallableIUnknown()
    """
    def AddInterface(self, guid, validate):
        """
        AddInterface(self: COMCallableIUnknown, guid: Guid, validate: bool) -> VTableBuilder
        Adds an IUnknown based interface to this COM object.
        guid: The GUID of this interface.
        validate: Whether or not to validate the delegates that
        used to build this COM interface's methods.
        Returns: A VTableBuilder to construct this interface. Note that until VTableBuilder.Complete
        is called, the interface will not be registered.
        """
        pass
    def AddRef(self):
        """
        AddRef(self: COMCallableIUnknown) -> int
        AddRef.
        Returns: The new ref count.
        """
        pass
    def Destroy(self, *args): #cannot find CLR method
        """ Destroy(self: COMCallableIUnknown) """
        pass
    def Release(self):
        """
        Release(self: COMCallableIUnknown) -> int
        Release.
        Returns: The new RefCount.
        """
        pass
    IUnknown = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the IUnknown VTable for this object.
    Get: IUnknown(self: COMCallableIUnknown) -> IUnknownVTable
    """
    IUnknownObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the IUnknown pointer to this object.
    Get: IUnknownObject(self: COMCallableIUnknown) -> IntPtr
    """
# Auto-generated IronPython stub for a CLR enum; the value attributes below
# are placeholders populated by the runtime.
class ElfAuxvType(Enum, IComparable, IFormattable, IConvertible):
    """ enum ElfAuxvType, values: Base (7), Null (0) """
    def __eq__(self, *args): #cannot find CLR method
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args): #cannot find CLR method
        """ __format__(formattable: IFormattable, format: str) -> str """
        pass
    def __ge__(self, *args): #cannot find CLR method
        pass
    def __gt__(self, *args): #cannot find CLR method
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args): #cannot find CLR method
        pass
    def __lt__(self, *args): #cannot find CLR method
        pass
    def __ne__(self, *args): #cannot find CLR method
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __str__(self, *args): #cannot find CLR method
        pass
    Base = None
    Null = None
    value__ = None
# Auto-generated IronPython stub; bodies are placeholders resolved by the CLR.
class ElfCoreFile(object, IDisposable):
    """
    A helper class to read linux coredumps.
    ElfCoreFile(coredump: str)
    ElfCoreFile(stream: Stream, leaveOpen: bool)
    """
    def Dispose(self):
        """ Dispose(self: ElfCoreFile) """
        pass
    def EnumeratePRStatus(self):
        """
        EnumeratePRStatus(self: ElfCoreFile) -> IEnumerable[IElfPRStatus]
        Enumerates all prstatus notes contained within this coredump.
        """
        pass
    def GetAuxvValue(self, type):
        """
        GetAuxvValue(self: ElfCoreFile, type: ElfAuxvType) -> UInt64
        Returns the Auxv value of the given type.
        """
        pass
    def ReadMemory(self, address, buffer):
        """ ReadMemory(self: ElfCoreFile, address: UInt64, buffer: Span[Byte]) -> int """
        pass
    def __enter__(self, *args): #cannot find CLR method
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self, *args): #cannot find CLR method
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, *__args):
        """
        __new__(cls: type, coredump: str)
        __new__(cls: type, stream: Stream, leaveOpen: bool)
        """
        pass
    def __repr__(self, *args): #cannot find CLR method
        """ __repr__(self: object) -> str """
        pass
    ElfFile = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """All coredumps are themselves ELF files. This property returns the ElfFile that represents this coredump.
    Get: ElfFile(self: ElfCoreFile) -> ElfFile
    """
    LoadedImages = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """A mapping of all loaded images in the process. The key is the base address that the module is loaded at.
    Get: LoadedImages(self: ElfCoreFile) -> ImmutableDictionary[UInt64, ElfLoadedImage]
    """
# Auto-generated IronPython stub; bodies are placeholders resolved by the CLR.
class ElfFile(object, IDisposable):
    """
    A helper class to read ELF files.
    ElfFile(filename: str)
    ElfFile(stream: Stream, leaveOpen: bool)
    ElfFile(stream: Stream, position: UInt64, leaveOpen: bool, isVirtual: bool)
    """
    def Dispose(self):
        """ Dispose(self: ElfFile) """
        pass
    def TryGetExportSymbol(self, symbolName, offset):
        """
        TryGetExportSymbol(self: ElfFile, symbolName: str) -> (bool, UInt64)
        Returns the address of a module export symbol if found
        symbolName: symbol name (without the module name prepended)
        Returns: true if found
        """
        pass
    def __enter__(self, *args): #cannot find CLR method
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self, *args): #cannot find CLR method
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, *__args):
        """
        __new__(cls: type, filename: str)
        __new__(cls: type, stream: Stream, leaveOpen: bool)
        __new__(cls: type, stream: Stream, position: UInt64, leaveOpen: bool, isVirtual: bool)
        """
        pass
    def __repr__(self, *args): #cannot find CLR method
        """ __repr__(self: object) -> str """
        pass
    BuildId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Returns the build id of this ELF module (or ImmutableArray.Default if it doesn't exist).
    Get: BuildId(self: ElfFile) -> ImmutableArray[Byte]
    """
    Header = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """The ElfHeader of this file.
    Get: Header(self: ElfFile) -> IElfHeader
    """
    Notes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """The list of ElfNotes for this file.
    Get: Notes(self: ElfFile) -> ImmutableArray[ElfNote]
    """
    ProgramHeaders = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """The list of ProgramHeaders for this file.
    Get: ProgramHeaders(self: ElfFile) -> ImmutableArray[ElfProgramHeader]
    """
class ElfHeaderType(Enum, IComparable, IFormattable, IConvertible):
"""
The type of ELF file.
enum ElfHeaderType, values: Core (4), Executable (2), Relocatable (1), Shared (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
| |
'diff', 'f_val', 'lambda_ls_y']:
soln[item] = locals().get(item)
return soln
def sdm(x0, f, j, tol, notify_status_func, inner_loop_condition=None):
    """
    Steepest descent method for reducing the gradient of ||f(x)||^2.

    Reference: Burden / Faires / Burden (2015): Numerical Analysis.
    Clifton Park, NY (Cengage Learning).

    :param x0: initial estimate. Array length N.
    :param f: function. Returns array length N.
    :param j: jacobian. Returns array N X N.
    :param tol: absolute tolerance.
    :param notify_status_func: logging function.
    :param inner_loop_condition: optional line-search restriction; the trial
        step is halved until the trial point satisfies it.
    :return: x, array with reduced gradient to tol level.
    """
    def ssq(point):
        # Objective g(x) = f(x)^T f(x).
        residual = asarray(f(point))
        return residual.T.dot(residual)

    x = x0
    converged = False
    max_it = 1000
    for k in range(1, max_it):
        x_prev = x
        f_x = asarray(f(x))
        j_x = asarray(j(x))
        # Gradient of g at x, then normalized to a unit search direction.
        grad = asarray(2 * j_x.dot(f_x))
        grad_norm = asarray(sqrt(grad.T.dot(grad)))
        if grad_norm == 0:
            # Zero gradient: stationary point, nothing left to improve.
            break
        direction = grad / grad_norm
        a1, a3 = 0, 1
        if inner_loop_condition is not None:
            # Backtrack a3 until the trial point obeys the external
            # restriction (at most max_it halvings).
            halvings = 0
            while halvings <= max_it and \
                    not inner_loop_condition(x - a3 * direction):
                halvings += 1
                a3 = a3 / 2.0
        g1 = ssq(x - a1 * direction)
        g3 = ssq(x - a3 * direction)
        # Shrink a3 until g actually decreases along the direction.
        while g3 >= g1 and a3 > tol / 2.0:
            a3 = a3 / 2.0
            g3 = ssq(x - a3 * direction)
        a2 = a3 / 2.0
        g2 = ssq(x - a2 * direction)
        # Newton's forward divided differences build the quadratic
        # P(a) = g1 + h1*a + h3*a*(a - a2) interpolating g at 0, a2, a3.
        h1 = (g2 - g1) / a2
        h2 = (g3 - g2) / (a3 - a2)
        h3 = (h2 - h1) / a3
        # Critical point of P.
        a0 = 0.5 * (a2 - h1 / h3)
        g0 = ssq(x - a0 * direction)
        if g0 < g3:
            step, g_min = a0, g0
        else:
            step, g_min = a3, g3
        x = x - step * direction
        decrease = g_min - g1
        converged = abs(decrease) < tol  # procedure successful
        # Non-functional status notification.
        notify_status_func((-decrease / tol) * 100., converged, k,
                           k, nan, k,
                           x, x - x_prev, f_x, j_x, direction,
                           nan, g_min=g_min, g1=g1)
        if converged:
            break
    return x
def line_search(fun, jac, x_c, known_f_c=None, known_j_c=None,
                max_iter=50, alpha=1e-4, tol=1e-8,
                additional_restrictions=None, notify_status_func=None,
                outer_it=1, accum_step=0):
    """Backtracking line search along the Newton direction.

    Jr., <NAME> ; Schnabel, <NAME>.: Numerical Methods for Unconstrained
    Optimization and Nonlinear Equations. Philadelphia: SIAM, 1996.

    Returns x+, f(x+), g(x+), s_0_n, j(x+) such that x+ = x_c + lambda s_0_n satisfies \
    f(x+) <= f(xc) + alpha lambda (nabla f(x_0))^T s_0^N

    :param fun: function
    :param jac: jacobian
    :param known_f_c: if already calculated, f(x_c) (saves one evaluation)
    :param known_j_c: if already calculated, j(x_c) (saves one evaluation)
    :param x_c: initial estimate for x+
    :param max_iter: maximum iterations (backtracks)
    :param alpha: stringence constant in (0,1/2)
    :param tol: step tolerance
    :param additional_restrictions: optional restrictions imposed on x. Line search when False.
    :param notify_status_func: optional function to log result
    :param accum_step: optional accumulated step count
    :param outer_it: optional initial iteration count
    :return: dict with keys x_2, f_2, g_2, s_0_n, j_2, magnitude_f, g_max,
        backtrack_count, lambda_ls, outer_it_stop, accum_step
    """
    # Reuse caller-supplied evaluations at x_c when available.
    if known_f_c is not None:
        f_0 = known_f_c
    else:
        f_0 = fun(x_c)
    if known_j_c is not None:
        j_0 = known_j_c
    else:
        j_0 = jac(x_c)
    # p in the Newton-direction: $s_N = -J(x_c)^{-1} F(x_c)$
    s_0_n = solve_ax_equal_b(j_0, -f_0)
    # relative length of p as calculated in the stopping routine
    if size(x_c) == 1:
        rellength = abs(s_0_n / x_c)
    else:
        rellength = max(abs(s_0_n) / max(abs(x_c)))
    # minimum allowable step length
    lambda_min = tol / rellength
    # FIXME: lambda_min can sometimes be lower than tol / rellength
    lambda_min = finfo(float).eps * lambda_min
    # $\nabla f(x_c)^T s^N = -F(x_c)^T F(x_c)$
    # initslope: expressed (p344) $g^T p$ as gradient . direction
    g_0 = 1 / 2. * scalar_prod(f_0, f_0)
    g_prime_t_s = -scalar_prod(f_0, f_0)
    # first attempt full Newton step
    lambda_ls = 1.0
    # init other variables
    backtrack_count = 0
    lambda_temp = lambda_ls
    lambda_prev = lambda_ls
    x_2 = empty_like(x_c)
    f_2 = empty_like(f_0)
    g_2 = empty_like(g_0)
    g_1 = empty_like(g_0)
    magnitude_f = empty_like(lambda_ls)
    g_max = empty_like(lambda_ls)
    # maps |F| between tol and ~10^-5 onto a 0..100% progress scale
    progress_factor = 1 / (1 - 10. ** (5 * (2 - 1))) * \
        log(0.1)  # prog=0.1, x=10^-5 & tol=10^-10
    stop = False
    outer_it_stop = False
    while backtrack_count < max_iter and not stop:
        x_2 = x_c + lambda_ls * s_0_n
        f_2 = fun(x_2)
        g_2 = 1 / 2. * scalar_prod(f_2, f_2)
        # sufficient-decrease (Armijo) bound: accept when g(x+) <= g_max
        descent = alpha * lambda_ls * g_prime_t_s
        g_max = g_0 + descent
        nan_result = isnan(f_2).any()
        # add current lambda to accumulated steps
        accum_step += lambda_ls
        while nan_result and lambda_ls >= lambda_min:
            # handle case in which f_2 throws nan as rough line search:
            # shrink lambda aggressively (x0.1 then x0.5) until f is finite
            # Non-functional status notification
            if notify_status_func is not None:
                diff = (lambda_ls - lambda_prev) * s_0_n
                inner_it_j = backtrack_count
                magnitude_f = sqrt(2 * g_2)
                progress_k = exp(
                    (-magnitude_f + tol) / tol * progress_factor) * 100.
                notify_status_func(
                    progress_k,
                    outer_it_stop and stop,
                    outer_it,
                    inner_it_j,
                    lambda_ls,
                    accum_step,
                    x_2,
                    diff,
                    f_2,
                    j_0,
                    lambda_ls * s_0_n,
                    backtrack_count,
                    g_min=g_2,
                    g1=g_max)
            # End non-functional notification
            accum_step -= lambda_ls
            backtrack_count += 1
            lambda_ls = 0.1 * lambda_ls
            # x_1/f_1 kept as the "previous" point for the cubic model below
            x_1 = x_c + lambda_ls * s_0_n
            f_1 = fun(x_1)
            lambda_prev = lambda_ls
            lambda_ls = 0.5 * lambda_ls
            x_2 = x_c + lambda_ls * s_0_n
            f_2 = fun(x_2)
            g_2 = 1 / 2. * scalar_prod(f_2, f_2)
            nan_result = isnan(f_1).any() or isnan(f_2).any()
            accum_step += lambda_ls
        satisfactory = g_2 <= g_max
        stop = satisfactory
        magnitude_f = sqrt(2 * g_2)
        # outer (Newton) iteration may stop when |F(x+)| < tol
        outer_it_stop = magnitude_f < tol
        if additional_restrictions is not None:
            stop = satisfactory and additional_restrictions(x_2)
        if lambda_ls < lambda_min:
            # satisfactory x_2 cannot be found sufficiently distinct from x_c
            outer_it_stop = True
            stop = True
            pass
        # Non-functional status notification
        if notify_status_func is not None:
            diff = (lambda_ls - lambda_prev) * s_0_n
            inner_it_j = backtrack_count
            progress_k = exp(
                (-magnitude_f + tol) / tol * progress_factor) * 100.
            notify_status_func(progress_k, outer_it_stop and stop, outer_it,
                               inner_it_j, lambda_ls, accum_step,
                               x_2, diff, f_2, j_0, lambda_ls * s_0_n,
                               backtrack_count, g_min=g_2, g1=g_max)
        # End non-functional notification
        if not stop:
            # reduce lambda
            # backtrack accumulated steps in current lambda,
            # then reduce lambda once more
            accum_step -= lambda_ls
            backtrack_count += 1
            if lambda_ls == 1:
                # first backtrack: quadratic fit through g_0, g'(0), g_2
                lambda_temp = -g_prime_t_s / (
                    2 * (g_2 - g_0 - g_prime_t_s)
                )
            elif lambda_ls < 1:
                # subsequent backtracks: cubic fit through the last two points
                a, b = 1 / (lambda_ls - lambda_prev) * array(
                    [[+1 / lambda_ls ** 2, - 1 / lambda_prev ** 2],
                     [- lambda_prev / lambda_ls ** 2, +lambda_ls / lambda_prev ** 2]]
                ).dot(array(
                    [[g_2 - g_0 - g_prime_t_s * lambda_ls],
                     [g_1 - g_0 - g_prime_t_s * lambda_prev]]))
                a = a.item()
                b = b.item()
                disc = b ** 2 - 3 * a * g_prime_t_s
                if a == 0:
                    # actually quadratic
                    lambda_temp = -g_prime_t_s / (2 * b)
                else:
                    # legitimate cubic
                    lambda_temp = (-b + sqrt(disc)) / (3 * a)
                # never grow the step by more than half the current lambda
                if lambda_temp > 1 / 2 * lambda_ls:
                    lambda_temp = 1 / 2 * lambda_ls
            # remember current point for the next cubic model
            lambda_prev = lambda_ls
            g_1 = g_2
            # clamp the reduction: at most a factor 10 per backtrack
            if lambda_temp <= 0.1 * lambda_ls:
                lambda_ls = 0.1 * lambda_ls
            else:
                lambda_ls = lambda_temp
    j_2 = jac(x_2)
    # package all named results into a dict via locals()
    soln = dict()
    for item in ['x_2', 'f_2', 'g_2', 's_0_n', 'j_2', 'magnitude_f', 'g_max',
                 'backtrack_count', 'lambda_ls', 'outer_it_stop', 'accum_step']:
        soln[item] = locals().get(item)
    return soln
def scalar_prod(factor_a, factor_b):
    """Scalar product helper: dot product for vectors, plain product
    for scalars (size-1 operands)."""
    if size(factor_b) == 1:
        return factor_a * factor_b
    elif size(factor_b) > 1:
        return factor_a.dot(factor_b)
def solve_ax_equal_b(factor_a, term_b):
    """Solve the linear problem a . x = b for x.

    Scalar problems reduce to a division; systems are delegated to
    gauss_elimination.
    """
    if size(term_b) == 1:
        return 1 / factor_a * term_b
    elif size(term_b) > 1:
        return gauss_elimination(factor_a, term_b)
# noinspection PyUnboundLocalVariable
def secant_ls_3p(y, x_0, tol, x_1=None, f_prime=None,
max_it=100, alpha=1e-4, restriction=None, print_iterations=False):
"""3 point secant method with line search algorithm (single dimension)
Tiruneh, <NAME>. "A modified three-point Secant method with improved rate and
characteristics of convergence." | |
# rheia/CASES/H2_MOBILITY/h2_mobility.py
"""
The :py:mod:`h2_mobility` module contains a class to read the required data and
a class to evaluate the power-to-mobility system.
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pvlib
class ReadData:
    """
    This class enables to read data from the data files.

    Parameters
    ----------
    filename_climate : str
        The directory of the file with information on the
        solar irradiance.
    """

    def __init__(self, filename_climate):
        # path to the climate data file
        self.filename_climate = filename_climate
        # directory of this module, used to locate the design_space file
        self.path = os.path.dirname(os.path.abspath(__file__))

    def load_climate(self):
        """
        This method loads the hourly solar irradiance data
        and ambient temperature data,
        situated in the 'sol_irr' and 'T_amb' columns of the
        climate data file.

        Returns
        -------
        sol_irr : ndarray
            The hourly solar irradiance data for a Typical
            Meteorological Year.
        t_amb : ndarray
            The hourly ambient temperature data for a Typical
            Meteorological Year.
        """
        data = pd.read_csv(self.filename_climate)
        sol_irr = data['sol_irr'].to_numpy()
        t_amb = data['T_amb'].to_numpy()
        return sol_irr, t_amb

    def load_parameters(self):
        """
        This method loads the deterministic values of the model
        parameters, defined in the design_space file. This is
        useful when the deterministic performance of a specific
        design needs to be evaluated.

        Returns
        -------
        param_dict : dict
            Dictionary with the names of the model parameters
            and the corresponding deterministic values.
        """
        param_dict = {}
        design_space = os.path.join(self.path, 'design_space')
        # read the deterministic values for the parameters in `design_space`
        with open(design_space, 'r') as file:
            for line in file:
                tmp = line.split()
                # skip blank or malformed lines instead of raising
                # IndexError (e.g. a trailing newline in the file)
                if len(tmp) > 2 and tmp[1] == 'par':
                    param_dict[tmp[0]] = float(tmp[2])
        return param_dict
class Evaluation:
"""
This class evaluates the power-to-mobility system.
For a given design, the solar irradiance, ambient temperature
and the characterization of the model parameters,
the levelized cost of driving, carbon intensity and the annual
grid consumption are quantified.
Parameters
----------
sol_irr : ndarray
The hourly solar irradiance for the evaluated year.
t_amb : ndarray
The hourly ambient temperature for the evaluated year.
parameters : dict
Dictionary with the model parameters and design variables values.
"""
def __init__(self, sol_irr, t_amb, par):
    """Initialize the power-to-mobility evaluation for one design.

    :param sol_irr: hourly solar irradiance for the evaluated year [W/m2]
    :param t_amb: hourly ambient temperature for the evaluated year [C]
    :param par: dict with model parameter and design variable values
        (named `parameters` in the class docstring)
    """
    self.par = par
    # the solar irradiance and ambient temperature are scaled with the
    # corresponding uncertainty
    self.sol_irr = sol_irr * self.par['u_sol_irr']
    self.t_amb = t_amb + self.par['u_t_amb']
    # number of evaluated hours (typically 8760 for a full year)
    self.length = len(self.sol_irr)
    # the result dictionary
    self.res = {}
    # the system lifetime [years]
    self.par['life_sys'] = 20.
    # initialize the operating hours of the electrolyzer array
    self.res['running_hours_pemel'] = 0.
    # initialize the storage tank size and its starting status
    # NOTE(review): tank() is defined elsewhere in this class (not shown
    # here); presumably returns the tank capacity in kg — confirm.
    self.m_h2_max = self.tank()
    # tank is assumed to start at its 5% minimum filling level
    self.m_h2_min = 0.05 * self.m_h2_max
    self.m_h2 = self.m_h2_min
    # instantiate the profiles for grid electricity price
    self.demand_profiles()
    # the number of PEM electrolyzer cells, corresponding to the
    # nominal capacity of the considered PEM cell and the provided
    # PEM capacity
    self.n_pemel_array = self.par['n_pemel'] / 0.4
    # the fitted polynomials on the electrolyzer and compressor
    # (methods defined elsewhere in this class)
    self.polyfit_pemel()
    self.polyfit_pemel_compr()
def demand_profiles(self):
"""
Set the grid electricity price for buying and selling electricity.
A contract with fixed electricity price is considered, for which the
price for buying electricity consists of three segments: the energy
price itself (i.e. 'elec cost'), the profit made on this price by the
electricity provider (i.e. 'elec_cost_profit') and the fraction of the
energy price to the final retail price (i.e. 'elec_cost_ratio', e.g.
when this value equal 0.3, then the energy price corresponds to 30% of
the final bill, while 70% corresponds to transportation cost,
taxes,...). The price for selling electricity back to the grid
corresponds to the energy price.
In addition, the demand profiles from the hydrogen buses and diesel
buses is determined, based on the European daily refueling profile [1].
[1] <NAME>, I.Williamson, <NAME>, and <NAME>,
“Decarbonising city bus networks in ireland with renewable hydrogen,”
International Journal of Hydrogen Energy, 2020.
"""
# electricity cost profile [euro/Wh]
self.elec_profile = np.ones(self.length) * (
(self.par['elec_cost'] +
self.par['elec_cost_profit']) /
self.par['elec_cost_ratio']) / 1e6
# electricity selling profile [euro/Wh]
self.elec_profile_sale = np.ones(
self.length) * self.par['elec_cost'] / 1e6
self.diesel_profile = np.ones(self.length) * self.par['diesel_cost']
# number of km driven per day per bus
self.par['n_km_bus'] = 250.
# number of buses in the fleet
self.par['n_bus'] = 40.
# energy consumed by the diesel buses and hydrogen buses per day
# [kWh/day]
energy_h2 = (self.par['cons_h2_bus'] * self.par['n_km_bus'] *
self.par['n_h2_bus'])
energy_diesel = (self.par['cons_diesel_bus'] * self.par['n_km_bus'] *
(self.par['n_bus'] - self.par['n_h2_bus']))
h2_required = energy_h2 / 33.33 # kg
diesel_required = energy_diesel / 10. # litre
# the daily refueling profile of the buses.
fill_profile = np.array([0.09, 0.015, 0.005, 0.04, 0.04, 0., 0.01,
0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0.08, 0.08, 0.13, 0.13, 0.13, 0.13, 0.11])
# daily refueling profile for the hydrogen buses and diesel buses
day_h2 = np.ones(24) * fill_profile * h2_required
day_diesel = np.ones(24) * fill_profile * diesel_required
# annual refueling profiles
self.load_h2 = list(day_h2) * int(365 * self.length / 8760)
self.load_diesel = list(day_diesel) * int(365 * self.length / 8760)
# dispenser capacity such that the hourly hydrogen demand can be
# complied with
dispenser_mass_flow_rate = 33.333
self.par['n_disp'] = max(self.load_h2) / dispenser_mass_flow_rate
#############################
# photovoltaic array module #
#############################
def quantify_mpp(self, sol_irr, t_amb, pv_system):
    """Quantify the maximum power of the photovoltaic array
    for a given solar irradiance and ambient temperature.

    Parameters
    ----------
    sol_irr : float
        The solar irradiance [W/m2].
    t_amb : float
        The ambient temperature [C].
    pv_system : pandas.core.series.Series
        The pv system characteristics

    Returns
    -------
    pmp : float
        The maximum power.
    """
    # single-diode model parameters for the given conditions (De Soto)
    desoto_params = pvlib.pvsystem.calcparams_desoto(
        sol_irr,
        t_amb,
        pv_system['alpha_sc'],
        pv_system['a_ref'],
        pv_system['I_L_ref'],
        pv_system['I_o_ref'],
        pv_system['R_sh_ref'],
        pv_system['R_s'],
        EgRef=1.121,
        dEgdT=-0.0002677,
        irrad_ref=1000.,
        temp_ref=25.)
    photocurrent, sat_current, series_res, shunt_res, nnsvth = desoto_params
    # maximum power point of the resulting I-V curve
    mpp = pvlib.pvsystem.max_power_point(photocurrent,
                                         sat_current,
                                         series_res,
                                         shunt_res,
                                         nnsvth,
                                         method='newton')
    return mpp['p_mp']
def photovoltaic(self):
    """Quantify the hourly photovoltaic power output via PVlib.

    A typical photovoltaic panel is characterized, its maximum power
    point is evaluated for every hour with positive irradiance, and the
    result is scaled from the reference panel to the considered array
    capacity. Results are stored in self.res['p_pv'] (hourly power [W])
    and self.res['n_dcdc_pv'] (dc-dc converter capacity [kW]).
    """
    # get the specific photovoltaic panel characteristics
    module_db = pvlib.pvsystem.retrieve_sam('CECmod')
    module = module_db.SunPower_SPR_X19_240_BLK
    # maximum power point at reference conditions (1000 W/m2, 25 C)
    p_mpp_ref = self.quantify_mpp(1000., 25., module)  # W
    hourly_power = np.zeros(len(self.sol_irr))
    for hour, (irr, temp) in enumerate(zip(self.sol_irr, self.t_amb)):
        # dark hours keep the pre-initialized 0 W
        if irr > 0.:
            p_mpp = self.quantify_mpp(irr, temp, module)
            hourly_power[hour] = p_mpp / p_mpp_ref * self.par['n_pv'] * 1e3  # W
    # store the hourly pv power in the result dictionary
    self.res['p_pv'] = hourly_power
    # the dc-dc converter capacity in kW
    self.res['n_dcdc_pv'] = max(hourly_power) / 1e3
#############################
# electrolyzer array module #
#############################
def pemel(self, i_pemel):
"""
The electrolyzer model, based on the work of Saeed et al. [2]. For a
given current, the model determines the operating voltage by
considering the activation, concentration and ohmic overpotentials.
The model quantifies the operating voltage, power, efficiency and
hydrogen production.
[2] <NAME>., & <NAME>. (2015). Modeling and Analysis of
Renewable PEM Fuel Cell System. Energy Procedia, 74, 87–101.
https://doi.org/10.1016/j.egypro.2015.07.527
Parameters
----------
i_pemel : float
The electrolyzer input current [A].
Returns
-------
res : dict
Dictionary with the operating conditions of the electrolyzer for a
given current. It contains items on the operating voltage, power,
efficiency and hydrogen mass flow rate.
"""
par_pemel = {'T': 353.,
'a': 1.,
'p_o2': 1.,
'p_h2': 1.,
'p_h2o': 1.,
'i_L': 2.,
'A': 100.,
'i_0': 1e-4,
'n': 2.,
't_mem': 50e-4,
'alpha': 0.3,
'R': 8.3143,
'F': 96485.,
'HHV': 141.7e6,
}
res = {}
i = i_pemel / par_pemel['A']
# minimum operating voltage of electrolyzer
e_0 = (1.48 - 0.85e-3 * (par_pemel['T'] - 298.15) + 4.3085e-5 *
par_pemel['T'] * np.log(par_pemel['p_h2'] *
np.sqrt(par_pemel['p_o2']) /
par_pemel['p_h2o']))
# activation overpotential
v_act = (np.log(i / par_pemel['i_0']) /
(par_pemel['alpha'] * par_pemel['n'] * par_pemel['F']) *
par_pemel['R'] * par_pemel['T'])
# ohmic overpotential
lambda_mem = (0.043 + 17.81 * par_pemel['a'] -
39.85 * par_pemel['a']**2. +
36. * par_pemel['a']**3.)
sigma_mem = ((0.005139 * lambda_mem - 0.00326) *
np.exp(1268 * (1. / 303. - 1. / par_pemel['T'])))
v_ohm = | |
{...}}}}
worker_archive = self.checkpoints.get(origin, {})
round_archive = worker_archive.get(round_key, {})
round_archive.update({epoch_key: logs})
worker_archive.update({round_key: round_archive})
self.checkpoints.update({origin: worker_archive})
# Is server considered a worker, since server also needs to train
# declaring server model, optimizers and criterion? Same as local worker?
logging.debug("Beginning to train server")
logging.debug(f"workers list: {self.workers}")
# Got error when running training w/o send..
_, global_train_loss = train_server(
whether_distill_on_the_server=self.whether_distill_on_the_server,
server_epochs=self.server_epochs,
model_server=server_model,
server_opt=server_opt,
extracted_feature_dict=extracted_feature_dict,
logits_dict=logits_dict,
labels_dict=labels_dict,
server_criterion=server_criterion
)
logging.debug(f"global server logits dict: {server_logits_dict}")
self.global_train_loss = global_train_loss
finally:
loop.close()
return models, optimizers, schedulers, criterions, stoppers #, server_model, server_opt
##################
# Core functions #
##################
def initialise(self):
    """Set up all training state required for one FedGKT round: local
    worker models (plus a cache of previous states), the server model,
    and per-worker optimizers, LR schedulers, criterions and early
    stoppers.
    """
    model_structure = self.global_model
    shape = self.shape_after_alignment()
    logging.debug(f"initialise_shape: {shape}")
    L = self.L
    logging.debug(f"fedgkt model_structure: {model_structure}")
    logging.debug(f"criterion_params: {self.arguments.criterion_params}")
    # fresh worker-side models plus a cache of their previous states
    local_models = self.generate_local_models(model_structure, L, shape)
    prev_models = self.generate_local_models(model_structure, L, shape)
    # server-side model, optimizer and loss
    server_model = self.generate_server_model(model_structure, L)
    server_opt = self.arguments.optimizer(
        **self.arguments.optimizer_params,
        params=server_model.parameters()
    )
    server_criterion = self.build_custom_criterion()(
        **self.arguments.criterion_params
    )
    # per-worker training machinery, keyed by worker
    optimizers = {}
    schedulers = {}
    criterions = {}
    stoppers = {}
    for worker, model in local_models.items():
        worker_opt = self.arguments.optimizer(
            **self.arguments.optimizer_params,
            params=model.parameters()
        )
        optimizers[worker] = worker_opt
        schedulers[worker] = self.arguments.lr_scheduler(
            **self.arguments.lr_decay_params,
            optimizer=worker_opt
        )
        criterions[worker] = self.build_custom_criterion()(
            **self.arguments.criterion_params
        )
        stoppers[worker] = EarlyStopping(
            **self.arguments.early_stopping_params
        )
    return (
        local_models,
        prev_models,
        optimizers,
        schedulers,
        criterions,
        stoppers,
        server_model,
        server_opt,
        server_criterion
    )
def fit(self):
    """ Performs federated training using a pre-specified model as
    a template, across initialised worker nodes, coordinated by
    a ttp node.

    Each round: (1) re-initialise all local/server training state,
    (2) run parallel local training with server-side distillation,
    (3) archive local & global losses, (4) early-stop when the global
    validation loss stagnates.

    Returns:
        Trained global model (Model)
    """
    ###########################
    # Implementation Footnote #
    ###########################
    # However, due to certain PySyft nuances (refer to Part 4, section 1:
    # Frame of Reference) there is a need to choose a conceptual
    # representation of the overall architecture. Here, the node agnostic
    # variant is implemented. Model is stored in the server -> Client
    # (i.e. 'Me') does not interact with it
    # Note: If MPC is requested, global model itself cannot be shared, only
    # its copies are shared. This is due to restrictions in PointerTensor
    # mechanics.
    # apply to server loss
    global_val_stopper = EarlyStopping(**self.arguments.early_stopping_params)
    rounds = 0
    pbar = tqdm(total=self.arguments.rounds, desc='Rounds', leave=True)
    while rounds < self.arguments.rounds:
        logging.warning(f"Before training - TTP: {self.crypto_provider}, workers: {self.workers}")
        # fresh models/optimizers/criterions/stoppers each round
        (
            local_models,
            prev_models,
            optimizers,
            schedulers,
            criterions,
            stoppers,
            server_model,
            server_opt,
            server_criterion
        ) = self.initialise()
        # local training on all workers + server-side distillation
        self.perform_parallel_training(
            datasets=self.train_loader,
            models=local_models,
            cache=prev_models,
            optimizers=optimizers,
            schedulers=schedulers,
            criterions=criterions,
            stoppers=stoppers,
            rounds=rounds,
            epochs=self.arguments.epochs,
            server_model=server_model,
            server_opt=server_opt,
            server_criterion=server_criterion
        )
        # Retrieve all models from their respective workers
        logging.debug(f"Current server model:\n {server_model.state_dict()}")
        self.server_model = server_model
        # Local losses for worker
        # NOTE(review): `.get()` suggests the cached loss is a remote
        # (PySyft pointer) tensor pulled back locally — confirm.
        final_local_losses = {
            w.id: c._cache[-1].get()
            for w,c in criterions.items()
        }
        logging.debug(f'final_local_losses: {final_local_losses}')
        # Store local losses for analysis
        for w_id, loss in final_local_losses.items():
            local_loss_archive = self.loss_history['local'].get(w_id, {})
            local_loss_archive.update({rounds: loss.item()})
            self.loss_history['local'][w_id] = local_loss_archive
        # server-side training loss set by perform_parallel_training
        global_train_loss = self.global_train_loss
        # Validate the global model
        _, evaluation_losses = self.evaluate(metas=['evaluate'])
        global_val_loss = evaluation_losses['evaluate']
        logging.debug(f"global_val_loss: {global_val_loss}")
        # # Store global losses for analysis
        global_loss_archive = self.loss_history['global']
        global_train_losses = global_loss_archive.get('train', {})
        global_train_losses.update({rounds: global_train_loss.item()})
        global_val_losses = global_loss_archive.get('evaluate', {})
        global_val_losses.update({rounds: global_val_loss.item()})
        self.loss_history['global'] = {
            'train': global_train_losses,
            'evaluate': global_val_losses
        }
        # If server model is deemed to have stagnated, stop training
        global_val_stopper(global_val_loss, self.server_model)
        if global_val_stopper.early_stop:
            logging.info("Global model has stagnated. Training round terminated!\n")
            break
        logging.warning(f"Before GC attempt - TTP: {self.crypto_provider}, workers: {self.workers}")
        self.clear_garbage(metas=["train", "evaluate"])
        logging.warning(f"After GC attempt - TTP: {self.crypto_provider}, workers: {self.workers}")
        rounds += 1
        pbar.update(1)
    pbar.close()
    # logging.debug(f"Objects in TTP: {self.crypto_provider}, {len(self.crypto_provider._objects)}")
    # logging.debug(f"Objects in sy.local_worker: {sy.local_worker}, {len(sy.local_worker._objects)}")
    # NOTE(review): the four names below are not defined in this method's
    # scope — presumably module-level globals populated during training;
    # verify, otherwise these lines raise NameError.
    logging.debug(f"server logits size: {len(server_logits_dict)}")
    logging.debug(f"logits size: {len(logits_dict)}")
    logging.debug(f"labels size: {len(labels_dict)}")
    logging.debug(f"ef size: {len(extracted_feature_dict)}")
    # NOTE(review): self.local_models is not assigned in fit() —
    # presumably maintained elsewhere in the class; confirm.
    return self.server_model, self.local_models #, server_model
def analyse(self):
    """Quantify each worker's contribution towards the final global
    model.

    Not yet supported for this alignment scheme.
    """
    raise NotImplementedError
def export(self, out_dir: str = None, excluded: List[str] = None) -> dict:
    """ Snapshots the current state of federated cycle and exports all
    models to file. A dictionary is produced as a result.

    An archive maps 'global' and 'local_<idx>' keys to metadata dicts of
    the form:
        {
            'origin': <worker or crypto_provider ID>,
            'path': <path(s) to exported final model(s)>,
            'loss_history': <path(s) to final loss history(s)>,
            'checkpoints': {
                'round_<r>': {
                    'epoch_<e>': {'origin': ..., 'path': ...,
                                  'loss_history': ...},
                    ...
                },
                ...
            }
        }

    Args:
        out_dir (str): Path to output directory for export
        excluded (list(str)): Federated attributes to skip when exporting.
            Defaults to no exclusions. Attribute options are as follows:
            1) 'global': Skips current state of the global model
            2) 'local': Skips current states of all local models
            3) 'loss': Skips current state of global & local loss histories
            4) 'checkpoint': Skips all checkpointed metadata
    Returns:
        Archive (dict)
    """
    # None-sentinel instead of a mutable default argument ([])
    excluded = [] if excluded is None else excluded
    # Override cached output directory with specified directory if any
    out_dir = out_dir if out_dir else self.out_dir

    def save_server_model():
        # Export server model (as the global component) to file;
        # returns None when excluded, "" when no server model exists.
        if 'global' in excluded:
            return None
        server_model_out_path = os.path.join(
            out_dir,
            "server_model.pt"
        )
        # Only states can be saved, since Model is not picklable
        if self.server_model is not None:
            th.save(self.server_model.state_dict(), server_model_out_path)
        else:
            server_model_out_path = ""
        return server_model_out_path

    out_paths = super().export(out_dir, excluded)
    # Package global metadata for storage
    out_paths['global']['path'] = save_server_model()
    return out_paths
def restore(
self,
archive: dict,
version: Tuple[str, str] = None
):
""" Restores model states from a previously archived training run. If
version is not specified, then restore the final state of the grid.
If version is specified, restore the state of all models conforming
to that version's snapshot.
An archive's structure looks like this:
{
'global': {
'origin': <crypto_provider ID>,
'path': <path(s) to exported final global model(s)>,
'loss_history': <path(s) to final global loss history(s)>,
'checkpoints': {
'round_0': {
'epoch_0': {
'origin': <crypto_provider ID>,
'path': <path(s) to exported global model(s)>,
'loss_history': <path(s) to globalloss history(s)>,
},
'epoch_1': {
'origin': <crypto_provider ID>,
'path': <path(s) to exported globalmodel(s)>,
'loss_history': <path(s) to global loss history(s)>,
},
...
},
'round_1': {
'epoch_0': {
'origin': <crypto_provider ID>,
'path': <path(s) to global exported model(s)>,
'loss_history': <path(s) to global loss history(s)>,
},
'epoch_1': {
'origin': <crypto_provider ID>,
'path': <path(s) to exported global model(s)>,
'loss_history': <path(s) to global loss history(s)>,
},
...
}
...
}
},
'local_<idx>': {
'origin': <worker ID>,
'path': <path(s) to exported final local model(s)>,
'loss_history': <path(s) to final local loss history(s)>,
'checkpoints': {
'round_0': {
'epoch_0': {
'origin': <crypto_provider ID>,
'path': <path(s) to exported local model(s)>,
'loss_history': <path(s) to local loss history(s)>,
},
'epoch_1': {
'origin': <crypto_provider ID>,
'path': <path(s) to exported local model(s)>,
'loss_history': <path(s) to local loss history(s)>,
},
...
},
'round_1': {
'epoch_0': {
'origin': <crypto_provider ID>,
'path': <path(s) to exported local model(s)>,
'loss_history': <path(s) to local loss history(s)>,
},
'epoch_1': {
'origin': <crypto_provider ID>,
'path': <path(s) to exported local model(s)>,
'loss_history': <path(s) to local loss history(s)>,
},
...
}
...
}
},
...
}
Args:
archive (dict): Dictionary containing versioned histories of
exported filepaths corresponding to the state of models within a
training cycle
version (tuple(str)): A | |
# alan-turing-institute/pysf
from .logger import LoggingHandler
from .splits import SlidingWindowTimeSeriesSplit
from .errors import RawResiduals
# Core dependencies
import pandas as pd
import xarray as xr
import numpy as np
import copy
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
# Dependencies for data acquisition
import requests
from io import BytesIO
from zipfile import ZipFile
import tempfile
import os
import scipy.io as sio
def load_dummy_data_df(series_count = 10, timestamp_count = 5, time_feature_count = 3, series_feature_count = 2, vs_times_series_factor = 10000, vs_times_timestamps_factor = 100, vs_series_series_factor = 10000):
    """Build a small synthetic (vs_times_df, vs_series_df) pair whose cell
    values encode their own (series, timestamp, feature) coordinates via
    the given factors — handy for eyeballing reshaping logic."""
    series_col = 'series'
    timestamp_col = 'timestamp'
    time_feature_cols = [f'time_label_{i}' for i in range(1, time_feature_count + 1)]
    series_feature_cols = [f'series_label_{i}' for i in range(1, series_feature_count + 1)]
    # per-timestamp values: timestamp * factor + feature index
    timestamps = 1 + np.arange(timestamp_count).reshape(-1, 1)
    time_feature_idx = 1 + np.arange(time_feature_count).reshape(1, -1)
    right_df = pd.DataFrame(
        columns=([timestamp_col] + time_feature_cols),
        data=np.concatenate(
            (timestamps, (timestamps * vs_times_timestamps_factor) + time_feature_idx),
            axis=1))
    right_df['key'] = 999
    left_df = pd.DataFrame(columns=[series_col], data=(1 + np.arange(series_count)))
    left_df['key'] = 999
    # cross-join series x timestamps via the constant key, then drop it
    dummy_vs_times_df = left_df.merge(right_df, how='outer', on='key')
    dummy_vs_times_df = dummy_vs_times_df.drop('key', axis=1)
    # offset every time feature by series * factor
    dummy_vs_times_df[time_feature_cols] = (
        (dummy_vs_times_df[series_col].values * vs_times_series_factor).reshape(-1, 1)
        + dummy_vs_times_df[time_feature_cols])
    # per-series features: series * factor + feature index
    series_ids = 1 + np.arange(series_count).reshape(-1, 1)
    series_feature_idx = 1 + np.arange(series_feature_count).reshape(1, -1)
    dummy_vs_series_df = pd.DataFrame(
        columns=([series_col] + series_feature_cols),
        data=np.concatenate(
            (series_ids, (series_ids * vs_series_series_factor) + series_feature_idx),
            axis=1))
    return (dummy_vs_times_df, dummy_vs_series_df)
def download_zipfile(url):
    """Fetch *url* over HTTP and wrap the response body in a ZipFile."""
    response = requests.get(url)
    return ZipFile(BytesIO(response.content))
def download_ramsay_weather_data_dfs():
    """Download Ramsay's Canadian daily-weather example data and return it
    as a (vs_times_df, vs_series_df) pair; this dataset has no per-series
    features, so the second element is None."""
    archive = download_zipfile('http://www.psych.mcgill.ca/misc/fda/downloads/FDAfuns/Matlab/fdaM.zip')
    tempdir = tempfile.TemporaryDirectory()
    archive.extract(member='examples/weather/daily.mat', path=tempdir.name)
    mat = sio.loadmat(os.path.join(tempdir.name, 'examples/weather/daily.mat'))

    def melt_station_matrix(key, value_name):
        # wide (day x station) matrix -> long (day, station, value) frame
        wide = pd.DataFrame(mat[key])
        wide['day_of_year'] = wide.index.values + 1
        long_df = pd.melt(wide, id_vars=['day_of_year'])
        long_df.rename(columns={'variable' : 'weather_station', 'value' : value_name}, inplace=True)
        return long_df

    weather_vs_times_df = pd.merge(melt_station_matrix('tempav', 'tempav'),
                                   melt_station_matrix('precav', 'precav'))
    weather_vs_series_df = None
    return (weather_vs_times_df, weather_vs_series_df)
def download_ramsay_growth_data_dfs():
    """Fetch Ramsay's FDA Berkeley growth study data as long-format frames.

    Returns:
        Tuple ``(growth_vs_times_df, growth_vs_series_df)``: the first frame
        has one row per (age, cohort_id) with ``height`` and ``gender``
        columns; the second holds the per-series attributes (cohort_id,
        gender) with duplicates removed.
    """
    zipped = download_zipfile('http://www.psych.mcgill.ca/misc/fda/downloads/FDAfuns/Matlab/fdaM.zip')
    # Use a context manager so the extracted file is removed deterministically,
    # instead of relying on the TemporaryDirectory finalizer.
    with tempfile.TemporaryDirectory() as tmpdir:
        zipped.extract(member='examples/growth/growth.mat', path=tmpdir)
        growth_data_dict = sio.loadmat(os.path.join(tmpdir, 'examples/growth/growth.mat'))
    ages_arr = growth_data_dict['age']

    def _melt_gender(key, gender):
        # Wide matrix (age x cohort) -> long rows (age, cohort_id, height, gender).
        df = pd.DataFrame(growth_data_dict[key])
        df['age'] = ages_arr
        df = pd.melt(df, id_vars=['age'])
        df.rename(columns={'variable': 'cohort_id', 'value': 'height'}, inplace=True)
        df['gender'] = gender
        return df

    growth_df = pd.concat([_melt_gender('hgtmmat', 'boy'),
                           _melt_gender('hgtfmat', 'girl')])
    growth_vs_times_df = growth_df
    growth_vs_series_df = growth_df.drop(['age', 'height'], axis=1).drop_duplicates()
    return (growth_vs_times_df, growth_vs_series_df)
def download_ecg_data_dfs():
    """Fetch the ECG200 dataset and return it as long-format frames.

    Returns:
        Tuple ``(ecg_vs_times, ecg_vs_series)``: the first frame has one row
        per (heartbeat, timestamp) with the measured ``potential_difference``;
        the second has one row per heartbeat with an ``is_abnormal`` flag
        (class label -1 in the raw file means abnormal).
    """
    zipped = download_zipfile('http://timeseriesclassification.com/Downloads/ECG200.zip')
    with tempfile.TemporaryDirectory() as tmpdir:
        zipped.extract(member='ECG200/ECG200.csv', path=tmpdir)
        ecg_filepath = os.path.join(tmpdir, 'ECG200/ECG200.csv')
        # 96 samples per heartbeat plus a trailing class label column.
        raw_df = pd.read_csv(ecg_filepath, names=([str(x) for x in range(96)] + ['class_label']), skiprows=101)
    raw_df['heartbeat'] = raw_df.index.values
    # .copy() so the later column assignment/drop operate on an independent
    # frame, not a view of raw_df (the original code triggered
    # SettingWithCopyWarning here).
    ecg_vs_series = raw_df[['heartbeat', 'class_label']].copy()
    raw_df = raw_df.melt(id_vars=['heartbeat', 'class_label'])
    raw_df.rename(columns={'variable': 'timestamp', 'value': 'potential_difference'}, inplace=True)
    raw_df['timestamp'] = raw_df['timestamp'].astype(int)
    ecg_vs_times = raw_df[['heartbeat', 'timestamp', 'potential_difference']]
    ecg_vs_series['is_abnormal'] = (ecg_vs_series['class_label'] == -1)
    ecg_vs_series.drop('class_label', axis=1, inplace=True)
    return (ecg_vs_times, ecg_vs_series)
# Design patterns used: Flyweight, Prototype.
class MultiSeries(LoggingHandler):
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
in an ``Attributes`` section and follow the same formatting as a
function's ``Args`` section. Alternatively, attributes may be documented
inline with the attribute's declaration (see __init__ method below).
Properties created with the ``@property`` decorator should be documented
in the property's getter method.
Attributes:
attr1 (str): Description of `attr1`.
attr2 (:obj:`int`, optional): Description of `attr2`.
"""
# Pythonic way of doing read-only properties
@property
def count_features(self):
count = 0
if self._value_colnames_vs_times is not None:
count = count + len(self._value_colnames_vs_times)
if self._value_colnames_vs_series is not None:
count = count + len(self._value_colnames_vs_series)
return count
# Pythonic way of doing read-only properties
@property
def all_non_timestamp_feature_names(self):
res = self._value_colnames_vs_times
if self._value_colnames_vs_series is not None:
res = res + self._value_colnames_vs_series
return res
# Pythonic way of doing read-only properties
@property
def count_observations(self):
return self._select_df_obs_vs_times().shape[0]
def _inferValueColnames(self, data_df, time_colname, series_id_colnames, value_colnames, description, check_presence_of_time_colname=True):
if data_df is None:
return None
else:
all_colnames = list(data_df.columns.values)
if check_presence_of_time_colname and time_colname not in all_colnames:
raise Exception('time_colname ' + str(time_colname) + ' is not a column of the given data_df')
for c in series_id_colnames:
if c not in all_colnames:
raise Exception('series_id_colnames item ' + str(c) + ' is not a column of the given data_df')
given_colnames = [time_colname] + series_id_colnames
if value_colnames is None:
self.debug('value_colnames was not specified, so will infer from the data frame.')
other_colnames = list(np.setdiff1d(all_colnames, given_colnames))
value_colnames = other_colnames
self.info('Inferred ' + description +' value colnames = ' + str(value_colnames))
else:
given_colnames = given_colnames + value_colnames
other_colnames = list(np.setdiff1d(all_colnames, given_colnames))
self.info('The following col names will be dropped: ' + str(other_colnames))
return value_colnames
def __init__(self, time_colname, series_id_colnames, data_vs_times_df, data_vs_series_df=None, value_colnames_vs_times=None, value_colnames_vs_series=None):
"""Class methods are similar to regular functions.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
super(MultiSeries, self).__init__()
# Validation...
if type(data_vs_times_df) != pd.DataFrame:
raise Exception('data_vs_times_df must be a pandas DataFrame! Was ' + str(type(data_vs_times_df)) + ' instead.')
if not(data_vs_series_df is None) and type(data_vs_series_df) != pd.DataFrame:
raise Exception('data_vs_series_df must be a pandas DataFrame! Was ' + str(type(data_vs_series_df)) + ' instead.')
if type(time_colname) != str:
raise Exception('time_colname must be a string! Was ' + str(type(time_colname)) + ' instead.')
str_shape_data_vs_times_df = 'None'
if data_vs_times_df is not None:
str_shape_data_vs_times_df = str(data_vs_times_df.shape)
str_shape_data_vs_series_df = 'None'
if data_vs_series_df is not None:
str_shape_data_vs_series_df = str(data_vs_series_df.shape)
self.info('Initialising MultiSeries: data_vs_times_df.shape = ' + str_shape_data_vs_times_df + ', data_vs_times_df.shape = ' + str_shape_data_vs_series_df + ', time_colname = ' + str(time_colname) + ', series_id_colnames = ' + str(series_id_colnames) + ', value_colnames_vs_times = ' + str(value_colnames_vs_times) + ', value_colnames_vs_series = ' + str(value_colnames_vs_series))
# ... continued
if type(series_id_colnames) == str:
series_id_colnames = [series_id_colnames] # for convenience
if type(value_colnames_vs_times) == str:
value_colnames_vs_times = [value_colnames_vs_times] # for convenience
if type(value_colnames_vs_series) == str:
value_colnames_vs_series = [value_colnames_vs_series] # for convenience
self._time_colname = time_colname
self._series_id_colnames = series_id_colnames
self._value_colnames_vs_times = self._inferValueColnames(data_df=data_vs_times_df, value_colnames=value_colnames_vs_times, description='time-label', time_colname=time_colname, series_id_colnames=series_id_colnames)
self._value_colnames_vs_series = self._inferValueColnames(data_df=data_vs_series_df, value_colnames=value_colnames_vs_series, description='series-label', time_colname=time_colname, series_id_colnames=series_id_colnames, check_presence_of_time_colname=False)
# Define a mapping between multiple series_id_colnames and a single identifier column
self._series_id_colname = '***internal_series_id***' # don't clash with anything
if data_vs_times_df.columns.contains(self._series_id_colname) or ((data_vs_series_df is not None) and data_vs_series_df.columns.contains(self._series_id_colname)):
raise Exception('Special column ' + self._series_id_colname + ' already exists!')
self._series_id_colnames_mapping = data_vs_times_df[series_id_colnames].drop_duplicates().reset_index(drop=True)
if not(data_vs_series_df is None):
part_two = data_vs_series_df[series_id_colnames].drop_duplicates().reset_index(drop=True)
self._series_id_colnames_mapping = pd.concat([self._series_id_colnames_mapping, part_two], ignore_index=True)
self._series_id_colnames_mapping = self._series_id_colnames_mapping.drop_duplicates().reset_index(drop=True)
self._series_id_colnames_mapping[self._series_id_colname] = self._series_id_colnames_mapping.index.values
# Replace multiple series_id_colnames with a single idenfier column, for the time-label DF
self._data_vs_times_df = data_vs_times_df[[time_colname] + series_id_colnames + self._value_colnames_vs_times].copy() # take a copy before we start modifying it
self._data_vs_times_df = self._data_vs_times_df.merge(self._series_id_colnames_mapping)
self._data_vs_times_df.drop(axis=1, inplace=True, labels=self._series_id_colnames)
# Replace multiple series_id_colnames with a single idenfier column, for the series-label DF
if data_vs_series_df is None:
self._data_vs_series_df = None
else:
self._data_vs_series_df = data_vs_series_df[series_id_colnames + self._value_colnames_vs_series].copy() # take a copy before we start modifying it
self._data_vs_series_df = self._data_vs_series_df.merge(self._series_id_colnames_mapping)
self._data_vs_series_df.drop(axis=1, inplace=True, labels=self._series_id_colnames)
# After merging by columns
self._series_id_colnames_mapping.set_index(inplace=True, keys=self._series_id_colname)
# Validation of DF contents, for the time-label DF only
counts_by_time_and_series = self._data_vs_times_df.groupby([self._time_colname,self._series_id_colname]).size()
counts_by_time = self._data_vs_times_df.groupby([self._time_colname]).size()
counts_by_series = self._data_vs_times_df.groupby(self._series_id_colname).size()
self._count_time_indices = counts_by_time.size
self._count_series_indices = counts_by_series.size
duplicate_observations = counts_by_time_and_series[counts_by_time_and_series > 1]
if duplicate_observations.size > 0:
raise Exception('There are ' + str(duplicate_observations.size) + ' instances of more than one observation per series index + time index! Should be 0 instances.')
missing_observations = counts_by_time[counts_by_time < self._count_series_indices].size
if missing_observations > 0:
self.warning(str(missing_observations) + ' time indices have missing series observations')
missing_observations = counts_by_series[counts_by_series < self._count_time_indices].size
if missing_observations > 0:
self.warning(str(missing_observations) + ' series indices have missing time observations')
# Sort by single series identifier (for reproducibility) and time (to ensure CV works!)
# MultiIndex requires data to be sorted to work properly (source: http://pandas.pydata.org/pandas-docs/version/0.18.1/advanced.html)
self._data_vs_times_df.sort_values(axis=0, inplace=True, by=[self._series_id_colname, self._time_colname])
if self._data_vs_series_df is not None:
self._data_vs_series_df.sort_values(axis=0, inplace=True, by=[self._series_id_colname])
# I've never had cause to use this, so have left it commented for now.
# Drop any NA observations that are being passed in, now that we have extracted series & timestamp info. from them
#self._data_vs_times_df = self._data_vs_times_df.dropna().reset_index(drop=True)
#if self._data_vs_series_df is not None:
# self._data_vs_series_df = self._data_vs_series_df.dropna().reset_index(drop=True)
# Set an index consisting of series identifier and time, for the time-label DF...
self._data_vs_times_df.set_index(inplace=True, verify_integrity=False, keys=[self._series_id_colname, self._time_colname])
# ... and by series identifier only, for the series-label DF
if self._data_vs_series_df is not None:
self._data_vs_series_df.set_index(inplace=True, verify_integrity=False, keys=[self._series_id_colname])
# Prepare an index to | |
# Mikhail-QA/HS — API/AdminPanel/admin_panel_filters/test_002_filter_homework_tab_home_school.py
import requests
import allure
from API.AdminPanel.admin_panel_filters.request_list.url_filter_homework import FilterGrades
from API.AdminPanel.admin_panel_filters.request_list.url_filter_homework import FilterSubjects
from API.AdminPanel.admin_panel_filters.request_list.url_filter_homework import FilterStatusDz
from API.AdminPanel.admin_panel_filters.request_list.url_filter_homework import FilterWeek
from API.AdminPanel.admin_panel_filters.request_list.url_filter_homework import FilterTypeDz
from API.AdminPanel.admin_panel_filters.request_list.url_filter_homework import FilterFormatAccess
from API.AdminPanel.admin_panel_filters.request_list.url_filter_homework import FilterMark
from API.AdminPanel.admin_panel_filters.request_list.url_filter_homework import FilterSchools
from API.setting_tests import LogInfoApi
# Expected top-level keys of every homework-filter JSON response, in order.
parameter_array = ['homeworks', 'total', 'page', 'per_page']
# Expected value of the 'total' field when a filter does not narrow the result set.
success_total = 21
# total_standard = 21  # the default number of homework items initially displayed on the page.
@allure.feature("Админка Домашние задания вкладка Домашняя школа Фильтр Школы")
@allure.story(
"Проверяю ответ статус кода в Школах. Во всех остальных фильтрах выбран параметр (Все)")
class TestFilterSchools:
def test_not_school(self):
with allure.step("Проверка параметра (Школа не указан)"):
url = FilterSchools.Not_school
not_school = requests.get(url, allow_redirects=False)
try:
not_school.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=not_school)
assert not_school.status_code == 200
assert list(not_school.json().keys()) == parameter_array
assert dict(not_school.json()).get('total') == success_total
def test_school_stolichniy_kit(self):
with allure.step("Проверка параметра (Школа Столичный-КИТ)"):
url = FilterSchools.school_stolichniy_kit
school_stolichniy_kit = requests.get(url, allow_redirects=False)
try:
school_stolichniy_kit.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=school_stolichniy_kit)
assert school_stolichniy_kit.status_code == 200
assert list(school_stolichniy_kit.json().keys()) == parameter_array
assert dict(school_stolichniy_kit.json()).get('total') == success_total
@allure.feature("Админка Домашние задания вкладка Домашняя школа фильтр Классы")
@allure.story("Проверяю ответ статус кода с 1 по 11 класс. Во всех остальных фильтрах (Все)")
class TestFilterGradesInHomework:
def test_all_subjects(self):
url = FilterGrades.klass_all
all_subjects = requests.get(url, allow_redirects=False)
try:
all_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=all_subjects)
assert all_subjects.status_code == 200
assert list(all_subjects.json().keys()) == parameter_array
assert dict(all_subjects.json()).get('total') == success_total
def test_one_subjects(self):
with allure.step("Проверка параметра (1) в классах"):
url = FilterGrades.klass_one
one_subjects = requests.get(url, allow_redirects=False)
try:
one_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=one_subjects)
assert one_subjects.status_code == 200
assert list(one_subjects.json().keys()) == parameter_array
assert dict(one_subjects.json()).get('total') == success_total
def test_two_subjects(self):
with allure.step("Проверка параметра (2) в классах"):
url = FilterGrades.klass_two
two_subjects = requests.get(url, allow_redirects=False)
try:
two_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=two_subjects)
assert two_subjects.status_code == 200
assert list(two_subjects.json().keys()) == parameter_array
assert dict(two_subjects.json()).get('total') == success_total
def test_three_subjects(self):
with allure.step("Проверка параметра (3) в классах"):
url = FilterGrades.klass_three
three_subjects = requests.get(url, allow_redirects=False)
try:
three_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=three_subjects)
assert three_subjects.status_code == 200
assert list(three_subjects.json().keys()) == parameter_array
assert dict(three_subjects.json()).get('total') == success_total
def test_four_subjects(self):
with allure.step("Проверка параметра (4) в классах"):
url = FilterGrades.klass_four
four_subjects = requests.get(url, allow_redirects=False)
try:
four_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=four_subjects)
assert four_subjects.status_code == 200
assert list(four_subjects.json().keys()) == parameter_array
assert dict(four_subjects.json()).get('total') == success_total
def test_five_subjects(self):
with allure.step("Проверка параметра (5) в классах"):
url = FilterGrades.klass_five
five_subjects = requests.get(url, allow_redirects=False)
try:
five_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=five_subjects)
assert five_subjects.status_code == 200
assert list(five_subjects.json().keys()) == parameter_array
assert dict(five_subjects.json()).get('total') == success_total
def test_six_subjects(self):
with allure.step("Проверка параметра (6) в классах"):
url = FilterGrades.klass_six
six_subjects = requests.get(url, allow_redirects=False)
try:
six_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=six_subjects)
assert six_subjects.status_code == 200
assert list(six_subjects.json().keys()) == parameter_array
assert dict(six_subjects.json()).get('total') == success_total
def test_seven_subjects(self):
with allure.step("Проверка параметра (7) в классах"):
url = FilterGrades.klass_seven
seven_subjects = requests.get(url, allow_redirects=False)
try:
seven_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=seven_subjects)
assert seven_subjects.status_code == 200
assert list(seven_subjects.json().keys()) == parameter_array
assert dict(seven_subjects.json()).get('total') == success_total
def test_eight_subjects(self):
with allure.step("Проверка параметра (8) в классах"):
url = FilterGrades.klass_eight
eight_subjects = requests.get(url, allow_redirects=False)
try:
eight_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=eight_subjects)
assert eight_subjects.status_code == 200
assert list(eight_subjects.json().keys()) == parameter_array
assert dict(eight_subjects.json()).get('total') == success_total
def test_nine_subjects(self):
with allure.step("Проверка параметра (9) в классах"):
url = FilterGrades.klass_nine
nine_subjects = requests.get(url, allow_redirects=False)
try:
nine_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=nine_subjects)
assert nine_subjects.status_code == 200
assert list(nine_subjects.json().keys()) == parameter_array
assert dict(nine_subjects.json()).get('total') == success_total
def test_ten_subjects(self):
with allure.step("Проверка параметра (10) в классах"):
url = FilterGrades.klass_ten
ten_subjects = requests.get(url, allow_redirects=False)
try:
ten_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=ten_subjects)
assert ten_subjects.status_code == 200
assert list(ten_subjects.json().keys()) == parameter_array
assert dict(ten_subjects.json()).get('total') == success_total
def test_eleven_subjects(self):
with allure.step("Проверка параметра (11) в классах"):
url = FilterGrades.klass_eleven
eleven_subjects = requests.get(url, allow_redirects=False)
try:
eleven_subjects.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=eleven_subjects)
assert eleven_subjects.status_code == 200
assert list(eleven_subjects.json().keys()) == parameter_array
assert dict(eleven_subjects.json()).get('total') == success_total
@allure.feature("Админка Домашние задания вкладка Домашняя школа Фильтр Предметы")
@allure.story(
"Проверяю ответ статус кода с первого предмета по последний. Во всех остальных фильтрах (Все)")
class TestFilterSubjectsInHomework:
def test_subjects_russian_language(self):
with allure.step("Проверка параметра (Русский язык) в предметах"):
url = FilterSubjects.subject_russian_language
russian_language = requests.get(url, allow_redirects=False)
try:
russian_language.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=russian_language)
assert russian_language.status_code == 200
assert list(russian_language.json().keys()) == parameter_array
assert dict(russian_language.json()).get('total') == success_total
def test_subjects_literatura(self):
with allure.step("Проверка параметра (Литература) в предметах"):
url = FilterSubjects.subject_literatura
literatura = requests.get(url, allow_redirects=False)
try:
literatura.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=literatura)
assert literatura.status_code == 200
assert list(literatura.json().keys()) == parameter_array
assert dict(literatura.json()).get('total') == success_total
def test_subjects_english_language(self):
with allure.step("Проверка параметра (Английский язык) в предметах"):
url = FilterSubjects.subject_english_language
english_language = requests.get(url, allow_redirects=False)
try:
english_language.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=english_language)
assert english_language.status_code == 200
assert list(english_language.json().keys()) == parameter_array
assert dict(english_language.json()).get('total') == success_total
def test_subjects_matematika(self):
with allure.step("Проверка параметра (Математика) в предметах"):
url = FilterSubjects.subject_matematika
matematika = requests.get(url, allow_redirects=False)
try:
matematika.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=matematika)
assert matematika.status_code == 200
assert list(matematika.json().keys()) == parameter_array
assert dict(matematika.json()).get('total') == success_total
def test_subjects_history(self):
with allure.step("Проверка параметра (История) в предметах"):
url = FilterSubjects.subject_history
history = requests.get(url, allow_redirects=False)
try:
history.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=history)
assert history.status_code == 200
assert list(history.json().keys()) == parameter_array
assert dict(history.json()).get('total') == success_total
def test_subject_prirodavedenie(self):
with allure.step("Проверка параметра (Природоведние) в предметах"):
url = FilterSubjects.subject_prirodavedenie
prirodavedenie = requests.get(url, allow_redirects=False)
try:
prirodavedenie.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=prirodavedenie)
assert prirodavedenie.status_code == 200
assert list(prirodavedenie.json().keys()) == parameter_array
assert dict(prirodavedenie.json()).get('total') <= success_total
def test_subject_objestvoznanie(self):
with allure.step("Проверка параметра (Обществознание) в предметах"):
url = FilterSubjects.subject_objestvoznanie
objestvoznanie = requests.get(url, allow_redirects=False)
try:
objestvoznanie.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=objestvoznanie)
assert objestvoznanie.status_code == 200
assert list(objestvoznanie.json().keys()) == parameter_array
assert dict(objestvoznanie.json()).get('total') == success_total
def test_subject_geografia(self):
with allure.step("Проверка параметра (География) в предметах"):
url = FilterSubjects.subject_geografia
geografia = requests.get(url, allow_redirects=False)
try:
geografia.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=geografia)
assert geografia.status_code == 200
assert list(geografia.json().keys()) == parameter_array
assert dict(geografia.json()).get('total') == success_total
def test_subject_biologia(self):
with allure.step("Проверка параметра (Биология) в предметах"):
url = FilterSubjects.subject_biologia
biologia = requests.get(url, allow_redirects=False)
try:
biologia.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=biologia)
assert biologia.status_code == 200
assert list(biologia.json().keys()) == parameter_array
assert dict(biologia.json()).get('total') == success_total
def test_subject_algebra_standart(self):
with allure.step("Проверка параметра (Алгебра.Стандартный курс) в предметах"):
url = FilterSubjects.subject_algebra_standart
algebra_standart = requests.get(url, allow_redirects=False)
try:
algebra_standart.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=algebra_standart)
assert algebra_standart.status_code == 200
assert list(algebra_standart.json().keys()) == parameter_array
assert dict(algebra_standart.json()).get('total') == success_total
def test_subject_geometria_standart(self):
with allure.step("Проверка параметра (Геометрия.Стандартный курс) в предметах"):
url = FilterSubjects.subject_geometria_standart
geometria_standart = requests.get(url, allow_redirects=False)
try:
geometria_standart.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=geometria_standart)
assert geometria_standart.status_code == 200
assert list(geometria_standart.json().keys()) == parameter_array
assert dict(geometria_standart.json()).get('total') == success_total
def test_subject_fizika_standart(self):
with allure.step("Проверка параметра (Физика.Стандартный курс) в предметах"):
url = FilterSubjects.subject_fizika_standart
fizika_standart = requests.get(url, allow_redirects=False)
try:
fizika_standart.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=fizika_standart)
assert fizika_standart.status_code == 200
assert list(fizika_standart.json().keys()) == parameter_array
assert dict(fizika_standart.json()).get('total') == success_total
def test_subject_informatika(self):
with allure.step("Проверка параметра (Информатика) в предметах"):
url = FilterSubjects.subject_informatika
informatika = requests.get(url, allow_redirects=False)
try:
informatika.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=informatika)
assert informatika.status_code == 200
assert list(informatika.json().keys()) == parameter_array
assert dict(informatika.json()).get('total') == success_total
def test_subject_himiya(self):
with allure.step("Проверка параметра (Химия) в предметах"):
url = FilterSubjects.subject_himiya
himiya = requests.get(url, allow_redirects=False)
try:
himiya.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=himiya)
assert himiya.status_code == 200
assert list(himiya.json().keys()) == parameter_array
assert dict(himiya.json()).get('total') == success_total
def test_subject_algebra(self):
with allure.step("Проверка параметра (Алгебра) в предметах"):
url = FilterSubjects.subject_algebra
algebra = requests.get(url, allow_redirects=False)
try:
algebra.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=algebra)
assert algebra.status_code == 200
assert list(algebra.json().keys()) == parameter_array
assert dict(algebra.json()).get('total') == success_total
def test_subject_geometria(self):
with allure.step("Проверка параметра (Геометрия) в предметах"):
url = FilterSubjects.subject_geometria
geometria = requests.get(url, allow_redirects=False)
try:
geometria.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=geometria)
assert geometria.status_code == 200
assert list(geometria.json().keys()) == parameter_array
assert dict(geometria.json()).get('total') == success_total
def test_subject_fizika(self):
with allure.step("Проверка параметра (Физика) в предметах"):
url = FilterSubjects.subject_fizika
fizika = requests.get(url, allow_redirects=False)
try:
fizika.raise_for_status()
except requests.exceptions.HTTPError as e:
print('ERROR: %s' % e)
LogInfoApi.log_info(log=fizika)
assert fizika.status_code | |
thread = emu.get_current_thread()
rv = thread.get_id()
return rv
@apihook('GetCurrentProcessId', argc=0)
def GetCurrentProcessId(self, emu, argv, ctx={}):
'''DWORD GetCurrentProcessId();'''
proc = emu.get_current_process()
rv = proc.get_id()
return rv
@apihook('IsProcessorFeaturePresent', argc=1,
conv=e_arch.CALL_CONV_STDCALL)
def IsProcessorFeaturePresent(self, emu, argv, ctx={}):
'''BOOL IsProcessorFeaturePresent(
DWORD ProcessorFeature
);'''
rv = 1
lookup = {
25: 'PF_ARM_64BIT_LOADSTORE_ATOMIC',
24: 'PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE',
26: 'PF_ARM_EXTERNAL_CACHE_AVAILABLE',
27: 'PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE',
18: 'PF_ARM_VFP_32_REGISTERS_AVAILABLE',
7: 'PF_3DNOW_INSTRUCTIONS_AVAILABLE',
16: 'PF_CHANNELS_ENABLED',
2: 'PF_COMPARE_EXCHANGE_DOUBLE',
14: 'PF_COMPARE_EXCHANGE128',
15: 'PF_COMPARE64_EXCHANGE128',
23: 'PF_FASTFAIL_AVAILABLE',
1: 'PF_FLOATING_POINT_EMULATED',
0: 'PF_FLOATING_POINT_PRECISION_ERRATA',
3: 'PF_MMX_INSTRUCTIONS_AVAILABLE',
12: 'PF_NX_ENABLED',
9: 'PF_PAE_ENABLED',
8: 'PF_RDTSC_INSTRUCTION_AVAILABLE',
22: 'PF_RDWRFSGSBASE_AVAILABLE',
20: 'PF_SECOND_LEVEL_ADDRESS_TRANSLATION',
13: 'PF_SSE3_INSTRUCTIONS_AVAILABLE',
21: 'PF_VIRT_FIRMWARE_ENABLED',
6: 'PF_XMMI_INSTRUCTIONS_AVAILABLE',
10: 'PF_XMMI64_INSTRUCTIONS_AVAILABLE',
17: 'PF_XSAVE_ENABLED',
}
argv[0] = lookup[argv[0]]
return rv
@apihook('lstrcmpi', argc=2)
def lstrcmpi(self, emu, argv, ctx={}):
'''int lstrcmpiA(
LPCSTR lpString1,
LPCSTR lpString2
);'''
cw = self.get_char_width(ctx)
string1, string2 = argv
rv = 1
cs1 = self.read_mem_string(string1, cw)
cs2 = self.read_mem_string(string2, cw)
argv[0] = cs1
argv[1] = cs2
if cs1.lower() == cs2.lower():
rv = 0
return rv
@apihook('lstrcmp', argc=2)
def lstrcmp(self, emu, argv, ctx={}):
'''int lstrcmpiA(
LPCSTR lpString1,
LPCSTR lpString2
);'''
cw = self.get_char_width(ctx)
string1, string2 = argv
rv = 1
cs1 = self.read_mem_string(string1, cw)
cs2 = self.read_mem_string(string2, cw)
argv[0] = cs1
argv[1] = cs2
if cs1 == cs2:
rv = 0
return rv
@apihook('QueryPerformanceCounter', argc=1)
def QueryPerformanceCounter(self, emu, argv, ctx={}):
'''BOOL WINAPI QueryPerformanceCounter(
_Out_ LARGE_INTEGER *lpPerformanceCount
);'''
lpPerformanceCount, = argv
rv = 1
self.mem_write(lpPerformanceCount,
self.perf_counter.to_bytes(8, 'little'))
return rv
@apihook('lstrlen', argc=1)
def lstrlen(self, emu, argv, ctx={}):
'''
int lstrlen(
LPCSTR lpString
);
'''
src, = argv
try:
cw = self.get_char_width(ctx)
except Exception:
cw = 1
s = self.read_mem_string(src, cw)
argv[0] = s
return len(s)
@apihook('GetModuleHandleEx', argc=3)
def GetModuleHandleEx(self, emu, argv, ctx={}):
'''
BOOL GetModuleHandleExA(
DWORD dwFlags,
LPCSTR lpModuleName,
HMODULE *phModule
);
'''
dwFlags, lpModuleName, phModule = argv
hmod = self.GetModuleHandle(emu, [lpModuleName], ctx)
if phModule:
_mod = (hmod).to_bytes(emu.get_ptr_size(), 'little')
self.mem_write(phModule, _mod)
return hmod
@apihook('GetModuleHandle', argc=1)
def GetModuleHandle(self, emu, argv, ctx={}):
'''HMODULE GetModuleHandle(
LPCSTR lpModuleName
);'''
mod_name, = argv
cw = self.get_char_width(ctx)
rv = 0
if not mod_name:
proc = emu.get_current_process()
rv = proc.base
else:
lib = self.read_mem_string(mod_name, cw)
argv[0] = lib
sname, _ = os.path.splitext(lib)
sname = winemu.normalize_dll_name(sname)
mods = emu.get_user_modules()
for mod in mods:
img = ntpath.basename(mod.get_emu_path())
fname, _ = os.path.splitext(img)
if fname.lower() == sname.lower():
rv = mod.get_base()
break
return rv
@apihook('GetProcAddress', argc=2)
def GetProcAddress(self, emu, argv, ctx={}):
'''FARPROC GetProcAddress(
HMODULE hModule,
LPCSTR lpProcName
);'''
hmod, proc_name = argv
rv = 0
proc = ''
if proc_name:
try:
proc = self.read_mem_string(proc_name, 1)
argv[1] = proc
except Exception:
if isinstance(proc_name, int) and proc_name < 0xFFFF:
# Import is most likely an ordinal
proc = 'ordinal_%d' % proc_name
if proc:
mods = emu.get_user_modules()
for mod in mods:
if mod.get_base() == hmod:
bn = mod.get_base_name()
mname, _ = os.path.splitext(bn)
rv = emu.get_proc(mname, proc)
return rv
@apihook('GetConsoleWindow', argc=0)
def GetConsoleWindow(self, emu, argv, ctx={}):
'''HWND WINAPI GetConsoleWindow(void);'''
hwnd = 0
proc = emu.get_current_process()
console = proc.get_console()
if console:
win = console.get_window()
hwnd = win.handle
return hwnd
@apihook('Sleep', argc=1)
def Sleep(self, emu, argv, ctx={}):
'''void Sleep(DWORD dwMilliseconds);'''
millisec, = argv
return
@apihook('SleepEx', argc=2)
def SleepEx(self, emu, argv, ctx={}):
'''DWORD SleepEx(DWORD dwMilliseconds, BOOL bAlertable);
'''
millisec, bAlertable = argv
return
@apihook('GlobalAlloc', argc=2)
def GlobalAlloc(self, emu, argv, ctx={}):
'''
DECLSPEC_ALLOCATOR HGLOBAL GlobalAlloc(
UINT uFlags,
SIZE_T dwBytes
);
'''
uFlags, dwBytes = argv
chunk = self.heap_alloc(dwBytes, heap='GlobalAlloc')
return chunk
@apihook('GlobalSize', argc=1)
def GlobalSize(self, emu, argv, ctx={}):
'''
SIZE_T GlobalSize(
[in] HGLOBAL hMem
);
'''
hMem, = argv
size = 0
for mmap in emu.get_mem_maps():
if hMem == mmap.get_base():
size = mmap.get_size()
emu.set_last_error(windefs.ERROR_SUCCESS)
if not size:
emu.set_last_error(windefs.ERROR_INVALID_PARAMETER)
return size
@apihook('LocalAlloc', argc=2)
def LocalAlloc(self, emu, argv, ctx={}):
'''
DECLSPEC_ALLOCATOR HLOCAL LocalAlloc(
UINT uFlags,
SIZE_T uBytes
);
'''
uFlags, dwBytes = argv
chunk = self.heap_alloc(dwBytes, heap='LocalAlloc')
return chunk
@apihook('HeapAlloc', argc=3)
def HeapAlloc(self, emu, argv, ctx={}):
'''
DECLSPEC_ALLOCATOR LPVOID HeapAlloc(
HANDLE hHeap,
DWORD dwFlags,
SIZE_T dwBytes
);
'''
hHeap, dwFlags, dwBytes = argv
chunk = self.heap_alloc(dwBytes, heap='HeapAlloc')
if chunk:
emu.set_last_error(windefs.ERROR_SUCCESS)
return chunk
@apihook('HeapSize', argc=3)
def HeapSize(self, emu, argv, ctx={}):
'''
SIZE_T HeapSize(
HANDLE hHeap,
DWORD dwFlags,
LPCVOID lpMem
);
'''
hHeap, dwFlags, lpMem = argv
size = 0
for mmap in emu.get_mem_maps():
if lpMem == mmap.get_base():
size = mmap.get_size()
emu.set_last_error(windefs.ERROR_SUCCESS)
if not size:
emu.set_last_error(windefs.ERROR_INVALID_PARAMETER)
return size
@apihook('GetTickCount', argc=0)
def GetTickCount(self, emu, argv, ctx={}):
'''
DWORD GetTickCount();
'''
self.tick_counter += 20
return self.tick_counter
@apihook('GetTickCount64', argc=0)
def GetTickCount64(self, emu, argv, ctx={}):
'''
ULONGLONG GetTickCount64();
'''
self.tick_counter += 20
return self.tick_counter
@apihook('lstrcat', argc=2)
def lstrcat(self, emu, argv, ctx={}):
'''
LPSTR lstrcat(
LPSTR lpString1,
LPCSTR lpString2
);
'''
lpString1, lpString2 = argv
cw = self.get_char_width(ctx)
s1 = self.read_mem_string(lpString1, cw)
s2 = self.read_mem_string(lpString2, cw)
argv[0] = s1
argv[1] = s2
if cw == 2:
new = (s1 + s2).encode('utf-16le')
else:
new = (s1 + s2).encode('utf-8')
self.mem_write(lpString1, new + b'\x00')
return lpString1
@apihook('lstrcpyn', argc=3)
def lstrcpyn(self, emu, argv, ctx={}):
'''
LPSTR lstrcpynA(
LPSTR lpString1,
LPCSTR lpString2,
int iMaxLength
);
'''
dest, src, iMaxLength = argv
cw = self.get_char_width(ctx)
s = self.read_mem_string(src, cw)
argv[1] = s
s = s[:iMaxLength - 1]
s += '\x00'
self.write_mem_string(s, dest, cw)
return dest
@apihook('lstrcpy', argc=2)
def lstrcpy(self, emu, argv, ctx={}):
'''
LPSTR lstrcpyA(
LPSTR lpString1,
LPCSTR lpString2
);
'''
dest, src = argv
cw = self.get_char_width(ctx)
s = self.read_mem_string(src, cw)
argv[1] = s
s += '\x00'
self.write_mem_string(s, dest, cw)
return dest
@apihook('IsBadReadPtr', argc=2)
def IsBadReadPtr(self, emu, argv, ctx={}):
'''
BOOL IsBadReadPtr(
const VOID *lp,
UINT_PTR ucb
);
'''
lp, ucb = argv
rv = True
if lp and ucb:
v1 = emu.is_address_valid(lp)
v2 = emu.is_address_valid(lp + (ucb - 1))
if v1 and v2:
rv = False
return rv
@apihook('HeapReAlloc', argc=4)
def HeapReAlloc(self, emu, argv, ctx={}):
'''
DECLSPEC_ALLOCATOR LPVOID HeapReAlloc(
HANDLE hHeap,
DWORD dwFlags,
_Frees_ptr_opt_ LPVOID lpMem,
SIZE_T dwBytes
);
'''
hHeap, dwFlags, lpMem, dwBytes = argv
tag_prefix = 'api.heap'
new_buf = 0
if hHeap and lpMem and dwBytes:
mm = emu.get_address_map(lpMem)
if mm and mm.get_tag().startswith(tag_prefix):
# Copy the existing data
data = self.mem_read(lpMem, mm.get_size())
new_buf = self.heap_alloc(dwBytes, heap='HeapReAlloc')
self.mem_write(new_buf, data)
return new_buf
@apihook('LocalReAlloc', argc=3)
def LocalReAlloc(self, emu, argv, ctx={}):
'''
DECLSPEC_ALLOCATOR HLOCAL LocalReAlloc(
_Frees_ptr_opt_ HLOCAL hMem,
SIZE_T uBytes,
UINT uFlags
);
'''
hMem, uBytes, uFlags = argv
tag_prefix = 'api.heap'
new_buf = 0
if hMem and uBytes:
mm = emu.get_address_map(hMem)
if mm and mm.get_tag().startswith(tag_prefix):
# Copy the existing data
data = self.mem_read(hMem, mm.get_size())
new_buf = self.heap_alloc(uBytes, heap='LocalReAlloc')
self.mem_write(new_buf, data)
return new_buf
@apihook('HeapCreate', argc=3)
def HeapCreate(self, emu, argv, ctx={}):
'''
HANDLE HeapCreate(
DWORD flOptions,
SIZE_T dwInitialSize,
SIZE_T dwMaximumSize
);
'''
flOptions, dwInitialSize, dwMaximumSize = argv
heap = self.create_heap(emu)
return heap
@apihook('GetCurrentThread', argc=0)
def GetCurrentThread(self, emu, argv, ctx={}):
'''
HANDLE GetCurrentThread();
'''
thread = emu.get_current_thread()
obj = emu.om.get_object_from_addr(thread.address)
return emu.get_object_handle(obj)
@apihook('TlsAlloc', argc=0)
def TlsAlloc(self, emu, argv, ctx={}):
'''
DWORD TlsAlloc();
'''
thread = emu.get_current_thread()
tls = thread.get_tls()
tls.append(0)
thread.set_tls(tls)
idx = len(tls) - 1
return idx
@apihook('TlsSetValue', argc=2)
def TlsSetValue(self, emu, argv, ctx={}):
'''
BOOL TlsSetValue(
DWORD dwTlsIndex,
LPVOID lpTlsValue
);
'''
dwTlsIndex, lpTlsValue = argv
rv = 0
thread = emu.get_current_thread()
tls = thread.get_tls()
if dwTlsIndex < len(tls):
tls[dwTlsIndex] = lpTlsValue
thread.set_tls(tls)
rv = 1
emu.set_last_error(windefs.ERROR_SUCCESS)
else:
emu.set_last_error(windefs.ERROR_INVALID_PARAMETER)
return rv
@apihook('TlsGetValue', argc=1)
def TlsGetValue(self, emu, argv, ctx={}):
'''
LPVOID TlsGetValue(
DWORD dwTlsIndex
);
'''
dwTlsIndex, = argv
rv = 0
thread = emu.get_current_thread()
tls = thread.get_tls()
if dwTlsIndex < len(tls):
rv = tls[dwTlsIndex]
emu.set_last_error(windefs.ERROR_SUCCESS)
else:
emu.set_last_error(windefs.ERROR_INVALID_PARAMETER)
return rv
@apihook('FlsAlloc', argc=1)
def FlsAlloc(self, emu, argv, ctx={}):
'''
DWORD FlsAlloc(
PFLS_CALLBACK_FUNCTION lpCallback
);
'''
thread = emu.get_current_thread()
fls = thread.get_fls()
fls.append(0)
thread.set_fls(fls)
idx = len(fls) - 1
return idx
@apihook('FlsSetValue', argc=2)
def FlsSetValue(self, emu, argv, ctx={}):
'''
BOOL FlsSetValue(
DWORD dwFlsIndex,
PVOID lpFlsData
);
'''
dwFlsIndex, lpFlsData = argv
rv = 0
thread = emu.get_current_thread()
fls = thread.get_fls()
if len(fls) == 0:
fls.append(0)
if dwFlsIndex < len(fls):
fls[dwFlsIndex] = lpFlsData
thread.set_fls(fls)
rv = 1
emu.set_last_error(windefs.ERROR_SUCCESS)
else:
emu.set_last_error(windefs.ERROR_INVALID_PARAMETER)
return rv
@apihook('FlsGetValue', argc=1)
def FlsGetValue(self, emu, argv, ctx={}):
'''
PVOID FlsGetValue(
DWORD dwFlsIndex
);
'''
dwFlsIndex, = argv
rv = 0
thread = emu.get_current_thread()
fls = thread.get_fls()
if dwFlsIndex < len(fls):
| |
#!/usr/bin/env python3
import datetime
import dateutil.parser
import os
from path import cd
import simplejson as json
import sqlite3
import subprocess
import sys
import yaml
import log
warn, info, debug, fatal = log.reporters()
class UnsupportedDBType(Exception):
    """Raised when the configured database driver is not one we support."""
    pass
class DBNotFound(Exception):
    """Raised when the target database file does not exist."""
    pass
class DBConn(object):
def __init__(self, db_name="development", db_conf_file="", connect=True):
"""Open a database connection, creating db if needed, and generally
get ready to store stuff.
DB_NAME is the name of the database to target from dbconf.yml.
If DB_CONF_FILE isn't specified, we use a stock one of defaults.
Goose migrations used dbconf.yml files, so for convenience, we
just read any needed data from that file.
If CONNECT is true, we open a db connection.
"""
self.db_name = db_name
if os.path.exists(db_conf_file):
# slurp dbconf.yml
with open(db_conf_file) as INF:
self.db_conf = yaml.load(INF)[db_name]
else:
info("dbconf.yml not found, using default config values (db will be leie.sqlite3)")
self.db_name = "development"
self.db_conf = yaml.load("development:\n driver: sqlite3\n open: leie.sqlite3\n")[self.db_name]
# If we're not opening a connection, we're done
if not connect:
return
# open and hang on to a db connection for later use
if self.db_conf['driver'] == 'sqlite3':
self.conn = sqlite3.connect(self.db_conf['open'])
else:
raise UnsupportedDBType("We don't support databases of type %s" % self.db_conf['driver'])
def close(self):
"""Commit and close the db connection"""
self.conn.commit()
self.conn.close()
def table_len(self, table):
"""Return the number of total rows in the TABLE"""
c = self.conn.cursor()
return (c.execute("SELECT Count(*) FROM %s" % table).fetchone()[0])
def row_to_dict(self, row, field=None, description=None):
"""
FIELD is a list or tuple of field names
DESCRIPTION is the results of cursor.description from sqlite
Either FIELD or DESCRIPTION must be present, but not both.
ROW is a tuple of values
Returns a dict with the keys taken from FIELD and the values taken from ROW.
"""
assert field or description
assert not (field and description)
if description:
field = [c[0] for c in description]
field = ['id' if f == 'rowid' else f for f in field]
return dict(zip(field, row))
class SQL(DBConn):
    """All the sql and goose stuff goes in this class.

    We generate the SQL here because in the future I think we might want some
    smart/scripted way to manage sql for different DB types."""

    def down(self, migration):
        """Returns schema sql for migrating the db down

        Specify a MIGRATION, the first being 0 on up to the latest.
        If you specify a migration beyond our total, we return
        None.
        """
        if migration == 0:
            return """
        DROP TABLE exclusion;
        DROP TABLE reinstatement;
        """
        if migration == 1:
            return "DROP TABLE log;"

    def goose(self):
        """Returns a dict of goose migrations.  The keys are filenames and the
        values are the contents of the goose files.

        We only have one migration so far, so this is pretty easy.
        """
        fnames = ["20170515130501_initial_create.sql",
                  "20170606100001_create_log.sql"]
        migrations = {}
        # enumerate pairs each migration number with its filename
        # (replaces the old range(len(...)) index loop).
        for number, fname in enumerate(fnames):
            migrations[fname] = ("-- +goose Up\n" + self.up(number)
                                 + "\n-- +goose Down\n" + self.down(number) + "\n")
        return migrations

    def goose_write(self, dirname=None):
        """Writes any needed migration files to the migrations directory
        specified by DIRNAME.  Leave DIRNAME as None to just use
        ./db as the migrations directory.

        Returns list of paths to created files.
        """
        if not dirname:
            dirname = os.path.join(os.path.dirname(__file__), "db")
        dirname = os.path.join(dirname, self.db_conf['driver'])
        os.makedirs(dirname, exist_ok=True)
        created = []
        for fname, migration in self.goose().items():
            fname = os.path.join(dirname, fname)
            if os.path.exists(fname):
                debug("Migration " + fname + " already exists. Overwriting.")
            created.append(fname)
            info("Writing migration to " + fname)
            with open(fname, 'w') as OUTF:
                OUTF.write(migration)
        return created

    def migrate(self):
        """Bring the db schema up to date by running any needed model
        migrations."""
        debug(self.db_conf)
        dirname = os.path.dirname(self.db_conf['open'])
        if not dirname:
            dirname = os.path.dirname(__file__)
        with cd(dirname):
            # Make sure the sqlite3 db exists before we try to migrate it
            if not os.path.exists(os.path.basename(self.db_conf['open'])):
                raise DBNotFound("DB %s doesn't exist, so we can't migrate it." % self.db_conf['open'])

            # Goose apparently returns 0 even when it errors, so we
            # have to check stderr and react accordingly.
            cmd = "goose -dir db/{0} {0} {1} up".format(self.db_conf['driver'], os.path.basename(self.db_conf['open']))
            debug("Executing `%s`" % cmd)
            # NOTE(review): shell=True with a formatted command string;
            # the inputs come from our own dbconf.yml, not from users.
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            out, err = p.communicate()
            out = out.decode("utf-8")
            err = err.decode("utf-8")
            if p.returncode != 0:
                sys.stderr.write("%s\n%s" % (out, err))
                raise subprocess.CalledProcessError(p.returncode, cmd, out + err)
        return out

    def up(self, migration):
        """Returns schema sql for migrating the db up.

        Specify a MIGRATION, the first being 0 on up to the latest.
        If you specify a migration beyond our total, we return
        None.
        """
        # We only handle sqlite for now
        if self.db_conf['driver'] != "sqlite3":
            raise UnsupportedDBType("We don't have migrations for %s" % self.db_conf['driver'])

        if migration == 0:
            common_rows = """
        lastname text check(lastname is null or length(lastname) <= 20),
        firstname text check(firstname is null or length(firstname) <= 15),
        midname text check(midname is null or length(midname) <= 15),
        busname text check(busname is null or length(busname) <= 30),
        general text check(general is null or length(general) <= 20),
        specialty text check(specialty is null or length(specialty) <= 20),
        upin text check(upin is null or length(upin) <= 6),
        npi integer check(npi is null or npi<10000000000),
        dob text check(dob is null or length(dob) <= 23),
        address text check(address is null or length(address) <= 30),
        city text check(city is null or length(city) <= 20),
        state text check(state is null or length(state) <= 2),
        zip integer check(zip is null or zip < 100000),
        excltype text not null check(excltype is null or length(excltype) <= 8),
        excldate text not null check(excldate is null or length(excldate) <= 23),
        reindate text check(reindate is null or length(reindate) <= 23),
        waiverdate text check(waiverdate is null or length(waiverdate) <= 23),
        waiverstate text check(waiverstate is null or length(waiverstate) <= 2)
        """
            return ("CREATE TABLE IF NOT EXISTS exclusion (" + common_rows + ");\n"
                    + "CREATE TABLE IF NOT EXISTS reinstatement (" + common_rows + ");\n")
        elif migration == 1:
            return """
        CREATE TABLE IF NOT EXISTS log (
           datetime text,
           datatype text,
           msg text);
        """
        else:
            return None
class LEIE(SQL):
"""This is a DAO class but not an ORM class. We're modeling the
database, not the data. Maybe that will change, but it works for
now.
"""
def count_exclusions(self):
"""Return number of rows in the exclusion table"""
return self.table_len("exclusion")
def dedupe(self, table):
"""
Remove any duplicate rows from TABLE
"""
# Look for duplicate entries
seen = set()
uniq = []
dup = []
c = self.conn.cursor()
for x in c.execute("SELECT * FROM %s" % table).fetchall():
if x not in seen:
uniq.append(x)
seen.add(x)
else:
dup.append(x)
# We're done if there are no dupes
if not dup:
return
# Uh-oh, better fess up and clean up
warn("Duplicate reinstatements found in %s!" % table)
info("Cleaning duplicate reinstatements from %s" % table)
c.execute("delete from {0} where rowid not in (select max(rowid) from {0} group by {1})".format(
table,
", ".join(self.get_header(table))
))
def dedupe_reinstatements(self):
"""
Make sure there are no duplicate rows in the reinstatement table.
"""
self.dedupe("reinstatement")
def get_download_datetime(self, fname):
"""Return the logged time of the last download of the file named FNAME
If it's not there, return None"""
c = self.conn.cursor()
all = c.execute("SELECT * FROM log WHERE msg=?", ["Downloaded " + fname]).fetchall()
if not all:
return None
return dateutil.parser.parse(all[-1][0])
def get_exclusions(self, limit=10, page=1, filter={}, form="list"):
"""Return all the rows from the log table up to LIMIT rows
FORM can be 'list' or 'dict'. If 'list', return rows as
lists. If dict, return rows as dicts.
If PAGE is specified, we skip the first (PAGE-1)*LIMIT rows
and return LIMIT rows from there.
"""
assert form in ["list", "dict"]
assert page >= 1
assert limit >= 1
crsr = self.conn.cursor()
# Make strings for the filters to be inserted in to the sql
# query. Also, make a list of arguments for the query.
args = [limit*(page-1)]
query = ["SELECT rowid, * FROM exclusion",
"WHERE rowid NOT IN ( SELECT rowid FROM exclusion ORDER BY excldate DESC LIMIT ?)"
]
for k,v in filter.items():
if v:
query.append("AND %s=?" % k)
args.append(v)
query.append("ORDER BY excldate DESC LIMIT ?")
args.append(limit)
# Return a range of rows
rows = crsr.execute(" ".join(query), args).fetchall()
if form == 'list':
return rows
return [Exclusion(self.row_to_dict(r, description=crsr.description)) for r in rows]
| |
# <gh_stars>1-10
"""Very Easy Multithreading.
If you want to call the same function on many inputs, in a multithreaded
fashion, ```meanwhile``` makes it easy.
It can make your code significantly faster, especially if the function requires
I/O operations, like file access or HTTP(S) queries.
Simple Usage Example:
Suppose you have a function named `test_url`, that gets a URL, downloads
its content, and tests whether it contains the word "meanwhile". Also,
suppose you have a file, named `urls.txt`, where each line contains a URL
you would like to apply `test_url` to.
You can do the following:
>>> from meanwhile import Job
>>> job = Job(test_url, 10) # at most 10 threads will be used concurrently.
>>> urls = open("urls.txt").read().splitlines()
>>> job.add_many(urls)
>>> job.wait()
>>> results = job.get_results()
The target function (in the example: `test_url`) should get one argument, and
this argument should be hashable.
Note that if your function prints output, you probably want to use
`meanwhile.print()` instead of Python's built-in `print()` function.
This function prevents conflicts both with other threads, and with the progress
updates shown by the `wait` method.
For the full documentation, see https://github.com/hagai-helman/meanwhile.
"""
from threading import Thread, Lock
from queue import Queue, Empty
from time import time, sleep
from random import choice
# Console Output And Printing
# ***************************
#
# The method Job.wait() provides progress updates by printing a status line.
# Hence we need functions to overwrite status.
#
# The function meanwhile.print() is a wrapper for the builtin print() function
# that is more thread-safe, and also works well with the status printing
# feature.
# We stash the builtin print() function as meanwhile._print(); the
# module-level print() defined below shadows the builtin.
_print = print

# We use two global variables for printing:
# 1. _plock is a global print-lock;
# 2. _status contains the last status line shown on screen (None when no
#    status is displayed), and is used for both overwriting and
#    rewriting.
_plock = Lock()
_status = None
# _hide_status() and _show_status() are the two functions that handle
# writing and overwriting the status.
def _hide_status():
    """Remove previous status from screen.
    """
    # Blank out the previously printed status by overwriting it with
    # spaces, then return the cursor to the start of the line.
    #
    # Tabs are echoed back as tabs (they only move the cursor); every
    # other character is masked with a single space.  Characters such
    # as '\n' would be mishandled, but they are not expected to appear
    # in a status line.
    global _status
    if _status is not None:
        blanks = "".join("\t" if ch == '\t' else " " for ch in _status)
        _print("\r" + blanks + "\r", end="", flush=True)
        _status = None
def _show_status(status):
    """Replace the current status shown on screen with a new status.
    """
    global _status
    # Erase whatever status is currently displayed, then print the new
    # one without a trailing newline so it can be overwritten later.
    _hide_status()
    _print(status, end="", flush=True)
    _status = status
def print(*args, **kw):
    """A wrapper for the builtin print() function, which is more thread-safe
    and does not conflict with meanwhile's status lines.
    """
    with _plock:
        # Temporarily take the status line down, emit the message, then
        # restore the status (if one was being shown).
        saved = _status
        _hide_status()
        _print(*args, **kw)
        if saved is not None:
            _show_status(saved)
def _generate_tid():
"""Generate a random thread ID.
"""
return ''.join((choice("0123456789abcdef") for i in range(16)))
class Job(object):
def __init__(self, target, n_threads = 1, n_attempts = 1, factory = False):
"""Initialize a new Job object.
Args:
target - A target function or a target factory.
A target function is a function that gets one arguent of
hashable type.
A target factory is a function that gets no arguments and
returns a target function (or, equivalently, a class for
which __init__ takes no arguments, and __call__ is a
target function).
The target function is the function that will be called
with each input as argument. If a target factory is given,
a target function will be created for each thread spawned.
n_threads - An integer. The maximal number of threads to be used
concurrently.
n_attempts - An integer. The number of attempts for each input
before it is considered 'failed', and the exception
is stored in the exceptions' dictionary.
factory - A boolean. Must be True if (and only if) the target
argument is a target factory.
Note:
All arguments of __init__ can be later changed by the setter
methods: Job.set_target, Job.set_n_threads, and
Job.set_n_attempts.
"""
# Initialize Object's Fields
# **************************
# The target function or target factory:
self._target = (target, factory)
# The queue of inputs to be processed:
self._queue = Queue()
# The set of all inputs ever added:
self._inputs = set()
# The dictionaries that store the results and the excpetions:
self._results = {}
self._exceptions = {}
# Flow control configuration variables:
self._n_attempts = n_attempts
self._n_threads = n_threads
# Thread management:
self._threads = {}
self._n_paused = 0
# Locks:
self._ilock = Lock() # input set lock
self._rlock = Lock() # results lock
self._elock = Lock() # exceptions lock
self._tlock = Lock() # thread dict lock
self._block = Lock() # pause lock
self._plock = Lock() # paused threads counter lock
    def _start(self):
        """Spawn new threads as needed.

        Tops the worker pool up to self._n_threads; each worker pulls
        inputs from the queue until it is idle for ~1s or the pool has
        been shrunk, then removes itself from self._threads and exits.
        """
        # We define `worker`, which will be the target of each thread.
        def worker(tid):
            # Initialize the thread's target_function, and memorize self._target.
            target = self._target
            if target[1]:
                # target is a factory: build this thread's own function.
                target_function = target[0]()
            else:
                target_function = target[0]
            while True:
                # First, if the job is paused, we wait until it is resumed.
                # (If the job is paused, self._block will be locked, so
                # acquiring and immediately releasing it blocks here.)
                with self._plock:
                    self._n_paused += 1
                with self._block:
                    pass
                with self._plock:
                    self._n_paused -= 1
                # Then, we check whether there are too many threads, and if
                # there are, we commit suicide.
                with self._tlock:
                    if len(self._threads) > self._n_threads:
                        del self._threads[tid]
                        return
                # If target has changed - reinitialize the thread's
                # target_function.
                if self._target != target:
                    target = self._target
                    if target[1]:
                        target_function = target[0]()
                    else:
                        target_function = target[0]
                # Finally, we try to get an input of the queue, and process
                # it.
                try:
                    (arg, attempt) = self._queue.get(timeout = 1)
                    result = target_function(arg)
                    with self._rlock:
                        self._results[arg] = result
                except Empty:
                    # Queue idle for the timeout: shut this worker down.
                    with self._tlock:
                        del self._threads[tid]
                    return
                except Exception as e:
                    # Target raised: retry until n_attempts is reached,
                    # then record the exception for this input.
                    if attempt < self._n_attempts:
                        self._queue.put((arg, attempt + 1))
                    else:
                        with self._elock:
                            self._exceptions[arg] = e
        # We spawn the required number of threads.
        with self._tlock:
            new_threads = self._n_threads - len(self._threads)
            for j in range(new_threads):
                tid = _generate_tid()
                thread = Thread(target = worker, args = (tid,))
                self._threads[tid] = thread
                thread.start()
def add(self, arg, force = False):
"""Add a new input to the queue to be processed.
Args:
arg - the input to be processed. Must be hashable.
"""
with self._ilock:
if arg not in self._inputs or force:
self._inputs.add(arg)
self._queue.put((arg, 1))
self._start()
def add_many(self, args, force = False):
"""Add multiple new inputs to the queue to be processed.
Args:
args - an iterable that yields inputs. The inputs must be hashable.
"""
with self._ilock:
for arg in args:
if arg not in self._inputs or force:
self._inputs.add(arg)
self._queue.put((arg, 1))
self._start()
def get_n_pending(self):
"""Get the number of pending inputs (not reliable!)
"""
return self._queue.qsize()
def get_n_finished(self):
"""Get the number of inputs for which processing was finished
successfully.
"""
with self._rlock:
return len(self._results)
def get_n_running(self):
"""Get the number of inputs being processed right now (not reliable!)
"""
with self._tlock:
return len(self._threads) - self._n_paused
def get_n_failed(self):
"""Get the number of inputs for which processing has raised an
exception.
"""
with self._elock:
return len(self._exceptions)
def _get_status_string(self):
"""Get the status string (to be printed by self.print_status() or
self.wait().
"""
stats = (self.get_n_pending(),
self.get_n_running(),
self.get_n_finished(),
self.get_n_failed())
template = "pending: {}\t running: {}\t finished: {}\t failed: {}"
return template.format(*stats)
def print_status(self):
"""Show the job's current status."""
print(self._get_status_string())
def wait(self, show_status = True, timeout = None):
"""Wait until all inputs are processed.
KeyboardInterrupt can always be used to stop wait() safely.
Args:
show_status - a boolean. Determines whether to continuously show
the current running status.
timeout - a number. If timeout is a non-negative number, the method
blocks for at most this number of seconds, and then
returns.
"""
try:
if timeout is not None:
et = time() + timeout
pt = time()
if show_status:
with _plock:
_show_status(self._get_status_string())
while self.get_n_running() + self.get_n_pending() > 0:
with self._tlock:
| |
be < 1 for larger error (value), but less overfitted networks.
@param previous_study_data_file: only in 'query' mode, this is the saved ANN file name,
the Default is 'NeuroCharterNet.nsr'
@param number_of_epochs_for_overfit_check: The number of points to check if the error increases,
each epoch is represented by one point. (default number = 10)
@param minimum_slope_to_consider_overfitting: the minimum slope of the line formed from the number of
error points (above) from which, if exceeded it will be considered overfitting.
@param input_parameters_suggested_values: the suggested values of input parameters, this is used only in
'advanced query' mode where the ANN will run for the given inputs, and gets the values of outputs.
@param relative_importance_method: The method which the program will calculate relative importance
#: the method of calculation
'Garson' or 'g',
'Milne' or 'm',
'Nesr' or 'n',
'Advance' or 'a',
'Advance Corrected' or 'AdvCor' or 'ac'
Default is Advance Corrected method
        @param weights_trials: The number of trials to identify best set of weights to get least error.
Default = 10 trials
@param using_numpy: if True then the training will be performed using numpy, otherwise it will be performed
by regular math.
"""
        def create_a_folder_for_the_study():
            """Create a unique, time-stamped results folder for this study.

            Returns (time_stamp, directory_name, new_folder_path,
            current_folder).
            """
            def shorten_time_stamp(time_stamp_text):
                # Compress a 'YYMMDDHHMMSS'-style stamp into a short tag:
                # each 2-digit field is mapped to one character (digit or
                # letter), minutes keep two digits.
                # NOTE(review): Python 2 semantics assumed here — map()
                # returns an indexable list and '/' is integer division.
                short_stamp = ""
                split_stamp = map(lambda x: int(x), re.findall('.{1,2}', time_stamp_text))
                short_stamp += chr(split_stamp[0] + 50)
                short_stamp += str(split_stamp[1]) if split_stamp[1] < 10 else chr(split_stamp[1] + 55)
                short_stamp += str(split_stamp[2]) if split_stamp[2] < 10 else chr(split_stamp[2] + 55)
                short_stamp += str(split_stamp[3]) if split_stamp[3] < 10 else chr(split_stamp[3] + 55)
                short_stamp += '{0:02d}'.format(split_stamp[4])
                tmp = split_stamp[5] / 2
                short_stamp += str(tmp) if tmp < 10 else chr(tmp + 55)
                return short_stamp

            time_stamp = shorten_time_stamp(dt.now().strftime("%Y%m%d%H%M%S")[2:])  # was [2, -1]
            current_folder = os.getcwd()
            # create a "Results" Folder
            if not os.path.exists(current_folder + "\\Results"):
                os.makedirs(current_folder + "\\Results")
            # The filename of datafile if it is for simulation, otherwise if for retrieval take the previous study name
            data_or_ann_name = data_file if data_file != 'DrMohammadEl.nesr' else previous_study_data_file
            data_or_ann_name = data_or_ann_name[:-4]
            directory_not_ready = True
            directory_name = self.folder_prefix + 'NrCh_' + data_or_ann_name + '_' + time_stamp
            trial = 0
            trial_name = directory_name
            new_folder_path = ""
            # Keep appending '-<n>' until a non-existing folder name is
            # found, then create it.
            while directory_not_ready:
                trial += 1
                new_folder_path = current_folder + "\\Results\\" + trial_name
                if not os.path.exists(new_folder_path):
                    os.makedirs(new_folder_path)
                    directory_not_ready = False
                else:
                    trial_name = directory_name + '-' + str(trial)
                    directory_name = trial_name
            return time_stamp, directory_name, new_folder_path, current_folder
        # Determining the study's folder and path
self.folder_prefix = folder_prefix
self.time_stamp, self.directory_name, self.new_folder_path, self.current_folder = \
create_a_folder_for_the_study()
# logging Procedure (Refer to class Logger)
self.log_file = Logger(self.new_folder_path + '\\' + 'NrChLog.txt')
self.using_numpy = using_numpy
# print 'Console log...'
self.variable_selection_key = variable_selection_key
temp_key = [int(x) for x in list(variable_selection_key)]
self.all_variables_included = False
if len(temp_key) == sum(temp_key):
self.all_variables_included = True
print '#####################################################################'
print '############ NeuroCharter 1.0.C16-61 #################'
print '#####################################################################'
print '######## Dr. <NAME> & Dr. <NAME> ##########'
print '#####################################################################'
print '\nAnalysis started at : ' + time.strftime('%Y-%m-%d %H:%M:%S')
print 'Using NumPy algorithms? : ' + 'Yes' if self.using_numpy else 'No'
self.source_file_name = data_file if data_file != 'DrMohammadEl.nesr' else previous_study_data_file
if data_file != 'DrMohammadEl.nesr':
print 'Input data file : ' + data_file
else:
print 'Input network file : ' + previous_study_data_file
print 'Output directory name : ' + self.directory_name
print 'Current path : ' + self.new_folder_path + "\n"
        def print_net_info():
            """Load a previously saved network file and pretty-print its
            structure and variable lists, then terminate the program.

            Uses the closure variable ``previous_study_data_file``; loads it
            via ``self.network_load`` (which populates ``self.structure`` and
            ``self.temporary``) and reports:
              - the normalized network structure (inputs:hidden:outputs),
              - the input and output variable tables,
              - numeric variable ranges and categorical variable members.
            NOTE: this function ends with exit(), so it never returns.
            """
            self.previous_study_data_file = previous_study_data_file
            self.temporary = {}
            self.data_style = None
            # network_load fills self.structure and self.temporary from file.
            self.network_load(self.previous_study_data_file)
            num_norm_inputs, num_hidden, num_norm_outputs = self.structure
            data = self.temporary
            print 'This network is of structure : %d:%d:%d' % (num_norm_inputs, num_hidden, num_norm_outputs)
            input_data = data['var_info_input']
            output_data = data['var_info_output']
            print 'Number of input variables : %d variables' % (len(input_data))
            print 'Number of output variables : %d variables' % (len(output_data))
            print '\nList of input variables :'
            header_list = ['Line #', 'Variable Name #', 'Variable Brief', 'Variable Type']
            formatted_table = prettytable.PrettyTable(header_list)
            # temp_table = [input_data[x[:4]] for x in input_data]
            # Each variable record's first 4 fields are: line #, name, brief, type.
            for x in input_data:
                formatted_table.add_row(x[:4])
            # formatted_table.add_row(r for r in [input_data[input_data[x][:4]] for x in range(len(input_data))])
            print formatted_table
            print '\nList of output variables :'
            header_list = ['Line #', 'Variable Name #', 'Variable Brief', 'Variable Type']
            formatted_table = prettytable.PrettyTable(header_list)
            for x in output_data:
                formatted_table.add_row(x[:4])
            # temp_table = [input_data[x[:4]] for x in ouput_data]
            # formatted_table.add_row(r for r in [input_data[x[:4]] for x in input_data])
            print formatted_table
            print "\nList of numeric variables:"
            header_list = ['In/Out', 'Variable Brief', "Minimum", 'Maximum']
            formatted_table = prettytable.PrettyTable(header_list)
            # temp_table = [input_data[x[:4]] for x in input_data]
            # For numeric variables, fields [4] and [5] hold min and max.
            for input_var in input_data:
                if input_var[3] == 'Numeric':
                    formatted_table.add_row(['Input', input_var[2], input_var[4], input_var[5]])
            for input_var in output_data:
                if input_var[3] == 'Numeric':
                    formatted_table.add_row(['Output', input_var[2], input_var[4], input_var[5]])
            print formatted_table
            print "\nList of categorical variables:"
            header_list = ['In/Out', 'Variable Brief', "Members"]
            formatted_table = prettytable.PrettyTable(header_list)
            # temp_table = [input_data[x[:4]] for x in input_data]
            # For categorical variables, field [5] holds the member list.
            for input_var in input_data:
                if input_var[3] != 'Numeric':
                    formatted_table.add_row(['Input', input_var[2], input_var[5]])
            for input_var in output_data:
                if input_var[3] != 'Numeric':
                    formatted_table.add_row(['Output', input_var[2], input_var[5]])
            print formatted_table
            print '*Done*'
            # Info mode is terminal: stop the whole program here.
            exit()
            pass
if data_file == 'DrMohammadEl.nesr' and purpose == 'info':
print_net_info()
print 'All the variables are included: ' + str(self.all_variables_included)
if not self.all_variables_included:
print 'Included variables key : ' + self.variable_selection_key
self.data_file = data_file
self.num_inputs = 0
self.num_outputs = 0
# self.np_w_i_h = np.array()
if data_file == 'DrMohammadEl.nesr' and purpose not in ['advanced query', 'aq']:
print "A valid input data filename must be provided\n You provided none!\n\nProgram terminated."
exit()
if purpose.lower() in ['query', 'q', 'advanced query', 'aq']:
self.previous_study_data_file = previous_study_data_file
self.temporary = {}
self.data_style = None
query_mode = False if purpose.lower() in ['query', 'q'] else True
self.input_parameters_suggested_values = input_parameters_suggested_values
self.perform_query(query_mode)
print "\nElapsed time throughout the study: ", elapsed_time(start_time, time.time())
print "\n\n***Done***"
pass
else:
# self.data_file = data_file
self.data_file_has_titles = data_file_has_titles
self.data_file_has_brief_titles = data_file_has_brief_titles
self.data_partition = data_partition
self.activation_functions = activation_functions
self.find_activation_function = find_activation_function
self.refresh_weights_after_determining_structure = refresh_weights_after_determining_structure
self.validation_epochs = validation_epochs
self.weights_trials = weights_trials
self.layer_size_range = layer_size_range
self.relative_importance_method = relative_importance_method
self.start_time = start_time if start_time != 0 else time.time()
self.purpose = purpose.lower()
self.tolerance = tolerance
self.maximum_epochs = maximum_epochs
self.basic_learning_rate = learning_rate
self.learning_rate = learning_rate
self.annealing_value = annealing_value
self.categorical_extra_divisor = categorical_extra_divisor
self.master_error_list = []
self.adapt_learning_rate = adapt_learning_rate
# Start to manipulate data
self.source_data = Data(data_file, num_outputs,
has_titles=data_file_has_titles, has_briefs=data_file_has_brief_titles,
parent_study=self, variable_selection_string=variable_selection_key)
self.main_normalized_data = self.source_data.normalized_data
# the amount of data to be used in training
self.normalized_data = [] # self.main_normalized_data[:]
# if using_numpy:
self.np_normalized_data = np.array([])
self.structure = self.source_data.get_normalized_structure()
self.num_inputs_normalized = self.structure[0]
self.num_outputs_normalized = self.structure[2]
self.try_different_structures = try_different_structures
self.display_graph_pdf = display_graph_pdf
self.display_graph_windows = display_graph_windows
self.minimum_slope_to_consider_overfitting = minimum_slope_to_consider_overfitting
self.number_of_epochs_for_overfit_check = number_of_epochs_for_overfit_check
# Initialize an ANN
self.ann = None
# # for numpy mode
# if using_numpy:
# self.np_wih = np.zeros((self.structure[1],self.structure[0]), dtype=float)
# self.np_who = np.zeros((self.structure[2], self.structure[1]), dtype=float)
# # for numpy , the activation function is the sigmoid function
# self.np_activation_function = lambda vvv: special.expit(vvv)
# Start running the study
self.perform_study()
@staticmethod
def convert_list_to_transposed_np_array(normal_list):
"""
:type normal_list: object
"""
temp_array = []
for pair in normal_list:
temp_array.append(map(lambda x: np.array(x, ndmin=2).T, pair))
return np.array(temp_array)
def perform_query(self, advanced=False):
"""
Applies the query mode to predict outputs from inputs
@param advanced: True if advanced mode, False if normal mode
@return:
@return: pass
"""
self.start_time = time.time()
def read_cell(cel):
"""
Read data and specify if string or numeric
@param cel: data cell
@return: float of string value
"""
try:
return float(cel)
except:
return cel
def steps_of_var(lst):
""" Determine the number of steps that will be executed in a range
@rtype: tuple of two elements, the first is the number of runs of current range,
the second is a boolean value True if the values are float, and False elsewhere.
"""
i = 0
is_float = False
if len(lst) == 1:
lst = [0, lst[0], 1]
if len(lst) == 2:
lst.append(1)
if len(lst) > 3:
print "Warning:\n ========\n\n A list you input contains more than 3 elements!\n If you want to " \
"input multiple values, please use a tuple instead of a list.\ni.e. use () instead | |
Campaigning, etc.
**Implementation Phase:** Caravel partners work with group teams to strategize
products around margin levers.
### Est. Impact € 3.5-6 M/Yr ###
'''),
], className='pretty_container',
style={"background-color": "#ffffff",
"maxHeight": "350px"},
id='explain1a',
),
html.Div([
dcc.Markdown('''
###### Demonstrates margin disparity and product buckets. ######
The default view of the following interactive charts show that of all
possible combinations of thicknesses, widths, base types, treatments, colors,
polymers and product groups and families, **53 were statistically influential
on EBITDA.** Ex: Selecting all products that are described by the 10 most positively
influential of those descriptors accounts for 47% of EBITDA for 2019 and 16%
of the production volume i.e. a significant production effort is spent on
products that do not give a positive contribution to EBITDA. **All 53 descriptors
are made available here.**
------
* Descriptors can be selected from eight categories:
* thickness, width, base type, treatment, color, polymer, product family & group
* Descriptors are sorted by either best (describe high EBITDA products) or
worst (describe low EBITDA products)
* The range bar updates what descriptors are shown in the violin plot and EBITDA
by Product Family Plot as well as what is calculated in EBITDA, unique products, and volume displays
------
A violin plot of EBITDA values is constructed of each descriptor
selected by the range bar. A violin plot is a method of plotting
distributions. It is similar to a box plot, with the addition of a rotated
kernel density (kde) plot on each side. **The benefit of the kde is to visualize
the density of the data without obstructing key outliers** *(ex: 200-400K EBITDA
outliers in 2D Coil Coating and Base Type 153/07)*
Clicking on a distribution in the violin
plot expands the sunburst chart to its right. A sunburst chart is a way of
representing hierarchical data structures. In this case it is showing the
product breakdown for a given descriptor. For instance, products with base
types of 202/14 fall within the Construction category, with PVC polymer, ZZZ
treatment, and OP color. The bandwidths that lie on each ring indicate the
production volume fraction for that given descriptor while color indicates
the average EBITDA for all products described by that section of the sunburst *(ex:
in the default view, highest EBITDA base type 202/14 products have a width of 955
while lowest EBITDA have a width of 400 and each of these count for 1 production
run out of 23 for this product group).* Thickness and width can be toggled on the sunburst chart for clarity.
Descriptors in the violin plot are overlayed onto the EBITDA by Product Family
chart. In this way, product descriptors can be evaluated within the broader portfolio
*(ex: toggling the best/worst rank selector above
will alternate highlighting the high margin and negative margin products within
each family, respectively).*
'''),
], className='pretty_container',
style={"background-color": "#ffffff",
"maxHeight": "350px",
"overflow": "scroll"},
id='explain1b',
),
], className='row container-display',
),
html.Div([
html.Div([
html.H6(id='margin-new-rev'), html.P('Adjusted EBITDA')
], className='mini_container',
id='margin-rev',
),
html.Div([
html.H6(id='margin-new-rev-percent'), html.P('Unique Products')
], className='mini_container',
id='margin-rev-percent',
),
html.Div([
html.H6(id='margin-new-products'), html.P('Volume')
], className='mini_container',
id='margin-products',
),
], className='row container-display',
# style={'border-color': '#ED2222',
# 'background-color': '#aec7e8'},
),
html.Div([
html.Div([
html.P('Descriptors'),
dcc.Dropdown(id='descriptor_dropdown',
options=[{'label': 'Thickness', 'value': 'Thickness Material A'},
{'label': 'Width', 'value': 'Width Material Attri'},
{'label': 'Base Type', 'value': 'Base Type'},
{'label': 'Additional Treatment', 'value': 'Additional Treatment'},
{'label': 'Color', 'value': 'Color Group'},
{'label': 'Product Group', 'value': 'Product Group'},
{'label': 'Base Polymer', 'value': 'Base Polymer'},
{'label': 'Product Family', 'value': 'Product Family'}],
value=['Thickness Material A',
'Width Material Attri', 'Base Type',
'Additional Treatment', 'Color Group',
'Product Group',
'Base Polymer', 'Product Family'],
multi=True,
className="dcc_control"),
html.P('Number of Descriptors:', id='descriptor-number'),
dcc.RangeSlider(
id='select',
min=0,
max=stat_df.shape[0],
step=1,
value=[0,10],
),
html.P('Sort by:'),
dcc.RadioItems(
id='sort',
options=[{'label': i, 'value': j} for i, j in \
[['Low EBITDA', 'Worst'],
['High EBITDA', 'Best']]],
value='Best',
labelStyle={'display': 'inline-block'},
style={"margin-bottom": "10px"},),
html.P('Toggle Violin/Descriptor Data onto EBITDA by Product Family:'),
daq.BooleanSwitch(
id='daq-violin',
on=False,
style={"margin-bottom": "10px", "margin-left": "0px",
'display': 'inline-block'}),
], className='mini_container',
id='descriptorBlock',
),
html.Div([
dcc.Graph(
id='ebit_plot',
figure=make_ebit_plot(production_df)),
], className='mini_container',
id='ebit-family-block'
),
], className='row container-display',
),
html.Div([
html.Div([
dcc.Graph(
id='violin_plot',
figure=make_violin_plot()),
], className='mini_container',
id='violin',
),
html.Div([
dcc.Dropdown(id='length_width_dropdown',
options=[{'label': 'Thickness', 'value': 'Thickness Material A'},
{'label': 'Width', 'value': 'Width Material Attri'}],
value=['Width Material Attri'],
multi=True,
placeholder="Include in sunburst chart...",
className="dcc_control"),
dcc.Graph(
id='sunburst_plot',
figure=make_sunburst_plot()),
], className='mini_container',
id='sunburst',
),
], className='row container-display',
style={'margin-bottom': '50px'},
),
html.H5(["Margin Velocity"]),
html.Div([
html.Div([
dcc.Markdown('''
###### Key Finding: ######
There is clear segmentation in line and product families
in their margin velocity. High EBITDA per Hr product lines should be expanded
while low EBITDA per Hr product lines should be discontinued or augmented
with pricing and other levers.
'''),
], className='pretty_container',
style={"background-color": "#ffffff",
"maxHeight": "300px"},
id='explain2a',
),
html.Div([
dcc.Markdown('''
###### Looks at margin velocity by product family and line. ######
A product can have a very high margin. But if it takes 4x as long to make it
vs other products the margin velocity, and hence its value, may be much less than
previously thought.
Margin velocity gives you a sense of which products should be growing and
which ones should be removed *(ex: in the default view of the
following chart, we would like to prioritize all products appearing to the right,
(high EBITDA per Hr) pushing them further up the y-axis (Adjusted EBITDA) by
increasing their Size (production volume)).*
'''),
], className='pretty_container',
style={"background-color": "#ffffff",
"maxHeight": "300px"},
id='explain2b',
),
], className='row container-display',
),
html.Div([
html.Div([
html.Div([
html.P('X-axis'),
dcc.Dropdown(id='x-select',
options=[{'label': i, 'value': i} for i in \
['Rate', 'Yield', 'EBITDA per Hr Rank',\
'Adjusted EBITDA', 'Net Sales Quantity in KG']],
value='EBITDA per Hr Rank',),
], className='mini_container',
id='x-select-box',
),
html.Div([
html.P('Y-axis'),
dcc.Dropdown(id='y-select',
options=[{'label': i, 'value': i} for i in \
['EBITDA per Hr', 'Adjusted EBITDA',\
'Net Sales Quantity in KG']],
value='Adjusted EBITDA',),
],className='mini_container',
id='y-select-box',
),
html.Div([
html.P('Color'),
dcc.Dropdown(id='color-select',
options=[{'label': i, 'value': i} for i in \
['Line', 'Thickness Material A',\
'Width Material Attri', 'Product Family']],
value='Line',),
],className='mini_container',
id='color-select-box',
),
], className='row container-display',
),
],
),
html.Div([
dcc.Graph(
id='bubble_plot',
figure=make_bubble_chart(),
),
], className='mini_container',
style={'margin-bottom': '100px'},
),
html.H3(["Asset Performance Analysis"]),
html.Div([
html.Div([
dcc.Markdown('''
###### Key Finding: ######
If sales can not come through with additional volumes,
Lines such as E26, K06 should be considered for Consolidation. There is
evidence to suggest that consolidating these lines into higher performing
lines is possible.
**Implementation Phase:** Caravel partners will assist in unutilized capacity
being be monetized.
### Est. Impact € 2-4 M/Yr ###
'''),
], className='pretty_container',
style={"background-color": "#ffffff",
"maxHeight": "350px"},
id='explain3a',
),
html.Div([
dcc.Markdown('''
###### Explores key variables that affect rate, yield, and uptime ######
In this graphic, scores reflect whether or not a group (line or product family) is
improving uptime, rate, or yield. The statistical test is similar to that
performed for the product descriptors in the margin analysis.
While groups were determined to be statistically impactful
(null hypothesis < 0.01) it does not guarantee decoupling. For
instance, PSL has a very negative impact on rate and yield, however, the only
line that runs PSL is E28 and is rated similarly.
'''),
], className='pretty_container',
style={"background-color": "#ffffff",
"maxHeight": "350px"},
id='explain3b',
),
], className='row container-display',
),
html.Div([
dcc.Graph(
id='scores_plot',
figure=make_culprits()),
html.Pre(id='slider-data'),
html.Pre(id='click-data'),
], className='mini_container',
style={'margin-bottom': '50px'},
),
html.H5(["Line Performance"]),
html.Div([
html.Div([
dcc.Markdown('''
###### Key Finding: ######
Newest and most state-of-the-art lines are E27, K06, & K17
with stable yield, uptime, and rate performance relative to the others.
K40, E26, E28, K10 have the most upside opportunity.
'''),
], className='pretty_container',
style={"background-color": "#ffffff",
"maxHeight": "300px"},
id='explain4a',
),
html.Div([
dcc.Markdown('''
###### Quantifies the opportunity in each line in terms of equivalent days of production
Unutilized capacity should be monetized. Priority for capturing increased asset
capability should be on Lines E27, K40 -
This will take a sharper focus on true continuous improvement.
The organization tracks daily operating parameters, but there does not appear
to be a concerted effort with a project mentality on thinking in strategical
improvement terms to capture hidden plant opportunities (increases in yield, uptime and rate).
------
In the following charts, selecting a quantile on the range bar will update
the predicted upside. This effectively pushes each line into its upper quantiles
in relation to rate, yield, and uptime. Selecting a line in the Annualized opportunity
chart will pareto out product family areas.
'''),
], className='pretty_container',
style={"background-color": "#ffffff",
"maxHeight": "300px"},
id='explain4b',
),
], className='row container-display',
),
html.Div([
html.Div([
html.H6(id='new-rev'), html.P('Total Days of Production Saved')
], className='mini_container',
id='rev',
),
html.Div([
html.H6(id='new-rev-percent'), html.P('Rate (days)')
], className='mini_container',
id='rev-percent',
),
html.Div([
html.H6(id='new-products'), html.P('Yield (days)')
], className='mini_container',
id='products',
),
html.Div([
html.H6(id='new-products-percent'), html.P('Uptime (days)')
], className='mini_container',
id='products-percent',
),
], className='row container-display'
),
html.Div([
html.Div([
html.H6(id='slider-selection'),
dcc.Slider(id='quantile_slider',
min=0.51,
max=0.99,
step=0.01,
value=.82,
included=False,
className="dcc_control"),
dcc.Graph(
id='bar_plot',
figure=make_days_plot()),
], className='mini_container',
id='opportunity',
),
], className='row container-display',
),
html.Div([
html.Div([
dcc.Graph(
id='pareto_plot',
figure=pareto_product_family())
], className='mini_container',
id='pareto',
),
html.Div([
dcc.Graph(
id='pie_plot',
| |
<reponame>huadream/networking-vsphere
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_config import cfg
from neutron.agent.common import ovs_lib
from neutron.conf.agent import common as config
from neutron_lib import constants
from networking_vsphere.drivers import ovs_firewall as ovs_fw
from networking_vsphere.tests import base
# Minimal port dict as delivered by the security-group RPC: identity
# fields plus a single ingress TCP rule covering dest ports 2001-2009.
fake_port = {'security_group_source_groups': 'abc',
             'mac_address': '00:11:22:33:44:55',
             'network_id': "netid",
             'id': "123",
             'security_groups': "abc",
             'lvid': "100",
             'sg_provider_rules': [],
             'security_group_rules_deleted': [],
             'security_group_rules': [
                 {"direction": "ingress",
                  "protocol": "tcp",
                  "port_range_min": 2001,
                  "port_range_max": 2009,
                  "source_port_range_min": 67,
                  "source_port_range_max": 68,
                  "ethertype": "IPv4",
                  "source_ip_prefix": "192.168.127.12/22",
                  "dest_ip_prefix": "192.168.3.11/22"}]}
# Expected "compact" form of fake_port: rule lists stripped and a
# 'device' key added (mirrors OVSFirewallDriver._get_compact_port()).
fake_res_port = {'security_group_source_groups': 'abc',
                 'mac_address': '00:11:22:33:44:55',
                 'network_id': "netid",
                 'id': "123",
                 'security_groups': "abc",
                 'lvid': "100",
                 'device': "123"}
# Flow cookie derived from the port id, masked to 64 bits for OVS.
cookie = ("0x%x" % (hash("123") & 0xffffffffffffffff))
class TestOVSFirewallDriver(base.TestCase):
    @mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
                '_mod_ovs_flow')
    @mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
                '_modify_tcp_and_udp_learning_flows')
    @mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
                'check_ovs_firewall_restart')
    @mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
                'setup_base_flows')
    @mock.patch('neutron.agent.common.ovs_lib.OVSBridge.create')
    @mock.patch('neutron.agent.common.ovs_lib.OVSBridge.set_secure_mode')
    @mock.patch('neutron.agent.common.ovs_lib.OVSBridge.get_port_ofport')
    @mock.patch('neutron.agent.ovsdb.impl_idl.api_factory')
    def setUp(self, mock_ovsdb_api, mock_get_port_ofport, mock_set_secure_mode,
              mock_create_ovs_bridge, mock_setup_base_flows,
              mock_check_ovs_firewall_restart,
              mock_modify_tcp_and_udp_learning_flows,
              mock_mod_ovs_flow):
        """Build an OVSFirewallDriver with all externals stubbed out.

        The decorators patch the ovsdb API factory, OVS bridge plumbing and
        the driver's own flow-initialisation hooks so the constructor runs
        without a live Open vSwitch.  Mock arguments arrive in reverse
        decorator order (bottom decorator -> first argument).
        """
        super(TestOVSFirewallDriver, self).setUp()
        config.register_root_helper(cfg.CONF)
        # Point the driver at a fake security bridge / interface pair.
        cfg.CONF.set_override('security_bridge_mapping',
                              "fake_sec_br:fake_if", 'SECURITYGROUP')
        mock_get_port_ofport.return_value = 5
        self.ovs_firewall = ovs_fw.OVSFirewallDriver()
        # Replace the real security bridge with a Mock; tests patch its
        # deferred() to hand back this DeferredOVSBridge wrapper.
        self.ovs_firewall.sg_br = mock.Mock()
        self.mock_br = ovs_lib.DeferredOVSBridge(self.ovs_firewall.sg_br)
        self.LOG = ovs_fw.LOG
def test_get_compact_port(self):
compact_port = {'security_group_source_groups': 'abc',
'mac_address': '00:11:22:33:44:55',
'network_id': "netid",
'id': "123",
'device': "123",
'security_groups': "abc",
'lvid': "100"}
res = self.ovs_firewall._get_compact_port(fake_port)
self.assertEqual(compact_port, res)
def test_remove_ports_from_provider_cache(self):
self.ovs_firewall.provider_port_cache = set(['123', '124', '125'])
self.ovs_firewall.remove_ports_from_provider_cache(['123', '125'])
self.assertEqual(set(['124']), self.ovs_firewall.provider_port_cache)
self.ovs_firewall.provider_port_cache = set(['123', '124', '125'])
self.ovs_firewall.remove_ports_from_provider_cache(['121', '125'])
self.assertEqual(set(['123', '124']),
self.ovs_firewall.provider_port_cache)
    def test_add_ovs_flow(self):
        """A minimal flow carries only priority, actions and table."""
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
            self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal")
            mock_add_flow.assert_called_with(priority=0, actions='normal',
                                             table=1)
    def test_add_ovs_flow_with_protocol(self):
        """A given protocol is forwarded as the 'proto' match field."""
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
            # rule with protocol
            self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
                                            protocol="arp")
            mock_add_flow.assert_called_with(table=1, priority=0,
                                             proto="arp", actions="normal")
    def test_add_ovs_flow_with_dest_mac(self):
        """The dl_dest argument is mapped onto the dl_dst match field."""
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
            # rule with dl_dest
            dest_mac = "01:00:00:00:00:00"
            self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
                                            dl_dest=dest_mac)
            mock_add_flow.assert_called_with(table=1, priority=0,
                                             dl_dst=dest_mac,
                                             actions="normal")
    def test_add_ovs_flow_with_tcpflag(self):
        """A tcp_flag implies proto tcp plus the tcp_flags match."""
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
            # rule with tcp_flags
            t_flag = "+rst"
            self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
                                            tcp_flag=t_flag)
            mock_add_flow.assert_called_with(table=1, priority=0,
                                             proto=constants.PROTO_NAME_TCP,
                                             tcp_flags=t_flag,
                                             actions="normal")
    def test_add_ovs_flow_with_icmptype(self):
        """An icmp_req_type implies proto icmp plus the icmp_type match."""
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
            # rule with icmp_req_type
            self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
                                            icmp_req_type=11)
            mock_add_flow.assert_called_with(table=1, priority=0,
                                             proto=constants.PROTO_NAME_ICMP,
                                             icmp_type=11, actions="normal")
    def test_mod_ovs_flow(self):
        """_mod_ovs_flow modifies a flow without setting a priority."""
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'mod_flow') as mock_mod_flow:
            self.ovs_firewall._mod_ovs_flow(self.mock_br, 1, "normal")
            mock_mod_flow.assert_called_with(actions='normal', table=1)
    def test_mod_ovs_flow_with_protocol(self):
        """A given protocol is forwarded to mod_flow as 'proto'."""
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'mod_flow') as mock_mod_flow:
            # rule with protocol
            self.ovs_firewall._mod_ovs_flow(self.mock_br, 1, "normal",
                                            protocol="arp")
            mock_mod_flow.assert_called_with(table=1,
                                             proto="arp", actions="normal")
    def test_mod_ovs_flow_with_tcpflag(self):
        """A tcp_flag implies proto tcp plus tcp_flags on mod_flow."""
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'mod_flow') as mock_mod_flow:
            # rule with tcp_flags
            t_flag = "+rst"
            self.ovs_firewall._mod_ovs_flow(self.mock_br, 1, "normal",
                                            tcp_flag=t_flag)
            mock_mod_flow.assert_called_with(table=1,
                                             proto=constants.PROTO_NAME_TCP,
                                             tcp_flags=t_flag,
                                             actions="normal")
def test_add_ports_to_filter(self):
self.ovs_firewall.filtered_ports = {}
self.ovs_firewall.add_ports_to_filter([fake_port])
self.assertIsNotNone(self.ovs_firewall.filtered_ports)
ret_port = self.ovs_firewall.filtered_ports["123"]
self.assertEqual(fake_res_port, ret_port)
    def test_setup_aap_flows(self):
        """One flow is installed per allowed-address-pair entry."""
        port_with_app = copy.deepcopy(fake_port)
        key = "allowed_address_pairs"
        port_with_app[key] = [{'ip_address': '10.0.0.2',
                               'mac_address': 'aa:bb:cc:dd:ee:aa'},
                              {'ip_address': '10.0.0.3',
                               'mac_address': 'aa:bb:cc:dd:ee:ab'}]
        with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
                               return_value=100), \
                mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                                  return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
            self.ovs_firewall._setup_aap_flows(self.mock_br, port_with_app)
            # two address pairs -> two flows
            self.assertEqual(2, mock_add_flow.call_count)
    def test_setup_aap_flows_invalid_call(self):
        """No flows are installed for a port without address pairs."""
        port_with_app = copy.deepcopy(fake_port)
        with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
                               return_value=100), \
                mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                                  return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
            self.ovs_firewall._setup_aap_flows(self.mock_br, port_with_app)
            self.assertFalse(mock_add_flow.called)
def test_get_net_prefix_len(self):
ip_addr = "192.168.127.12/22"
length = self.ovs_firewall._get_net_prefix_len(ip_addr)
self.assertNotEqual(0, length)
ip_addr = None
length = self.ovs_firewall._get_net_prefix_len(ip_addr)
self.assertEqual(0, length)
def test_get_protocol(self):
proto = self.ovs_firewall._get_protocol("IPv4", None)
self.assertEqual(['ip'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", None)
self.assertEqual(['ipv6'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", 'icmp')
self.assertEqual(['icmp6'], proto)
proto = self.ovs_firewall._get_protocol("IPv4", 'icmp')
self.assertEqual(['icmp'], proto)
proto = self.ovs_firewall._get_protocol("IPv4", 'udp')
self.assertEqual(['udp'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", 'tcp')
self.assertEqual(['tcp'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", 'unknown')
self.assertEqual(['ipv6', 'unknown'], proto)
def test_add_flow_with_range(self):
flow = {"priority": 1}
res_flow = {"priority": 1,
"tp_dst": 1,
"tp_src": 1}
port = fake_port
direction = "fake_direction"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall,
'_do_flows_action_on_sec_br'
) as mock_do_flows_action_on_sec_br:
self.ovs_firewall._add_flow_with_range(self.mock_br, port, flow,
direction, 1, 2, 1, 2)
mock_do_flows_action_on_sec_br.called_with(res_flow)
self.assertEqual(4, mock_do_flows_action_on_sec_br.call_count)
    def test_add_flow_with_multiple_range(self):
        """A 3x2 source/dest port range expands into 6 flow actions."""
        flow = {"priority": 1}
        port = fake_port
        direction = "fake_direction"
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.ovs_firewall,
                                  '_do_flows_action_on_sec_br'
                                  ) as mock_do_flows_action_on_sec_br:
            self.ovs_firewall._add_flow_with_range(self.mock_br, port, flow,
                                                   direction, 1, 3, 1, 2)
            # 3 dest ports x 2 source ports = 6 flow actions
            self.assertEqual(6, mock_do_flows_action_on_sec_br.call_count)
    def test_add_flow_with_range_all_ports(self):
        """The full 1-65535 port range collapses into a single flow."""
        flow = {"priority": 1}
        port = fake_port
        direction = "fake_direction"
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.ovs_firewall,
                                  '_do_flows_action_on_sec_br'
                                  ) as mock_do_flows_action_on_sec_br:
            self.ovs_firewall._add_flow_with_range(self.mock_br, port,
                                                   flow, direction, 1, 65535)
            self.assertEqual(1, mock_do_flows_action_on_sec_br.call_count)
    def test_add_flow_with_range_some_ports(self):
        """A partial dest range emits one flow per destination port."""
        flow = {"priority": 1}
        port = fake_port
        direction = "fake_direction"
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.ovs_firewall,
                                  '_do_flows_action_on_sec_br'
                                  ) as mock_do_flows_action_on_sec_br:
            self.ovs_firewall._add_flow_with_range(self.mock_br, port,
                                                   flow, direction, 1, 100)
            self.assertEqual(100, mock_do_flows_action_on_sec_br.call_count)
    def test_add_flows_to_sec_br_ingress_direction(self):
        """Ingress direction installs exactly one flow."""
        flows = {}
        port = fake_port
        direction = "ingress"
        with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                               return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
            self.ovs_firewall._add_flows_to_sec_br(self.mock_br, port,
                                                   flows, direction)
            self.assertTrue(mock_add_flow.called)
            self.assertEqual(1, mock_add_flow.call_count)
def test_add_flows_to_sec_br_egress_direction(self):
flows = {}
port = fake_port
flows['dl_src'] = '01:02:03:04:05:06'
flows['proto'] = 'ip'
flows['dl_vlan'] = 25
port['fixed_ips'] = [u'192.168.3.11']
direction = "egress"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows_to_sec_br(self.mock_br, port,
flows, direction)
self.assertTrue(mock_add_flow.called)
self.assertEqual(2, mock_add_flow.call_count)
def test_add_flows_to_sec_br_egress_direction_multiple_fixed_ips(self):
flows = {}
port = fake_port
flows['dl_src'] = '01:02:03:04:05:06'
flows['proto'] = 'ip'
flows['dl_vlan'] = 25
port['fixed_ips'] = [u'192.168.3.11', u'172.16.31.10']
direction = "egress"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows_to_sec_br(self.mock_br, port,
flows, direction)
self.assertTrue(mock_add_flow.called)
self.assertEqual(4, mock_add_flow.call_count)
    def test_add_flows_call_no_vlan(self):
        """A missing local VLAN aborts flow setup and logs an error."""
        port_with_app = copy.deepcopy(fake_port)
        with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
                               return_value=None), \
                mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                                  return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow,\
                mock.patch.object(self.LOG, 'error') as mock_error_log:
            self.ovs_firewall._add_flows(self.mock_br, port_with_app, cookie)
            self.assertFalse(mock_add_flow.called)
            self.assertTrue(mock_error_log.called)
    def test_add_flows_call_tcp(self):
        """TCP rules go through the ranged-flow path."""
        port = copy.deepcopy(fake_port)
        with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
                               return_value=100) as mock_get_vlan, \
                mock.patch.object(self.ovs_firewall, '_get_protocol',
                                  return_value=['tcp']) as mock_get_proto, \
                mock.patch.object(self.ovs_firewall,
                                  '_do_flows_action_with_range'
                                  ) as mock_add_range_flows, \
                mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
                                  return_value=self.mock_br), \
                mock.patch.object(self.mock_br, 'add_flow'):
            self.ovs_firewall._add_flows(self.mock_br, port, cookie)
            self.assertTrue(mock_get_vlan.called)
            self.assertTrue(mock_get_proto.called)
            self.assertTrue(mock_add_range_flows.called)
def test_add_flows_call_normal(self):
port = copy.deepcopy(fake_port)
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall, '_get_protocol',
return_value=['ip']) as mock_get_proto, \
mock.patch.object(self.ovs_firewall,
'_do_flows_action_with_range'
) as mock_add_range_flows, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows(self.mock_br, port, cookie)
self.assertTrue(mock_get_vlan.called)
self.assertTrue(mock_get_proto.called)
self.assertFalse(mock_add_range_flows.called)
self.assertTrue(mock_add_flow.called)
def test_prepare_port_filter(self):
self.ovs_firewall.provider_port_cache = set()
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows'
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn, \
mock.patch.object(self.mock_br, 'add_flow'):
self.ovs_firewall.prepare_port_filter(fake_port)
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
mock_add_flow_fn.assert_called_with(self.mock_br, fake_port,
cookie)
self.assertEqual(2, mock_add_flow_fn.call_count)
ret_port = self.ovs_firewall.filtered_ports['123']
self.assertEqual(fake_res_port, ret_port)
self.assertEqual(set(['123']),
self.ovs_firewall.provider_port_cache)
def test_prepare_port_filter_exception(self):
self.ovs_firewall.provider_port_cache = set()
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows',
side_effect=Exception()
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn, \
mock.patch.object(self.LOG, 'exception'
) as mock_exception_log:
self.ovs_firewall.prepare_port_filter(fake_port)
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
self.assertFalse(mock_add_flow_fn.called)
self.assertTrue(mock_exception_log.called)
self.assertEqual(set(), self.ovs_firewall.provider_port_cache)
def test_remove_only_tenant_flows(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'delete_flows'
) as mock_del_flows:
self.ovs_firewall._remove_all_flows(self.mock_br, "123")
self.assertTrue(mock_get_vlan.called)
self.assertEqual(4, mock_del_flows.call_count)
def test_remove_all_flows(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'delete_flows'
) as mock_del_flows:
self.ovs_firewall._remove_all_flows(self.mock_br, "123", True)
self.assertTrue(mock_get_vlan.called)
self.assertEqual(7, mock_del_flows.call_count)
def test_remove_flows_invalid_port(self):
res_port = copy.deepcopy(fake_res_port)
res_port.pop('mac_address')
self.ovs_firewall.filtered_ports["123"] = res_port
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'delete_flows'
) as mock_del_flows, \
mock.patch.object(self.LOG, 'debug') as mock_debug_log:
self.ovs_firewall._remove_all_flows(self.mock_br, "123")
self.assertTrue(mock_get_vlan.called)
self.assertEqual(1, mock_del_flows.call_count)
self.assertEqual(2, mock_debug_log.call_count)
def test_clean_port_filters(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_all_flows'
) as mock_rem_flow:
self.ovs_firewall.clean_port_filters(["123"])
mock_rem_flow.assert_called_with(self.mock_br, "123")
self.assertIn("123", self.ovs_firewall.filtered_ports)
def test_clean_port_filters_remove_port(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_all_flows'
) as mock_rem_flow:
self.ovs_firewall.clean_port_filters(["123"], True)
mock_rem_flow.assert_called_with(self.mock_br, "123", True)
self.assertNotIn("123", self.ovs_firewall.filtered_ports)
self.assertNotIn("123", self.ovs_firewall.provider_port_cache)
def test_clean_port_filters_exception(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_all_flows',
side_effect=Exception()
) as mock_rem_flow, \
mock.patch.object(self.LOG, 'exception'
) as mock_exception_log:
self.ovs_firewall.clean_port_filters(["123"], True)
mock_rem_flow.assert_called_with(self.mock_br, "123", True)
self.assertTrue(mock_exception_log.called)
self.assertIn("123", self.ovs_firewall.provider_port_cache)
self.assertIn("123", self.ovs_firewall.filtered_ports)
def test_normal_update_port_filters(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows'
) as mock_rem_flow, \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows'
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn:
self.ovs_firewall.update_port_filter(fake_port)
mock_rem_flow.assert_called_with(self.mock_br, fake_port,
cookie)
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
mock_add_flow_fn.assert_called_with(self.mock_br, fake_port,
cookie)
self.assertEqual(1, mock_add_flow_fn.call_count)
self.assertIn("123", self.ovs_firewall.filtered_ports)
def test_update_port_filters_for_provider_update(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set()
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows'
) as mock_rem_flow, \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows'
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn:
self.ovs_firewall.update_port_filter(fake_port)
mock_rem_flow.assert_called_with(self.mock_br, fake_port,
cookie)
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
mock_add_flow_fn.assert_called_with(self.mock_br, fake_port,
cookie)
self.assertEqual(2, mock_add_flow_fn.call_count)
self.assertIn("123", self.ovs_firewall.filtered_ports)
self.assertIn("123", self.ovs_firewall.provider_port_cache)
def test_update_port_filters_exception(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows',
side_effect=Exception()) as mock_rem_flow, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn, \
mock.patch.object(self.LOG, 'exception'
) as | |
<gh_stars>0
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from datetime import datetime, timedelta
import re
import koji
import mock
from mock import call, patch
import pytest
from module_build_service.common.config import conf
from module_build_service.common import models
from module_build_service.scheduler import producer
from module_build_service.scheduler.db_session import db_session
from tests import clean_database, make_module_in_db
@pytest.mark.usefixtures("reuse_component_init_data")
@patch(
    "module_build_service.builder.GenericBuilder.default_buildroot_groups",
    return_value={"build": [], "srpm-build": []},
)
@patch("module_build_service.builder.GenericBuilder.create_from_module")
class TestPoller:
    """Tests for the scheduler's periodic poller tasks.

    The class-level patches inject ``create_builder`` and ``dbg`` mocks into
    every test method (in reverse decorator order), so no real Koji builder
    is ever created.
    """
    def setup_method(self, test_method):
        # Stub out Koji profile reading so no real Koji config file is needed.
        self.p_read_config = patch(
            "koji.read_config",
            return_value={
                "authtype": "kerberos",
                "timeout": 60,
                "server": "http://koji.example.com/",
            },
        )
        self.mock_read_config = self.p_read_config.start()
    def teardown_method(self, test_method):
        self.p_read_config.stop()
        clean_database()
    @pytest.mark.parametrize("fresh", [True, False])
    @patch("module_build_service.scheduler.batches.start_build_component")
    def test_process_paused_module_builds(
        self, start_build_component, create_builder, dbg, fresh
    ):
        """
        Tests general use-case of process_paused_module_builds.
        """
        builder = mock.MagicMock()
        create_builder.return_value = builder
        # Change the batch to 2, so the module build is in state where
        # it is not building anything, but the state is "build".
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        module_build.batch = 2
        # If fresh is set, then we simulate that activity just occurred 2 minutes ago on the build
        if fresh:
            module_build.time_modified = datetime.utcnow() - timedelta(minutes=2)
        else:
            module_build.time_modified = datetime.utcnow() - timedelta(days=5)
        db_session.commit()
        # Poll :)
        producer.process_paused_module_builds()
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        # If fresh is set, we expect the poller to not touch the module build since it's been less
        # than 10 minutes of inactivity
        if fresh:
            expected_state = None
            expected_build_calls = 0
        else:
            expected_state = koji.BUILD_STATES["BUILDING"]
            expected_build_calls = 2
        components = module_build.current_batch()
        for component in components:
            assert component.state == expected_state
        assert len(start_build_component.mock_calls) == expected_build_calls
    @pytest.mark.parametrize('task_state, expect_start_build_component', (
        (None, True),  # Indicates a newRepo task has not been triggered yet.
        (koji.TASK_STATES["CLOSED"], True),
        (koji.TASK_STATES["OPEN"], False),
    ))
    @patch("module_build_service.scheduler.batches.start_build_component")
    def test_process_paused_module_builds_with_new_repo_task(
        self, start_build_component, create_builder, dbg, task_state,
        expect_start_build_component
    ):
        """
        Tests that a paused module build is only resumed once any pending
        newRepo task has finished (or none was triggered at all).
        """
        builder = mock.MagicMock()
        create_builder.return_value = builder
        # Change the batch to 2, so the module build is in state where
        # it is not building anything, but the state is "build".
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        module_build.batch = 2
        module_build.time_modified = datetime.utcnow() - timedelta(days=5)
        if task_state:
            # Simulate a previously triggered newRepo task in the given state.
            koji_session = mock.MagicMock()
            koji_session.getTaskInfo.return_value = {"state": task_state}
            builder.koji_session = koji_session
            module_build.new_repo_task_id = 123
        db_session.commit()
        # Poll :)
        producer.process_paused_module_builds()
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        if expect_start_build_component:
            expected_state = koji.BUILD_STATES["BUILDING"]
            expected_build_calls = 2
        else:
            expected_state = None
            expected_build_calls = 0
        components = module_build.current_batch()
        for component in components:
            assert component.state == expected_state
        assert len(start_build_component.mock_calls) == expected_build_calls
    @patch("koji.ClientSession")
    def test_retrigger_new_repo_on_failure(self, ClientSession, create_builder, dbg):
        """
        Tests that we call koji_session.newRepo when newRepo task failed.
        """
        koji_session = ClientSession.return_value
        koji_session.getTag = lambda tag_name: {"name": tag_name}
        koji_session.getTaskInfo.return_value = {"state": koji.TASK_STATES["FAILED"]}
        koji_session.newRepo.return_value = 123456
        builder = mock.MagicMock()
        builder.buildroot_ready.return_value = False
        create_builder.return_value = builder
        # Change the batch to 2, so the module build is in state where
        # it is not building anything, but the state is "build".
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        module_build.batch = 2
        module_build.new_repo_task_id = 123456
        db_session.commit()
        producer.retrigger_new_repo_on_failure()
        koji_session.newRepo.assert_called_once_with(
            "module-testmodule-master-20170219191323-c40c156c-build")
    @patch("koji.ClientSession")
    def test_trigger_new_repo_when_succeeded(self, ClientSession, create_builder, dbg):
        """
        Tests that we do not call koji_session.newRepo when newRepo task
        succeeded.
        """
        koji_session = ClientSession.return_value
        koji_session.getTag = lambda tag_name: {"name": tag_name}
        koji_session.getTaskInfo.return_value = {"state": koji.TASK_STATES["CLOSED"]}
        koji_session.newRepo.return_value = 123456
        builder = mock.MagicMock()
        builder.buildroot_ready.return_value = False
        create_builder.return_value = builder
        # Change the batch to 2, so the module build is in state where
        # it is not building anything, but the state is "build".
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        module_build.batch = 2
        module_build.new_repo_task_id = 123456
        db_session.commit()
        producer.retrigger_new_repo_on_failure()
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        assert not koji_session.newRepo.called
        assert module_build.new_repo_task_id == 123456
    def test_process_paused_module_builds_waiting_for_repo(self, create_builder, dbg):
        """
        Tests that process_paused_module_builds does not start new batch
        when we are waiting for repo.
        """
        builder = mock.MagicMock()
        create_builder.return_value = builder
        # Change the batch to 2, so the module build is in state where
        # it is not building anything, but the state is "build".
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        module_build.batch = 2
        # A pending newRepo task id means the build still waits for a repo.
        module_build.new_repo_task_id = 123456
        db_session.commit()
        # Poll :)
        producer.process_paused_module_builds()
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        # Components should not be in building state
        components = module_build.current_batch()
        for component in components:
            assert component.state is None
    @patch("koji.ClientSession")
    def test_old_build_targets_are_not_associated_with_any_module_builds(
        self, ClientSession, create_builder, dbg
    ):
        """Build targets whose tags match no module build are not deleted."""
        koji_session = ClientSession.return_value
        # No created module build has any of these tags.
        koji_session.getBuildTargets.return_value = [
            {"dest_tag_name": "module-xxx-1"},
            {"dest_tag_name": "module-yyy-2"},
        ]
        producer.delete_old_koji_targets()
        koji_session.deleteBuildTarget.assert_not_called()
    @patch("koji.ClientSession")
    def test_dont_delete_base_module_build_target(
        self, ClientSession, create_builder, dbg
    ):
        """Build targets belonging to base modules must never be deleted."""
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        koji_session = ClientSession.return_value
        # This target's destination tag belongs to an existing module build.
        koji_session.getBuildTargets.return_value = [{"dest_tag_name": module_build.koji_tag}]
        # If module build's name is one of base module names, build target
        # should not be deleted.
        with patch.object(conf, "base_module_names", new=[module_build.name]):
            producer.delete_old_koji_targets()
            koji_session.deleteBuildTarget.assert_not_called()
    @patch("koji.ClientSession")
    def test_dont_delete_build_target_for_unfinished_module_builds(
        self, ClientSession, create_builder, dbg
    ):
        """Build targets of unfinished module builds are preserved."""
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        koji_session = ClientSession.return_value
        # This target's destination tag belongs to an existing module build.
        koji_session.getBuildTargets.return_value = [{"dest_tag_name": module_build.koji_tag}]
        # Each time when a module build is in one of these state, build target
        # should not be deleted.
        for state in ["init", "wait", "build"]:
            module_build.state = state
            db_session.commit()
            producer.delete_old_koji_targets()
            koji_session.deleteBuildTarget.assert_not_called()
    @patch("koji.ClientSession")
    def test_only_delete_build_target_with_allowed_koji_tag_prefix(
        self, ClientSession, create_builder, dbg
    ):
        """Only targets whose tag carries an allowed prefix get deleted."""
        module_build_2 = models.ModuleBuild.get_by_id(db_session, 2)
        # Only module build 2's build target should be deleted.
        module_build_2.koji_tag = "module-tag1"
        module_build_2.state = models.BUILD_STATES["done"]
        # Ensure to exceed the koji_target_delete_time easily later for deletion
        module_build_2.time_completed = datetime.utcnow() - timedelta(hours=24)
        module_build_3 = models.ModuleBuild.get_by_id(db_session, 3)
        # "f28" matches no allowed prefix, so this target must survive.
        module_build_3.koji_tag = "f28"
        db_session.commit()
        db_session.refresh(module_build_2)
        db_session.refresh(module_build_3)
        koji_session = ClientSession.return_value
        # Both targets map onto existing module builds.
        koji_session.getBuildTargets.return_value = [
            {"id": 1, "dest_tag_name": module_build_2.koji_tag, "name": module_build_2.koji_tag},
            {"id": 2, "dest_tag_name": module_build_3.koji_tag, "name": module_build_3.koji_tag},
        ]
        with patch.object(conf, "koji_tag_prefixes", new=["module", "another-prefix"]):
            with patch.object(conf, "koji_target_delete_time", new=60):
                producer.delete_old_koji_targets()
        koji_session.deleteBuildTarget.assert_called_once_with(1)
        koji_session.krb_login.assert_called_once()
    @patch("koji.ClientSession")
    def test_cant_delete_build_target_if_not_reach_delete_time(
        self, ClientSession, create_builder, dbg
    ):
        """Targets are kept while still inside the configured grace period."""
        module_build_2 = models.ModuleBuild.get_by_id(db_session, 2)
        module_build_2.koji_tag = "module-tag1"
        module_build_2.state = models.BUILD_STATES["done"]
        # Completed only five minutes ago, well within the delete window.
        module_build_2.time_completed = datetime.utcnow() - timedelta(minutes=5)
        db_session.commit()
        db_session.refresh(module_build_2)
        koji_session = ClientSession.return_value
        # This target maps onto the just-completed module build.
        koji_session.getBuildTargets.return_value = [
            {"id": 1, "dest_tag_name": module_build_2.koji_tag, "name": module_build_2.koji_tag}
        ]
        with patch.object(conf, "koji_tag_prefixes", new=["module"]):
            # Use default koji_target_delete_time in config. That time is long
            # enough for test.
            producer.delete_old_koji_targets()
            koji_session.deleteBuildTarget.assert_not_called()
    @pytest.mark.parametrize("state", ["init", "wait"])
    @patch.dict(producer.ON_MODULE_CHANGE_HANDLERS, clear=True, values={
        models.BUILD_STATES["init"]: mock.Mock(),
        models.BUILD_STATES["wait"]: mock.Mock(),
    })
    def test_process_waiting_module_build(self, create_builder, dbg, state):
        """ Test that processing old waiting module builds works. """
        handler = producer.ON_MODULE_CHANGE_HANDLERS[models.BUILD_STATES[state]]
        # Put the build into the parametrized state and make it look stale
        # (modified more than ten minutes ago).
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        module_build.state = models.BUILD_STATES[state]
        original = datetime.utcnow() - timedelta(minutes=11)
        module_build.time_modified = original
        db_session.commit()
        db_session.refresh(module_build)
        # Poll :)
        producer.process_waiting_module_builds()
        handler.delay.assert_called_once_with(
            "internal:mbs.module.state.change",
            module_build.id,
            module_build.state
        )
        db_session.refresh(module_build)
        # ensure the time_modified was changed.
        assert module_build.time_modified > original
    @pytest.mark.parametrize("state", ["init", "wait"])
    @patch.dict(producer.ON_MODULE_CHANGE_HANDLERS, clear=True, values={
        models.BUILD_STATES["init"]: mock.Mock(),
        models.BUILD_STATES["wait"]: mock.Mock(),
    })
    def test_process_waiting_module_build_not_old_enough(
        self, create_builder, dbg, state
    ):
        """ Test that we do not process young waiting builds. """
        handler = producer.ON_MODULE_CHANGE_HANDLERS[models.BUILD_STATES[state]]
        # Put the build into the parametrized state, modified only nine
        # minutes ago -- below the ten-minute staleness threshold.
        module_build = models.ModuleBuild.get_by_id(db_session, 3)
        module_build.state = models.BUILD_STATES[state]
        original = datetime.utcnow() - timedelta(minutes=9)
        module_build.time_modified = original
        db_session.commit()
        db_session.refresh(module_build)
        # Poll :)
        producer.process_waiting_module_builds()
        handler.assert_not_called()
    @patch.dict(producer.ON_MODULE_CHANGE_HANDLERS, clear=True, values={
        models.BUILD_STATES["init"]: mock.Mock(),
        models.BUILD_STATES["wait"]: mock.Mock(),
    })
    def test_process_waiting_module_build_none_found(self, create_builder, dbg):
        """ Test nothing happens when no module builds are waiting. """
        # Poll :)
        producer.process_waiting_module_builds()
        # Ensure we did *not* process any of the non-waiting builds.
        for handler in producer.ON_MODULE_CHANGE_HANDLERS.values():
            handler.assert_not_called()
    def test_cleanup_stale_failed_builds(self, create_builder, dbg):
        """ Test that one of the two module builds gets to the garbage state when running
        cleanup_stale_failed_builds.
        """
        builder = mock.MagicMock()
        create_builder.return_value = builder
        # Build 2 failed longer ago than the cleanup threshold ...
        module_build_one = models.ModuleBuild.get_by_id(db_session, 2)
        module_build_one.state = models.BUILD_STATES["failed"]
        module_build_one.time_modified = datetime.utcnow() - timedelta(
            days=conf.cleanup_failed_builds_time + 1)
        # ... while build 3 failed just now and must be left untouched.
        module_build_two = models.ModuleBuild.get_by_id(db_session, 3)
        module_build_two.time_modified = datetime.utcnow()
        module_build_two.state = models.BUILD_STATES["failed"]
        failed_component = db_session.query(models.ComponentBuild).filter_by(
            package="tangerine", module_id=3).one()
        failed_component.state = koji.BUILD_STATES["FAILED"]
        failed_component.tagged = False
        failed_component.tagged_in_final = False
        db_session.commit()
        producer.cleanup_stale_failed_builds()
        db_session.refresh(module_build_two)
        # Make sure module_build_one was transitioned to garbage
        assert module_build_one.state == models.BUILD_STATES["garbage"]
        state_reason = (
            "The module was garbage collected since it has failed over {0} day(s) ago"
            .format(conf.cleanup_failed_builds_time)
        )
        assert module_build_one.state_reason == state_reason
        # Make sure all the components are marked as untagged in the database
        for component in module_build_one.component_builds:
            assert not component.tagged
| |
import os
import sys
import types
import logging
import inspect
import weakref
import traceback
import importlib
from maya import cmds, OpenMaya
from pyblish import api as pyblish
from .vendor.Qt import QtCore, QtWidgets
from . import (
lib,
Session,
_registered_root,
_registered_config,
_registered_plugins,
_registered_plugin_paths,
_registered_event_handlers,
)
from .vendor import six
# Module-level logger for this package.
log = logging.getLogger(__name__)
# Keep mutable module state on the module object itself so it survives
# `reload()` of this module.
self = sys.modules[__name__]
self._menu = "jiminymaya" # Unique name of menu
self._events = dict() # Registered Maya callbacks
self._parent = None # Main Window
# True when Maya runs without a GUI (mayapy/batch mode).
IS_HEADLESS = not hasattr(cmds, "about") or cmds.about(batch=True)
def install():
    """Install the pipeline into the current Maya session.

    Registers scene callbacks, builds the menu (GUI sessions only),
    registers Maya as the pyblish host and installs the project
    configuration found via `find_config()`.
    """
    log.info("Jiminy Cricket, at your service.")
    _register_callbacks()
    if not IS_HEADLESS:
        _install_menu()
    pyblish.register_host("maya")
    config = find_config()
    config.install()
    register_config(config)
def uninstall():
    """Undo `install()`: uninstall the config and tear down the menu."""
    log.info("Farewell, my friend.")
    config = registered_config()
    config.uninstall()
    deregister_config()
    if not IS_HEADLESS:
        _uninstall_menu()
def find_config():
    """Import and return the config module named by $JIMINY_DRESS.

    Raises:
        EnvironmentError: When the variable is unset or empty.
    """
    log.info("Finding configuration for project..")
    config_name = os.environ.get("JIMINY_DRESS")
    if not config_name:
        raise EnvironmentError("No configuration found.")
    log.info("Found %s, loading.." % config_name)
    return importlib.import_module(config_name)
def register_config(config):
    """Register `config` (a config module) as the active configuration."""
    _registered_config["_"] = config
def deregister_config():
    """Opposite of `register_config()`; clears the active configuration."""
    _registered_config["_"] = None
def registered_config():
    """Return currently registered config (None when none is registered)"""
    return _registered_config["_"]
def _install_menu():
    """Build the pipeline menu inside the Maya main window."""
    from .tools import (
        #creator,
        #loader,
        publish,
    )
    _uninstall_menu()
    # Resolve the Maya main window among the top-level Qt widgets so
    # tool dialogs can be parented to it.
    self._parent = {
        widget.objectName(): widget
        for widget in QtWidgets.QApplication.topLevelWidgets()
    }["MayaWindow"]
    def deferred():
        # Runs delayed (see singleShot below) so a previous menu
        # teardown has time to finish before re-creating the menu.
        cmds.menu(self._menu,
                  label="Pipeline",
                  tearOff=True,
                  parent="MayaWindow")
        """
        cmds.menuItem("Create...",
                      command=lambda *args: creator.show(parent=self._parent))
        cmds.menuItem("Load...",
                      command=lambda *args:
                      loader.show(parent=self._parent))
        """
        cmds.menuItem("Publish...",
                      command=lambda *args: publish.show(parent=self._parent),
                      image=publish.ICON)
        cmds.menuItem(divider=True, label="Create...")
    # Allow time for uninstallation to finish.
    QtCore.QTimer.singleShot(100, deferred)
def _uninstall_menu():
    """Delete the pipeline menu widget, if it exists."""
    app = QtWidgets.QApplication.instance()
    widgets = {w.objectName(): w for w in app.allWidgets()}
    menu = widgets.get(self._menu)
    if menu:
        menu.deleteLater()
        del menu
def create(name, asset, family, options=None, data=None):
    """Create a new instance

    Associate nodes with a subset and family. These nodes are later
    validated, according to their `family`, and integrated into the
    shared environment, relative their `subset`.

    Data relative each family, along with default data, are imprinted
    into the resulting objectSet. This data is later used by extractors
    and finally asset browsers to help identify the origin of the asset.

    Arguments:
        name (str): Name of subset
        asset (str): Name of asset
        family (str): Name of family
        options (dict, optional): Additional options from GUI
        data (dict, optional): Additional data from GUI

    Raises:
        NameError on `subset` already exists
        KeyError on invalid dynamic property
        RuntimeError on host error

    Returns:
        Name of instance

    """
    processed = list()
    for Plugin in discover(Creator):
        # Only run plug-ins registered for the requested family
        if Plugin.family != family:
            continue
        Plugin.log.info(
            "Creating '%s' with '%s'" % (name, Plugin.__name__)
        )
        try:
            plugin = Plugin(name, asset, options, data)
            with lib.maintained_selection():
                print("Running %s" % plugin)
                instance = plugin.process()
        except Exception as e:
            # Best-effort: a broken plug-in must not stop the others
            log.warning(e)
            continue
        processed.append(plugin)
    assert processed, "No Creator plug-ins were run, this is a bug"
    return instance
@lib.log
class Loader(list):
    """Load representation into host application

    Arguments:
        context (dict): avalon-core:context-1.0
        name (str, optional): Use pre-defined name
        namespace (str, optional): Use pre-defined namespace

    .. versionadded:: 4.0
        This class was introduced

    """

    families = list()
    representations = list()
    order = 0

    def __init__(self, context):
        publish_template = context["project"]["config"]["template"]["publish"]
        # Collect the name of every member of the context, then overlay
        # the fields the template expects explicitly.
        fields = dict(
            (key, value["name"]) for key, value in context.items()
        )
        fields["root"] = registered_root()
        fields["silo"] = context["asset"]["silo"]
        self.fname = publish_template.format(**fields)

    def load(self, context, name=None, namespace=None, data=None):
        """Load asset via database

        Arguments:
            context (dict): Full parenthood of representation to load
            name (str, optional): Use pre-defined name
            namespace (str, optional): Use pre-defined namespace
            data (dict, optional): Additional settings dictionary

        """
        raise NotImplementedError(
            "Loader.load() must be implemented by subclass")

    def update(self, container, representation):
        """Update `container` to `representation`

        Arguments:
            container (avalon-core:container-1.0): Container to update,
                from `host.ls()`.
            representation (dict): Update the container to this representation.

        """
        raise NotImplementedError(
            "Loader.update() must be implemented by subclass")

    def remove(self, container):
        """Remove a container

        Arguments:
            container (avalon-core:container-1.0): Container to remove,
                from `host.ls()`.

        Returns:
            bool: Whether the container was deleted

        """
        raise NotImplementedError(
            "Loader.remove() must be implemented by subclass")
@lib.log
class Creator(object):
    """Determine how assets are created"""

    name = None
    label = None
    family = None

    def __init__(self, name, asset, options=None, data=None):
        self.name = name or self.name
        self.options = options
        # Start from the default instance data and overlay anything
        # supplied by the caller (e.g. from the GUI).
        defaults = {
            "id": "pyblish.jiminy.instance",
            "family": self.family,
            "asset": asset,
            "subset": name,
            "active": True
        }
        defaults.update(data or {})
        self.data = defaults

    def process(self):
        """Create the objectSet for this instance and imprint its data."""
        members = list()
        if (self.options or {}).get("useSelection"):
            members = cmds.ls(selection=True)
        instance = cmds.sets(members, name=self.name)
        lib.imprint(instance, self.data)
        return instance
def discover(superclass):
    """Find and return subclasses of `superclass`

    Plug-ins come from two places: modules found in the directories
    registered via `register_plugin_path()`, and classes registered
    directly via `register_plugin()` (the latter overwrite discovered
    plug-ins on name collision).

    Arguments:
        superclass (type): Superclass of plug-ins to discover

    Returns:
        list: Discovered plug-in classes, sorted by class name

    """
    registered = _registered_plugins.get(superclass, list())
    plugins = dict()

    # Include plug-ins from registered paths
    for path in _registered_plugin_paths.get(superclass, list()):
        path = os.path.normpath(path)
        assert os.path.isdir(path), "%s is not a directory" % path

        for fname in os.listdir(path):
            # Ignore files which start with underscore
            if fname.startswith("_"):
                continue

            mod_name, mod_ext = os.path.splitext(fname)
            if not mod_ext == ".py":
                continue

            abspath = os.path.join(path, fname)
            if not os.path.isfile(abspath):
                continue

            module = types.ModuleType(mod_name)
            module.__file__ = abspath

            try:
                with open(abspath) as f:
                    six.exec_(f.read(), module.__dict__)

                # Store reference to original module, to avoid
                # garbage collection from collecting its global
                # imports, such as `import os`.
                sys.modules[mod_name] = module

            except Exception as err:
                # BUG FIX: the %s placeholders were previously passed as
                # extra `print` arguments and never interpolated.
                print("Skipped: \"%s\" (%s)" % (mod_name, err))
                continue

            for plugin in plugin_from_module(superclass, module):
                if plugin.__name__ in plugins:
                    # BUG FIX: interpolate instead of printing the plug-in
                    # as a second positional argument.
                    print("Duplicate plug-in found: %s" % plugin)
                    continue

                plugins[plugin.__name__] = plugin

    # Directly registered plug-ins take precedence over discovered ones
    for plugin in registered:
        if plugin.__name__ in plugins:
            print("Warning: Overwriting %s" % plugin.__name__)
        plugins[plugin.__name__] = plugin

    return sorted(plugins.values(), key=lambda Plugin: Plugin.__name__)
def plugin_from_module(superclass, module):
    """Return plug-ins from module

    Arguments:
        superclass (superclass): Superclass of subclasses to look for
        module (types.ModuleType): Imported module from which to
            parse valid Avalon plug-ins.

    Returns:
        List of plug-ins, or empty list if none is found.

    """
    # BUG FIX (idiom): the accumulator was named `types`, shadowing the
    # module-level `import types`; renamed to avoid confusion.
    subclasses = list()

    def recursive_bases(klass):
        # Return all transitive base classes of `klass`.
        r = []
        bases = klass.__bases__
        r.extend(bases)
        for base in bases:
            r.extend(recursive_bases(base))
        return r

    for name in dir(module):
        # It could be anything at this point
        obj = getattr(module, name)
        if not inspect.isclass(obj):
            continue
        # These are subclassed from nothing, not even `object`
        if not len(obj.__bases__) > 0:
            continue
        # Use string comparison rather than `issubclass`
        # in order to support reloading of this module.
        bases = recursive_bases(obj)
        if not any(base.__name__ == superclass.__name__ for base in bases):
            continue
        subclasses.append(obj)

    return subclasses
def register_plugin(superclass, obj):
    """Register an individual `obj` of type `superclass`

    Arguments:
        superclass (type): Superclass of plug-in
        obj (object): Subclass of `superclass`

    """
    # Create the bucket on first use; avoid duplicate registrations.
    bucket = _registered_plugins.setdefault(superclass, list())
    if obj not in bucket:
        bucket.append(obj)
def register_plugin_path(superclass, path):
    """Register a directory of one or more plug-ins

    Arguments:
        superclass (type): Superclass of plug-ins to look for during discovery
        path (str): Absolute path to directory in which to discover plug-ins

    """
    bucket = _registered_plugin_paths.setdefault(superclass, list())
    normalized = os.path.normpath(path)
    if normalized not in bucket:
        bucket.append(normalized)
def registered_plugin_paths():
    """Return all currently registered plug-in paths"""
    # Copy each list so callers cannot mutate the registry in-place.
    return dict(
        (superclass, list(paths))
        for superclass, paths in _registered_plugin_paths.items()
    )
def deregister_plugin(superclass, plugin):
    """Opposite of `register_plugin()`"""
    _registered_plugins[superclass].remove(plugin)
def deregister_plugin_path(superclass, path):
    """Opposite of `register_plugin_path()`"""
    _registered_plugin_paths[superclass].remove(path)
def register_root(path):
    """Register currently active root"""
    log.info("Registering root: %s" % path)
    _registered_root["_"] = path
def registered_root():
    """Return currently registered root"""
    # Fall back to the session's project directory, then empty string.
    root = _registered_root["_"] or Session.get("AVALON_PROJECTS") or ""
    return os.path.normpath(root)
def on(event, callback):
    """Call `callback` on `event`

    Register `callback` to be run when `event` occurs.

    Example:
        >>> def on_init():
        ...    print("Init happened")
        ...
        >>> on("init", on_init)
        >>> del on_init

    Arguments:
        event (str): Name of event
        callback (callable): Any callable

    """
    # Handlers are held weakly; deleting the callback unregisters it.
    try:
        handlers = _registered_event_handlers[event]
    except KeyError:
        handlers = _registered_event_handlers[event] = weakref.WeakSet()
    handlers.add(callback)
def before(event, callback):
    """Convenience to `on()` for before-events

    Equivalent to ``on("before_" + event, callback)``.
    """
    on("before_" + event, callback)
def after(event, callback):
    """Convenience to `on()` for after-events

    Equivalent to ``on("after_" + event, callback)``.
    """
    on("after_" + event, callback)
def emit(event, args=None):
    """Trigger an `event`

    Every registered callback is invoked; an exception raised by one
    callback is logged and swallowed so it cannot prevent the others
    from running.

    Example:
        >>> def on_init():
        ...   print("Init happened")
        ...
        >>> on("init", on_init)
        >>> emit("init")
        Init happened
        >>> del on_init

    Arguments:
        event (str): Name of event
        args (list, optional): List of arguments passed to callback

    """
    payload = args or list()
    for handler in _registered_event_handlers.get(event, set()):
        try:
            handler(*payload)
        except Exception:
            log.warning(traceback.format_exc())
def _register_callbacks():
    # NOTE(review): closure — `self`, `OpenMaya`, `log` and the `_on_*` /
    # `_before_*` handlers come from the enclosing scope, which is not
    # visible in this chunk; presumably an install() of a Maya
    # integration — confirm against the full file.
    # First tear down callbacks left over from a previous install so a
    # re-install does not accumulate duplicate Maya message callbacks.
    for handler, event in self._events.copy().items():
        if event is None:
            continue
        try:
            OpenMaya.MMessage.removeCallback(event)
            self._events[handler] = None
        except RuntimeError as e:
            log.info(e)
    # Re-register scene/session callbacks, keyed by the handler function
    # so the returned ids can be removed again on the next install.
    self._events[_on_scene_save] = OpenMaya.MSceneMessage.addCallback(
        OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save
    )
    # kBeforeSaveCheck requires a "check" callback (may veto the save).
    self._events[_before_scene_save] = OpenMaya.MSceneMessage.addCheckCallback(
        OpenMaya.MSceneMessage.kBeforeSaveCheck, _before_scene_save
    )
    self._events[_on_scene_new] = OpenMaya.MSceneMessage.addCallback(
        OpenMaya.MSceneMessage.kAfterNew, _on_scene_new
    )
    self._events[_on_maya_initialized] = OpenMaya.MSceneMessage.addCallback(
        OpenMaya.MSceneMessage.kMayaInitialized, _on_maya_initialized
    )
    self._events[_on_scene_open] = OpenMaya.MSceneMessage.addCallback(
        OpenMaya.MSceneMessage.kAfterOpen, _on_scene_open
    )
    log.info("Installed event handler _on_scene_save..")
    log.info("Installed event handler _before_scene_save..")
    log.info("Installed event handler _on_scene_new..")
    log.info("Installed event handler _on_maya_initialized..")
    log.info("Installed event handler _on_scene_open..")
def _on_maya_initialized(*args):
emit("init", args)
if cmds.about(batch=True):
log.warning("Running batch mode ...")
return
# Keep reference to the main Window, once a main window exists.
self._parent = {
widget.objectName(): widget
for | |
# Discord bot — source repository: n0tpetya/discordbot
import asyncio
import random
import discord
from discord import Member, Guild, User
from discord import Profile
from datetime import datetime
client = discord.Client(intents=discord.Intents.all())
antworten = ['Ja', 'Nein', 'Wahrscheinlich', 'Unwahrscheinlich', 'Vielleicht', 'Sehr wahrscheinlich',
'Sehr unwarscheinlich']
beleidigungen = []
uhrzeit = datetime.now().strftime('%H:%M')
status = ['Drinking coffee☕️', 'Eating something🧁', 'Playing Minecraft🎮', 'Playing CS:GO🎮', 'Playing GTA V🎮', 'Playing Rocket League🎮', 'Vibing🎷', 'Doing work👨🏼🔧',
'Meeting friends👨👨👦', 'Listening to music🎧', 'On the phone📞', 'Writing with friends📱', 'On a party🎭', 'Going out👫']
def is_not_pinned(cmess):
    """Purge filter: return True for messages that are not pinned."""
    pinned = cmess.pinned
    return not pinned
@client.event  # Fired once the bot has connected and is ready
async def on_ready():
    """Log the successful login and start the presence-rotation task."""
    print('Eingeloggt als {}'.format(client.user.name))  # startup success message
    print(uhrzeit)  # NOTE: time of module import, not of login
    client.loop.create_task(status_task())
async def status_task():  # Background loop that rotates the bot's presence
    """Cycle the bot's presence every 5 seconds, forever.

    Shows 'Status 1', then 'Status 2', then a random entry from the
    module-level `status` list. Started from on_ready().
    """
    while True:
        await client.change_presence(activity=discord.Game('Status 1'), status=discord.Status.online)
        await asyncio.sleep(5)
        await client.change_presence(activity=discord.Game('Status 2'),
                                     status=discord.Status.online)
        await asyncio.sleep(5)
        await client.change_presence(activity=discord.Game('{}'.format(random.choice(status))), status=discord.Status.online)
        await asyncio.sleep(5)
@client.event  # Command dispatcher
async def on_message(message):
    """Handle every incoming message and dispatch the bot's commands.

    Fixes over the previous revision:
    * ``!youtube``: the footer text literal was missing its opening
      quote (``text=Text')``), a syntax error.
    * ``!unban``: ``discord.utils.find`` may return None; the old code
      dereferenced ``.user`` before the None check, and the Forbidden
      path referenced the undefined name ``member``.

    NOTE(review): the team/news commands gate on the
    ``send_tts_messages`` permission — presumably an intentional staff
    marker; confirm.
    """
    # Ignore other bots (including ourselves).
    if message.author.bot:
        return
    # Help list
    if message.content.startswith(".help"):
        embedhelp = discord.Embed(title='Bot-Commands',
                                  description='',
                                  color=0x04ff00)
        embedhelp.add_field(name='.help', value='Zeigt dir diese Liste an',
                            inline=False)
        embedhelp.add_field(name='!oracle <Frage>', value='Gibt dir die Antwort auf deine Frage',
                            inline=False)
        embedhelp.add_field(name='!uinfo <User>', value='Zeigt Informationen über einen Nutzer',
                            inline=False)
        embedhelp.add_field(name='!forum', value='Zeigt dir den Link zur Webseite',
                            inline=False)
        embedhelp.add_field(name='!youtube', value='Zeigt dir den Link zu unserem YouTube Channel',
                            inline=False)
        embedhelp.add_field(name='!support', value='Zeigt dir Support Möglichkeiten an',
                            inline=False)
        embedhelp.add_field(name='!ticket', value='Du kannst damit bei Problemen ein Ticket erstellen und mit den Admins in Kontakt treten.')
        embedhelp.add_field(name='Bot erstellt von', value='Game-Forum.net | Deine Gaming Community!')
        embedhelp.set_footer(text='Text')
        await message.channel.send(embed=embedhelp)
    # User info
    if message.content.startswith('!uinfo'):
        args = message.content.split(' ')
        if len(args) == 2:
            member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
            if member:
                embed = discord.Embed(title='Userinfo für {}'.format(member.name),
                                      description='Informationen über: {}'.format(member.mention),
                                      color=0x04ff00)
                embed.add_field(name='Server beigetreten',
                                value=member.joined_at.strftime('%d. %m. %Y um %H:%M:%S Uhr'),
                                inline=True)
                embed.add_field(name='Discord beigetreten',
                                value=member.created_at.strftime('%d. %m. %Y um %H:%M:%S Uhr'),
                                inline=True)
                rollen = ''
                for role in member.roles:
                    if not role.is_default():
                        rollen += '{} \r\n'.format(role.mention)
                if rollen:
                    embed.add_field(name='Rollen: ', value=rollen, inline=True)
                embed.add_field(name='Bewertung', value=('Gebe gerne eine Bewertung zu {} ab!'.format(member.mention)),
                                inline=False)
                embed.set_thumbnail(url=member.avatar_url)
                embed.set_footer(text='Text')
                react = await message.channel.send(embed=embed)
                await react.add_reaction('👍')
                await react.add_reaction('👎')
            else:
                await message.channel.send("Der Nutzer muss auf dem Discord sein!")
        else:
            await message.channel.send("Bitte gib einen Nutzernamen an!")
    # Forum link
    if message.content.startswith('!forum'):
        embed2 = discord.Embed(title='Forum-Adresse',
                               description='Link',
                               color=0xfffb00)
        embed2.set_footer(text='Game-Forum.net Discord Bot')
        await message.channel.send(embed=embed2)
    # Support
    if message.content.startswith('!support'):
        embed3 = discord.Embed(title='Support Möglichkeiten',
                               description='Möglichkeiten um Support zu erhalten',
                               color=0xfffb00)
        embed3.add_field(name='Forum Support', value='Text',
                         inline=True)
        embed3.set_thumbnail(url='https://game-forum.net/wp-content/uploads/discord/support.png')
        embed3.set_footer(text='Text')
        await message.channel.send(embed=embed3)
    # Team announcements: join / leave / rename
    # NOTE(review): if no member matches args[1], `member` is None and
    # `.avatar_url` below raises — confirm whether a guard is wanted.
    if message.content.startswith('!jointeam') and message.author.permissions_in(message.channel).send_tts_messages:
        args = message.content.split(' ')
        if len(args) >= 3:
            member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
            teammsg1 = ' '.join(args[2:])
            await message.channel.purge(limit=1, check=is_not_pinned)
            embedjoin = discord.Embed(title='Team-Beitritt/Promotion',
                                      description='Jemand ist dem Team beigetreten oder wurde befördert!',
                                      color=0x22ff00)
            embedjoin.add_field(name='Änderung', value='**{}**'.format(teammsg1),
                                inline=False)
            embedjoin.set_thumbnail(url=member.avatar_url)
            embedjoin.set_footer(text='Text')
            await message.channel.send(embed=embedjoin)
    if message.content.startswith('!leaveteam') and message.author.permissions_in(message.channel).send_tts_messages:
        args = message.content.split(' ')
        if len(args) >= 3:
            member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
            teammsg2 = ' '.join(args[2:])
            await message.channel.purge(limit=1, check=is_not_pinned)
            embedleave = discord.Embed(title='Team-Leave/Degradierung',
                                       description='Jemand hat das Team verlassen oder wurde degradiert!',
                                       color=0xff0000)
            embedleave.add_field(name='Änderung', value='**{}**'.format(teammsg2),
                                 inline=False)
            embedleave.set_thumbnail(url=member.avatar_url)
            embedleave.set_footer(text='Text')
            await message.channel.send(embed=embedleave)
    if message.content.startswith('!nameteam') and message.author.permissions_in(message.channel).send_tts_messages:
        args = message.content.split(' ')
        if len(args) >= 3:
            member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
            teammsg3 = ' '.join(args[2:])
            await message.channel.purge(limit=1, check=is_not_pinned)
            embedchange = discord.Embed(title='Namensänderung',
                                        description='Jemand hat seinen Namen geändert.',
                                        color=0xfbff00)
            embedchange.add_field(name='Änderung', value='**{}**'.format(teammsg3),
                                  inline=False)
            embedchange.set_thumbnail(url=member.avatar_url)
            embedchange.set_footer(text='Text')
            await message.channel.send(embed=embedchange)
    # Birthday announcement
    if message.content.startswith('!birthday') and message.author.permissions_in(message.channel).send_tts_messages:
        args = message.content.split(' ')
        if len(args) >= 2:
            teammsg4 = ' '.join(args[1:])
            await message.channel.purge(limit=1, check=is_not_pinned)
            embedbday = discord.Embed(title='Geburtstag',
                                      description='Jemand feiert heute seinen Geburtstag! Gratuliere ihm!',
                                      color=0x00ffdd)
            embedbday.add_field(name='Informationen', value='**{}**'.format(teammsg4),
                                inline=False)
            embedbday.set_thumbnail(url='https://game-forum.net/wp-content/uploads/discord/birthday.png')
            embedbday.set_footer(text='Text')
            await message.channel.send(embed=embedbday)
    # Clear command
    if message.content.startswith('!clear'):
        if message.author.permissions_in(message.channel).manage_messages:
            args = message.content.split(' ')
            if len(args) == 2:
                if args[1].isdigit():
                    # +1 so the !clear message itself is purged as well
                    count = int(args[1]) + 1
                    deleted = await message.channel.purge(limit=count, check=is_not_pinned)
                    embed4 = discord.Embed(title='Nachrichten gelöscht!',
                                           description='Gelöschte Nachrichten (Angepinnte ausgeschlossen)',
                                           color=0xff0000)
                    embed4.add_field(name='Anzahl gelöschter Nachrichten', value='{}'.format(len(deleted) - 1))
                    embed4.set_footer(text='Text')
                    await message.channel.send(embed=embed4)
                    await asyncio.sleep(3)
                    await message.channel.purge(limit=1, check=is_not_pinned)
                else:
                    await message.channel.send('Bitte gib eine gültige Zahl ein!')
            else:
                await message.channel.send('Bitte gib eine gültige Zahl ein!')
        else:
            await message.channel.send('Du hast keine Berechtigung dazu!')
    # Oracle
    if message.content.startswith('!oracle'):
        args = message.content.split(' ')
        if len(args) >= 2:
            frage = ' '.join(args[1:])
            embed5 = discord.Embed(title='Deine Frage an das Orakel',
                                   description='Die Antwort auf deine Frage (Ist vielleicht etwas schwammig aber besser als nix ._.)',
                                   color=0xff0000)
            if message.content.endswith('?'):
                embed5.add_field(name='Frage', value='**{}**'.format(frage))
            else:
                embed5.add_field(name='Frage', value='**{}**'.format(frage) + '?')
            embed5.add_field(name='Meine Antwort', value='{}'.format(random.choice(antworten)))
            embed5.set_thumbnail(url='https://game-forum.net/wp-content/uploads/discord/support.png')
            embed5.set_footer(text='Text')
            await message.channel.send(embed=embed5)
        else:
            await message.channel.send("Bitte gib eine Frage an!")
    # YouTube link
    if message.content.startswith('!youtube'):
        embedyoutube = discord.Embed(title='YouTube Kanal',
                                     description='Link zum YouTube Kanal',
                                     color=0xff0000)
        embedyoutube.add_field(name='Link', value='Link')
        embedyoutube.set_footer(text='Text')  # FIX: opening quote was missing (syntax error)
        await message.channel.send(embed=embedyoutube)
    # Ban system
    if message.content.startswith('!ban') and message.author.guild_permissions.ban_members:
        args = message.content.split(' ')
        if len(args) >= 2:
            banreason = ' '.join(args[2:])
            member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
            if member:
                embed7 = discord.Embed(title='Benutzer gebannt',
                                       description='Ein Benutzer wurde gebannt',
                                       color=0xff0000)
                embed7.add_field(name='Name des Benutzers', value='**{}**'.format(member.name))
                embed7.add_field(name='Grund', value='{}'.format(banreason))
                embed7.set_footer(text='Text')
                await message.channel.send(embed=embed7)
                embedbandm = discord.Embed(title='Du wurdest gebannt!',
                                           description='Du wurdest vom Discord gebannt!',
                                           color=0xff0000)
                embedbandm.add_field(name='Grund', value='{}'.format(banreason))
                embedbandm.set_footer(text='Text')
                try:
                    if not member.bot:
                        if not member.dm_channel:
                            await member.create_dm()
                        await member.dm_channel.send(embed=embedbandm)
                except discord.errors.Forbidden:
                    print('Es konnte keine Bannachricht an {0} gesendet werden.'.format(member.name))
                    if member.bot:
                        print('Der User ist ein Bot.')
                await member.ban()
            else:
                await message.channel.send(f'Kein user mit dem Namen {args[1]} gefunden.')
        else:
            await message.channel.send('Bitte gib einen Namen an!')
    if message.content.startswith('!unban') and message.author.guild_permissions.ban_members:
        args = message.content.split(' ')
        unbanreason = ' '.join(args[2:])
        if len(args) >= 2:
            # FIX: find() may return None — resolve the ban entry first
            # instead of dereferencing `.user` unconditionally.
            ban_entry = discord.utils.find(lambda b: args[1] in b.user.name, await message.guild.bans())
            user: User = ban_entry.user if ban_entry else None
            if user:
                await message.guild.unban(user)
                embed8 = discord.Embed(title='Benutzer entbannt',
                                       description='Ein Benutzer wurde entbannt',
                                       color=0x04ff00)
                embed8.add_field(name='Name des Benutzers', value='**{}**'.format(user.name))
                embed8.add_field(name='Grund', value='{}'.format(unbanreason))
                embed8.set_footer(text='Game-Forum Discord Bot')
                await message.channel.send(embed=embed8)
                embedunbandm = discord.Embed(title='Du wurdest entbannt!',
                                             description='Du wurdest vom Discord entbannt!',
                                             color=0x04ff00)
                embedunbandm.add_field(name='Grund', value='{}'.format(unbanreason))
                embedunbandm.set_footer(text='Du kannst dem Discord nun wieder beitreten!')
                try:
                    if not user.bot:
                        if not user.dm_channel:
                            await user.create_dm()
                        await user.dm_channel.send(embed=embedunbandm)
                except discord.errors.Forbidden:
                    # FIX: previously referenced the undefined name `member`
                    print('Es konnte keine Unbannachricht an {0} gesendet werden.'.format(user.name))
                    if user.bot:
                        print('Der User ist ein Bot.')
            else:
                await message.channel.send(f'Kein user mit dem Namen {args[1]} gefunden.')
        else:
            await message.channel.send('Bitte gib einen Namen an!')
    # News command
    if message.content.startswith('!news') and message.author.permissions_in(message.channel).send_tts_messages:
        args = message.content.split(' ')
        if len(args) >= 3:
            titel = '{}'.format(args[1])
            news = ' '.join(args[2:])
            embednews = discord.Embed(title='Eine neue News ist erschienen!',
                                      description='',
                                      color=0x04ff00)
            embednews.add_field(name='{}'.format(titel), value='{}'.format(news),
                                inline=False)
            embednews.set_footer(text="Text")
            await message.channel.purge(limit=1, check=is_not_pinned)
            await message.channel.send(embed=embednews)
    if message.content.startswith('!kick') and message.author.guild_permissions.kick_members:
        args = message.content.split(' ')
        kickreason = ' '.join(args[2:])
        if len(args) >= 2:
            member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
            if member:
                embed9 = discord.Embed(title='Benutzer gekickt',
                                       description='Ein Benutzer wurde gekickt',
                                       color=0xfffb00)
                embed9.add_field(name='Name des Benutzers', value='**{}**'.format(member.name))
                embed9.add_field(name='Grund', value='{}'.format(kickreason))
                embed9.set_footer(text='Game-Forum Discord Bot')
                embedkickdm = discord.Embed(title='Du wurdest gekickt!',
                                            description='Du wurdest vom Discord gekickt!',
                                            color=0xfffb00)
                embedkickdm.add_field(name='Name des Benutzers', value='**{}**'.format(member.name))
                embedkickdm.add_field(name='Grund', value='{}'.format(kickreason))
                embedkickdm.set_footer(text='Du kannst dem Discord weiterhin beitreten!')
                await message.channel.send(embed=embed9)
                try:
                    if not member.bot:
                        if not member.dm_channel:
                            await member.create_dm()
                        await member.dm_channel.send(embed=embedkickdm)
                except discord.errors.Forbidden:
                    print('Es konnte keine Kicknachricht an {0} gesendet werden.'.format(member.name))
                    if member.bot:
                        print('Der user ist ein Bot.')
                await member.kick()
            else:
                await message.channel.send(f'Kein User mit dem Namen {args[1]} gefunden.')
        else:
            await message.channel.send('Bitte gib einen Namen an!')
    if message.content.startswith('!warn') and message.author.guild_permissions.manage_nicknames:
        args = message.content.split(' ')
        warnreason = ' '.join(args[2:])
        if len(args) >= 2:
            member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
            if member:
                embedwarn = discord.Embed(title='Benutzer verwarnt',
                                          description='Ein Benutzer wurde verwarnt',
                                          color=0xfffb00)
                embedwarn.add_field(name='Name des Benutzers', value='**{}**'.format(member.name))
                embedwarn.add_field(name='Grund', value='{}'.format(warnreason))
                embedwarn.set_footer(text='Game-Forum Discord Bot')
                embedwarndm = discord.Embed(title='Du wurdest verwarnt',
                                            description='Du wurdest am Discord verwarnt!',
                                            color=0xfffb00)
                embedwarndm.add_field(name='Name des Benutzers', value='**{}**'.format(member.name))
                embedwarndm.add_field(name='Grund', value='{}'.format(warnreason))
                embedwarndm.set_footer(text='Du kannst dem Discord weiterhin beitreten!')
                await message.channel.send(embed=embedwarn)
                try:
                    if not member.bot:
                        if not member.dm_channel:
                            await member.create_dm()
                        await member.dm_channel.send(embed=embedwarndm)
                except discord.errors.Forbidden:
                    print('Es konnte keine Warnnachricht an {0} gesendet werden.'.format(member.name))
                    if member.bot:
                        print('Der User ist ein Bot.')
            else:
                await message.channel.send(f'Kein user mit dem Namen {args[1]} gefunden.')
        else:
            await message.channel.send('Bitte gib einen Namen an!')
@client.event  # Fired when a member joins the guild
async def on_member_join(member):
    """Welcome a new member and hand out the default role.

    Humans get the 'User' role plus a DM greeting; bots get the 'BOT'
    role and no DM.

    Fixes over the previous revision: `willkommenschannel_id` had no
    value at all (a syntax error) — it is now an explicit placeholder —
    and the "Game-Forun.net" typo in the welcome title is corrected.
    """
    mitgliedrolle = discord.utils.get(member.guild.roles, name='User')
    botrolle = discord.utils.get(member.guild.roles, name='BOT')
    willkommenschannel_id = 0  # TODO: set the real welcome-channel ID
    willkommenschannel = client.get_channel(willkommenschannel_id)
    await willkommenschannel.send('Hey **{}**, willkommen auf dem Server!'.format(member.mention))
    embed = discord.Embed(title='Willkommen {} auf dem Game-Forum.net Discord Server! 👍 😀'.format(member.name),
                          description='Wir heißen dich herzlich Willkommen',
                          color=0x04ff00)
    embed.set_thumbnail(url=member.avatar_url)
    await willkommenschannel.send(embed=embed)
    if not member.bot:
        await member.add_roles(mitgliedrolle)
        embed = discord.Embed(title='Hey **{}**, willkommen auf dem Discord Server!'.format(member.name), description='Wir heißen dich herzlich willkommen und wünsche dir eine angenehme Zeit auf dem Server.', color=0x04ff00)
        try:
            if not member.dm_channel:
                await member.create_dm()
            await member.dm_channel.send(embed=embed)
        except discord.errors.Forbidden:
            print('Ich konnte keine persönliche Willkommennachricht an **{}** senden'.format(member.name))
    if member.bot:
        await member.add_roles(botrolle)
client.run('Bot | |
"""
<NAME>
A01700711
ITESM Campus QRO
Logistic Regression Algorithm
"""
import sys
# import math
import numpy
import pandas
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix,accuracy_score
import matplotlib.pyplot as plot
import time
from sklearn.linear_model import LogisticRegression
# Mean cross-entropy per epoch, appended by show_errors() and plotted later.
__errors__= []
def calculateHyp(params, sample):
    """Return the logistic-regression hypothesis for each row of `sample`.

    yHat = sigmoid(-(b0 + b1*x1 + ... + bn*xn)) where sigmoid() here is
    1/(1+e^z), so the combination is the standard logistic hypothesis.

    params -> coefficients (bias included as its own feature column)
    sample -> feature rows (pandas DataFrame); element-wise multiply
              aligns each coefficient with its feature column.
    """
    weighted = params * sample
    # Sum across the feature axis, then negate for the sign convention
    # expected by sigmoid() below.
    negated_dot = weighted.sum(axis=1) * (-1)
    return sigmoid(negated_dot)
def sigmoid(z):
    """Activation used by calculateHyp(): 1 / (1 + e**z).

    NOTE: the caller passes z = -(w·x), so despite the unusual sign
    convention this evaluates the standard sigmoid 1 / (1 + e**(-w·x)).
    Works on scalars and pandas/numpy arrays alike via numpy.exp.
    """
    return 1 / (1 + numpy.exp(z))
def gradientDescent(params, features, learning_rate, expectedValues):
    """Perform one full-batch gradient-descent update and return the
    new coefficients.

    error = predictedValue - expectedValue; the gradient is the dot
    product of the error vector with the feature matrix, averaged over
    the batch.

    params -> current coefficients
    features -> training samples, one row per instance
    learning_rate -> step size
    expectedValues -> ground-truth labels
    """
    residuals = calculateHyp(params, features) - expectedValues
    # One dot product yields the partial derivative for every coefficient.
    gradient = numpy.dot(residuals, features)
    return params - learning_rate * (1 / len(features) * gradient)
def show_errors(params, samples, y):
    """Compute the mean cross-entropy over `samples`, record it in the
    module-level `__errors__` history, and return it.

    params -> coefficients of each parameter
    samples -> all the training data
    y -> all the real output data
    """
    global __errors__
    predictions = calculateHyp(params, samples)
    # Vectorised element-wise loss, then averaged over the batch.
    per_sample = numpy.vectorize(crossEntropy)(predictions, y)
    mean_error_param = per_sample.sum() / len(samples)
    __errors__.append(mean_error_param)
    return mean_error_param
def crossEntropy(predictedValue, realValue):
    """Cross-entropy loss for a single prediction.

    -(y*log(p) + (1-y)*log(1-p)) with natural log; the two branches
    below are the y==1 and y==0 cases. Predictions of exactly 0 or 1
    are clamped to 0.001 / 0.999 to avoid log(0).

    predictedValue -> hypothesis output in [0, 1]
    realValue -> ground-truth label (0 or 1)
    """
    if realValue == 1:
        p = 0.001 if predictedValue == 0 else predictedValue
        return -(numpy.log(p))
    p = 0.999 if predictedValue == 1 else predictedValue
    return -(numpy.log(1 - p))
def scaleData(features):
    """Min-max normalise every column of `features` in place and return it.

    normalizedVal = (x - min(x)) / (max(x) - min(x)) per column
    (rescaling / min-max normalisation), which improves gradient-descent
    convergence. The frame is mutated in place, matching the previous
    row-by-row implementation.

    features -> the feature columns to normalise
    """
    maxValues = features.max()
    minValues = features.min()
    print("Initializing Normalization ...\n\n")
    # Vectorised equivalent of the old per-row loop; pandas aligns the
    # min/max Series with the columns, and .loc keeps it in place.
    features.loc[:, :] = (features - minValues) / (maxValues - minValues)
    return features
# File name of the Dataset
csv_fileName = "data_banknote_authentication.csv"
# Get a dataframe from the Dataset provided and name each column of the dataset (since in this case data was not labeled within the file)
dataset = pandas.read_csv(csv_fileName,names=["Variance","Skewness","Curtosis","Entropy","Class"])
# Output the first 5 lines of the dataset
print(dataset.head())
print("\n\nDataset Description\n")
print(dataset.describe())
features = pandas.DataFrame(dataset, columns = ["Variance","Skewness","Curtosis","Entropy"])
label = dataset["Class"]
# Data has first to be normalized [0,1]
features = scaleData(features)
print("Normalized Features")
print(features)
# authentic = dataset.loc[label == 1]
# counterfeit = dataset.loc[label == 0]
# Learning Rate for GD
alpha = 3.5
# Starting Coefficients (For each parameter)
params = [0,0,0,0]
# Add a new column and param for the Bias
# NOTE THE MAIN PROBLEM AFTER EXECUTING THE CODE WAS THE MISSING BIAS, SINCE THE MINIMUM ERROR WOULD GO AROUND 0.35...
params.append(0) # Bias coefficient
# NOTE After giving the coefficients (parameters) random values, finally the learning adjusted and therefore, a learning rate 3.8 was too big
# Establish parameters starting value with random values
params = numpy.random.rand(len(params))
print("PARAMS starting values")
print(params)
print("NEW PARAMS")
print(params)
features["Bias"] = 1
print("NEW FEATURES with BIAS")
print(features.head())
# Splits the Dataset into a training dataset and a test dataset
# In this case, the model is trained with 75% (3/4 of data) of the data given (NOTE: Since no random_state is given, the seed by default is random for each time the code is executed)
# Registered amateur mistake -> used test_size instead of train_size, wondering why numbers did not match
trainingFeatures, testFeatures, trainingLabel, testLabel = train_test_split(features, label, train_size=0.25)
print("TRAIN features")
print(trainingFeatures)
print("TRAIN LABEL")
print(trainingLabel)
# Scikit Implementation
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(trainingFeatures, trainingLabel)
y_pred = lr.predict(testFeatures)
print('Wrong/Misclassified samples: %d' % (testLabel != y_pred).sum())
print ("Accuracy of Model with Test Data (%) : ", accuracy_score(testLabel.values.tolist(), y_pred))
cm = confusion_matrix(testLabel.values.tolist(), y_pred)
print(cm)
# Current epoch iteration
epoch = 0
start_time = time.time()
predicted_Values = []
# While loop that stops until local minimum is reached or there is no further improvement in the bias
while True:
prevParams = list(params) # previous epoch bias
params = gradientDescent(params,trainingFeatures,alpha,trainingLabel)
error = show_errors(params, trainingFeatures, trainingLabel) # calculates the error between predicted and real data
params = list(params) # In order to leave in same format as before -> not in a numpy array
if(params == prevParams or epoch >= 20000 or error < 0.35): # the loop will only end if no further changes are made/seen in the params, the number of epochs given is reached or a given minimum error is reached
# for instance in range(len(trainingFeatures)):
# yhat = calculateHyp(params,trainingFeatures.iloc[instance])
# predicted_Values.append(round(yhat))
yHat = calculateHyp(params,trainingFeatures)
yHat = yHat.to_numpy().round()
# predicted_Values.append(round(yHat))
predicted_Values = yHat
# print("predicted values")
# print(predicted_Values)
# print("Expected -> %.3f , Predicted Value -> %.3f [%d]" % (trainingLabel.iloc[instance], yhat, round(yhat)))
print ("FINAL params :")
print (params)
print("THE TRAINING HAS FINISHED IN " + str(epoch) + " EPOCHS!!")
finishedTrainingTime = time.time() - start_time
print("The training lasted for " + str(finishedTrainingTime/60) + " minutes")
break
epoch += 1
print("EPOCHS -> " + str(epoch) + " and error -> " + str(error), end="\r") # Overwrites the current line
plot.plot(__errors__)
plot.title("Error")
plot.xlabel("# Epochs")
plot.ylabel("Error")
plot.show()
print ("Accuracy of Model with Training Data (%) : ", accuracy_score(trainingLabel.values.tolist(), predicted_Values))
cm = confusion_matrix(trainingLabel.values.tolist(), predicted_Values)
correctPredictions = cm[0][0] + cm[1][1]
wrongPredictions = cm[0][1] + cm[1][0]
print ("Confusion Matrix : \n", cm)
print("You have " + str(correctPredictions) + " of correct predictions and " + str(wrongPredictions) + " are wrong out of " + str(trainingFeatures.shape[0]))
plot.matshow(cm)
plot.title('Authentic vs Counterfeit Banknotes')
plot.colorbar()
plot.ylabel('Real Value')
plot.xlabel('Predicted Value')
plot.show()
print("\n\n####################################\n\n\n\nEnd of Training the Model\n")
predicted_Values = []
# Here you just predict data of test values with params from training
print("Here is prediction for the test DATA :")
yhat = calculateHyp(params,testFeatures)
# predicted_Values.append(round(yhat))
predicted_Values = yhat.to_numpy().round()
for instance in range(len(testFeatures)):
# yhat = calculateHyp(params,testFeatures.iloc[instance])
# predicted_Values.append(round(yhat))
print("Expected -> %.3f , | |
import os
import tensorflow as tf
import numpy as np
from time import time
#from sklearn.model_selection import train_test_split
from tensorflow.contrib import slim
from tensorflow.contrib.slim.python.slim.learning import train_step
from text_model.text_preprocessing import preprocess_df
from image_model import inception_v1
from text_model.text_preprocessing import _load_embedding_weights_glove
from image_model.im_model import load_batch_with_text#, get_init_fn
from datasets.convert_to_dataset import get_split_with_text
# Seed reserved for reproducible runs (not applied within this chunk).
_RANDOM_SEED = 0
# Default hyper-parameters and data/embedding paths for the text models.
_CONFIG = {'mode': 'train',
           'dataset_dir': 'data',
           'text_dir': 'text_model',
           'emb_dir': 'embedding_weights',
           'filename': 'glove.6B.50d.txt',
           'initial_lr': 1e-3,
           'decay_factor': 0.3,
           'batch_size': 64,
           'rnn_size': 1024}
class WordModel():
    """Bag-of-words emotion classifier built as a TF1 graph.

    Averages frozen (externally initialised, e.g. GloVe) word embeddings
    over each post, applies one fully connected ReLU layer, and a softmax
    over emotion classes. Training uses Adam with gradient clipping.
    """
    def __init__(self, config):
        """Build the computation graph from `config` hyper-parameters
        (vocab_size, embedding_dim, post_size, fc1_size, nb_emotions,
        dropout, max_grad_norm, initial_lr).
        """
        self.config = config
        vocab_size = config['vocab_size']
        embedding_dim = config['embedding_dim']
        post_size = config['post_size']
        fc1_size = config['fc1_size']
        nb_emotions = config['nb_emotions']
        dropout = config['dropout']  # NOTE(review): unused — dropout is fed at run time via keep_prob
        max_grad_norm = config['max_grad_norm']
        initial_lr = config['initial_lr']
        # Inputs: token-id matrix [batch, post_size] and integer labels.
        self.input_data = tf.placeholder(tf.int32, [None, post_size])
        self.target = tf.placeholder(tf.int32, [None])
        self.learning_rate = tf.Variable(initial_lr, trainable=False)
        # Use a placeholder to turn off dropout during testing
        self.keep_prob = tf.placeholder(tf.float32)
        # Placeholder for embedding weights
        self.embedding_placeholder = tf.placeholder(tf.float32, [vocab_size, embedding_dim])
        # Word embedding (frozen: trainable=False; weights come in via
        # embedding_init, which copies embedding_placeholder).
        W_embedding = tf.get_variable('W_embedding', [vocab_size, embedding_dim], trainable=False)
        self.embedding_init = W_embedding.assign(self.embedding_placeholder)
        input_embed = tf.nn.embedding_lookup(W_embedding, self.input_data)
        input_embed_dropout = tf.nn.dropout(input_embed, self.keep_prob)
        # Rescale the mean by the actual number of non-zero values.
        nb_finite = tf.reduce_sum(tf.cast(tf.not_equal(input_embed_dropout, 0.0), tf.float32), axis=1)
        # If a post has zero finite elements, replace nb_finite by 1
        nb_finite = tf.where(tf.equal(nb_finite, 0.0), tf.ones_like(nb_finite), nb_finite)
        self.h1 = tf.reduce_mean(input_embed_dropout, axis=1) * post_size / nb_finite
        # Fully connected layer
        W_fc1 = tf.get_variable('W_fc1', [embedding_dim, fc1_size])
        b_fc1 = tf.get_variable('b_fc1', [fc1_size])
        h2 = tf.matmul(self.h1, W_fc1) + b_fc1
        h2 = tf.nn.relu(h2)
        # Output projection to emotion logits.
        W_softmax = tf.get_variable('W_softmax', [fc1_size, nb_emotions])
        b_softmax = tf.get_variable('b_softmax', [nb_emotions])
        logits = tf.matmul(h2, W_softmax) + b_softmax
        labels = tf.one_hot(self.target, nb_emotions)
        # Cross-entropy loss
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
        # Add to tensorboard
        tf.summary.scalar('Loss', self.loss)
        # Use gradient clipping to stabilise training
        trainable_vars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, trainable_vars), max_grad_norm)
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.train_step = optimizer.apply_gradients(zip(grads, trainable_vars),
                                                    global_step=tf.contrib.framework.get_or_create_global_step())
        #self.sample = tf.multinomial(tf.reshape(logits, [-1, vocab_size]), 1)
        # Batch accuracy: argmax over logits vs integer targets.
        correct_pred = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), self.target)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # Merge summaries
        self.merged = tf.summary.merge_all()
class WordRNNModel():
    """LSTM-based emotion classifier over word-id sequences (TF1 graph mode).

    The whole graph is built in ``__init__``: an embedding lookup whose
    weights are frozen and fed in externally (``embedding_placeholder`` /
    ``embedding_init``), a single-layer LSTM read out at each sample's last
    valid timestep, and a softmax output layer — plus loss/accuracy ops and
    a gradient-clipped Adam train step.
    """

    def __init__(self, config):
        """Build the computation graph.

        :param config: dict with keys ``batch_size``, ``vocab_size``,
            ``embedding_dim``, ``post_size``, ``fc1_size``, ``nb_emotions``,
            ``dropout``, ``max_grad_norm``, ``initial_lr``, ``hidden_size``.
            NOTE: ``fc1_size`` and ``dropout`` are read but currently unused
            here — the FC layer is commented out and dropout strength is
            driven at run time through the ``keep_prob`` placeholder.
        """
        self.config = config
        batch_size = config['batch_size']
        vocab_size = config['vocab_size']
        embedding_dim = config['embedding_dim']
        post_size = config['post_size']
        fc1_size = config['fc1_size']
        nb_emotions = config['nb_emotions']
        dropout = config['dropout']
        max_grad_norm = config['max_grad_norm']
        initial_lr = config['initial_lr']
        hidden_size = config['hidden_size']

        # Inputs: word ids, integer labels, and per-sample sequence lengths
        self.input_data = tf.placeholder(tf.int32, [batch_size, post_size])
        self.target = tf.placeholder(tf.int32, [batch_size])
        self.seq_len = tf.placeholder(tf.int32, [batch_size])
        # Learning rate lives in the graph so it can be decayed via tf.assign
        self.learning_rate = tf.Variable(initial_lr, trainable=False)
        # Use a placeholder to turn off dropout during testing
        self.keep_prob = tf.placeholder(tf.float32)
        # Placeholder for embedding weights
        self.embedding_placeholder = tf.placeholder(tf.float32, [vocab_size, embedding_dim])
        # Word embedding: frozen (trainable=False); initialized by running
        # self.embedding_init with embedding_placeholder fed.
        W_embedding = tf.get_variable('W_embedding', [vocab_size, embedding_dim], trainable=False)
        self.embedding_init = W_embedding.assign(self.embedding_placeholder)
        input_embed = tf.nn.embedding_lookup(W_embedding, self.input_data)
        input_embed_dropout = tf.nn.dropout(input_embed, self.keep_prob)

        cell = tf.contrib.rnn.BasicLSTMCell(hidden_size)
        rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, input_embed_dropout, sequence_length=self.seq_len, dtype=tf.float32)
        # Gather, for each batch row, the LSTM output at its last valid
        # timestep (seq_len - 1).
        last_rnn_output = tf.gather_nd(rnn_outputs, tf.stack([tf.range(batch_size), self.seq_len - 1], axis=1))

        # Fully connected layer (currently disabled)
        #W_fc1 = tf.get_variable('W_fc1', [hidden_size, fc1_size])
        #b_fc1 = tf.get_variable('b_fc1', [fc1_size])
        #h2 = tf.matmul(last_rnn_output, W_fc1) + b_fc1
        #h2 = tf.nn.relu(h2)

        W_softmax = tf.get_variable('W_softmax', [hidden_size, nb_emotions])
        b_softmax = tf.get_variable('b_softmax', [nb_emotions])
        logits = tf.matmul(last_rnn_output, W_softmax) + b_softmax
        labels = tf.one_hot(self.target, nb_emotions)
        # Cross-entropy loss, averaged over the batch
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
        # Add to tensorboard
        tf.summary.scalar('Loss', self.loss)

        # Gradient clipping by global norm before the Adam update
        trainable_vars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, trainable_vars), max_grad_norm)
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.train_step = optimizer.apply_gradients(zip(grads, trainable_vars),
                                                    global_step=tf.contrib.framework.get_or_create_global_step())
        #self.sample = tf.multinomial(tf.reshape(logits, [-1, vocab_size]), 1)

        # Batch accuracy: fraction of argmax predictions equal to the target
        correct_pred = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), self.target)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # Merge summaries
        self.merged = tf.summary.merge_all()
def _shuffling(X, y):
p = np.random.permutation(X.shape[0])
return X[p], y[p]
def _shuffling_rnn(X, seq_len, y):
p = np.random.permutation(X.shape[0])
return X[p], seq_len[p], y[p]
def run_model(sess, model, X, y, is_training, model_gen=None):
    """Train or evaluate ``model`` on the data set ``(X, y)``.

    Each epoch reshuffles the data and cuts it into ``batch_size``
    mini-batches (samples beyond the last full batch are dropped).

    :param sess: an active ``tf.Session``.
    :param model: model object exposing ``config`` plus the graph ops
        ``input_data``, ``target``, ``keep_prob``, ``learning_rate``,
        ``merged``, ``loss``, ``accuracy`` and ``train_step``.
    :param X: 2D int array of word ids, shape (nb_samples, post_size).
    :param y: 1D int array of labels, length nb_samples.
    :param is_training: True to optimize, decay the learning rate and log
        summaries/progress; False to only evaluate (dropout disabled).
    :param model_gen: unused; kept for call-site compatibility.
    """
    batch_size = model.config['batch_size']
    dropout = model.config['dropout']
    initial_lr = model.config['initial_lr']
    lr_decay = model.config['lr_decay']
    max_epoch_no_decay = model.config['max_epoch_no_decay']
    nb_epochs = model.config['nb_epochs']

    # Floor division: the batch count must be an int for range()/reshape()
    # below (true division yields a float on Python 3 and crashes).
    nb_batches = X.shape[0] // batch_size

    if is_training:
        # Iterations at which to print progress (~every 10% of an epoch)
        print_iter = list(np.linspace(0, nb_batches - 1, 11).astype(int))
        dropout_param = dropout
        ops = [model.merged, model.loss, model.accuracy, model.train_step]
    else:
        dropout_param = 1.0
        ops = [tf.no_op(), model.loss, model.accuracy, tf.no_op()]

    # Tensorboard writer
    if is_training:
        train_writer = tf.summary.FileWriter('text_model/loss', sess.graph)

    for e in range(nb_epochs):
        print('Epoch: {0}'.format(e + 1))

        # BUG FIX: the original reassigned ``lr_decay`` itself
        # (``lr_decay = lr_decay ** max(...)``); during the no-decay epochs
        # the exponent is 0, which set ``lr_decay`` to 1.0 permanently, so
        # the learning rate could never decay afterwards. Keep the
        # configured rate intact and derive a per-epoch factor from it.
        decay_factor = lr_decay ** max(e + 1 - max_epoch_no_decay, 0)
        # would be better to use a placeholder to assign; this modifies the
        # graph on every epoch.
        sess.run(tf.assign(model.learning_rate, initial_lr * decay_factor))

        total_loss = 0.0
        total_accuracy = 0.0
        nb_iter = 0.0
        loss_history = []
        t0 = time()

        X, y = _shuffling(X, y)
        X_reshaped = X[: (nb_batches * batch_size), :].reshape((nb_batches, batch_size, -1))
        y_reshaped = y[: (nb_batches * batch_size)].reshape((nb_batches, batch_size))

        for i in range(nb_batches):
            curr_input = X_reshaped[i, :, :]
            curr_target = y_reshaped[i, :]
            summary, curr_loss, curr_acc, _ = sess.run(
                ops,
                feed_dict={model.input_data: curr_input,
                           model.target: curr_target,
                           model.keep_prob: dropout_param})
            if is_training:
                train_writer.add_summary(summary, i + e * nb_batches)

            total_loss += curr_loss
            total_accuracy += curr_acc
            nb_iter += 1
            loss_history.append(curr_loss)

            if is_training and i in print_iter:
                print('{0:.0f}% loss = {1:.3f}, accuracy = {2:.3f}, speed = {3:.0f} pps'
                      .format(print_iter.index(i) * 10,
                              total_loss / nb_iter, total_accuracy / nb_iter,
                              (nb_iter * batch_size) / (time() - t0)))

        if not is_training:
            # Evaluation: one summary line per epoch
            print('Loss = {0:.3f}, accuracy = {1:.3f}, speed = {2:.0f} pps'
                  .format(total_loss / nb_iter, total_accuracy / nb_iter,
                          (nb_iter * batch_size) / (time() - t0)))
def run_model_rnn(sess, model, X, seq_len, y, is_training, model_gen=None):
    """Train or evaluate an RNN ``model`` on ``(X, seq_len, y)``.

    Same loop as :func:`run_model`, but each sample carries its true
    sequence length which is fed to the graph's ``seq_len`` placeholder.

    :param sess: an active ``tf.Session``.
    :param model: model object exposing ``config`` plus the graph ops
        ``input_data``, ``seq_len``, ``target``, ``keep_prob``,
        ``learning_rate``, ``merged``, ``loss``, ``accuracy`` and
        ``train_step``.
    :param X: 2D int array of word ids, shape (nb_samples, post_size).
    :param seq_len: 1D int array of per-sample sequence lengths.
    :param y: 1D int array of labels, length nb_samples.
    :param is_training: True to optimize, decay the learning rate and log
        summaries/progress; False to only evaluate (dropout disabled).
    :param model_gen: unused; kept for call-site compatibility.
    """
    batch_size = model.config['batch_size']
    dropout = model.config['dropout']
    initial_lr = model.config['initial_lr']
    lr_decay = model.config['lr_decay']
    max_epoch_no_decay = model.config['max_epoch_no_decay']
    nb_epochs = model.config['nb_epochs']

    # Floor division: the batch count must be an int for range()/reshape()
    # below (true division yields a float on Python 3 and crashes).
    nb_batches = X.shape[0] // batch_size

    if is_training:
        # Iterations at which to print progress (~every 10% of an epoch)
        print_iter = list(np.linspace(0, nb_batches - 1, 11).astype(int))
        dropout_param = dropout
        ops = [model.merged, model.loss, model.accuracy, model.train_step]
    else:
        dropout_param = 1.0
        ops = [tf.no_op(), model.loss, model.accuracy, tf.no_op()]

    # Tensorboard writer
    if is_training:
        train_writer = tf.summary.FileWriter('text_model/loss', sess.graph)

    for e in range(nb_epochs):
        print('Epoch: {0}'.format(e + 1))

        # BUG FIX: the original reassigned ``lr_decay`` itself
        # (``lr_decay = lr_decay ** max(...)``); during the no-decay epochs
        # the exponent is 0, which set ``lr_decay`` to 1.0 permanently, so
        # the learning rate could never decay afterwards. Keep the
        # configured rate intact and derive a per-epoch factor from it.
        decay_factor = lr_decay ** max(e + 1 - max_epoch_no_decay, 0)
        # would be better to use a placeholder to assign; this modifies the
        # graph on every epoch.
        sess.run(tf.assign(model.learning_rate, initial_lr * decay_factor))

        total_loss = 0.0
        total_accuracy = 0.0
        nb_iter = 0.0
        loss_history = []
        t0 = time()

        X, seq_len, y = _shuffling_rnn(X, seq_len, y)
        X_reshaped = X[: (nb_batches * batch_size), :].reshape((nb_batches, batch_size, -1))
        seq_len_reshaped = seq_len[: (nb_batches * batch_size)].reshape((nb_batches, batch_size))
        y_reshaped = y[: (nb_batches * batch_size)].reshape((nb_batches, batch_size))

        for i in range(nb_batches):
            curr_input = X_reshaped[i, :, :]
            curr_seq_len = seq_len_reshaped[i, :]
            curr_target = y_reshaped[i, :]
            summary, curr_loss, curr_acc, _ = sess.run(
                ops,
                feed_dict={model.input_data: curr_input,
                           model.seq_len: curr_seq_len,
                           model.target: curr_target,
                           model.keep_prob: dropout_param})
            if is_training:
                train_writer.add_summary(summary, i + e * nb_batches)

            total_loss += curr_loss
            total_accuracy += curr_acc
            nb_iter += 1
            loss_history.append(curr_loss)

            if is_training and i in print_iter:
                print('{0:.0f}% loss = {1:.3f}, accuracy = {2:.3f}, speed = {3:.0f} pps'
                      .format(print_iter.index(i) * 10,
                              total_loss / nb_iter, total_accuracy / nb_iter,
                              (nb_iter * batch_size) / (time() - t0)))

        if not is_training:
            # Evaluation: one summary line per epoch
            print('Loss = {0:.3f}, accuracy = {1:.3f}, speed = {2:.0f} pps'
                  .format(total_loss / nb_iter, total_accuracy / nb_iter,
                          (nb_iter * batch_size) / (time() - t0)))
def generate_chars(sess, model, first_char, max_iteration):
    """Sample ``max_iteration`` character ids from a generative model.

    Starting from ``first_char``, repeatedly feeds the last sampled
    character back into the model while threading the RNN state through
    the session calls.

    :param sess: an active ``tf.Session``.
    :param model: model exposing ``input_data``, ``initial_state``,
        ``final_state``, ``sample`` and ``keep_prob``.
    :param first_char: array holding the seed character id.
    :param max_iteration: number of characters to sample.
    :returns: list of sampled character ids.
    """
    fetches = [model.final_state, model.sample]
    state = sess.run(model.initial_state)
    current = first_char.copy()
    generated = []
    for _ in range(max_iteration):
        # Sample from the multinomial distribution of the next character,
        # carrying the recurrent state forward between calls.
        state, picked = sess.run(
            fetches,
            feed_dict={model.input_data: current,
                       model.initial_state: state,
                       model.keep_prob: 1.0})
        generated.append(picked[0][0])
        current = picked
    return generated
def compute_sklearn_features():
"""Compute mean word embedding features for sklearn models.
"""
text_dir = 'text_model'
emb_dir = 'embedding_weights'
filename = 'glove.6B.50d.txt'
emb_name = 'glove'
emotions = ['happy', 'sad', 'angry', 'scared', 'disgusted', 'surprised']
post_size = 200
df_all, word_to_id, embedding = preprocess_df(text_dir, emb_dir, filename, emb_name, emotions, post_size)
X = np.stack(df_all['text_list'])
y = df_all['search_query'].values
id_to_word = {i: k for k, i in word_to_id.iteritems()}
config = {'word_to_id': word_to_id,
'id_to_word': id_to_word,
'batch_size': 128,
'vocab_size': len(word_to_id),
'embedding_dim': embedding.shape[1],
'post_size': post_size,
'fc1_size': 16,
'nb_emotions': len(emotions),
'dropout': 1.0, # Proba to | |
{ get; } -> bool"""
@property
def Utc(self) -> TimeZoneInfo:"""Utc { get; } -> TimeZoneInfo"""
@staticmethod
def ClearCachedData():...
@staticmethod
def ConvertTime(dateTime: DateTime, sourceTimeZone: TimeZoneInfo, destinationTimeZone: TimeZoneInfo) -> DateTime:...
@staticmethod
def ConvertTime(dateTime: DateTime, destinationTimeZone: TimeZoneInfo) -> DateTime:...
@staticmethod
def ConvertTime(dateTimeOffset: DateTimeOffset, destinationTimeZone: TimeZoneInfo) -> DateTimeOffset:...
@staticmethod
def ConvertTimeBySystemTimeZoneId(dateTime: DateTime, sourceTimeZoneId: str, destinationTimeZoneId: str) -> DateTime:...
@staticmethod
def ConvertTimeBySystemTimeZoneId(dateTime: DateTime, destinationTimeZoneId: str) -> DateTime:...
@staticmethod
def ConvertTimeBySystemTimeZoneId(dateTimeOffset: DateTimeOffset, destinationTimeZoneId: str) -> DateTimeOffset:...
@staticmethod
def ConvertTimeFromUtc(dateTime: DateTime, destinationTimeZone: TimeZoneInfo) -> DateTime:...
@staticmethod
def ConvertTimeToUtc(dateTime: DateTime, sourceTimeZone: TimeZoneInfo) -> DateTime:...
@staticmethod
def ConvertTimeToUtc(dateTime: DateTime) -> DateTime:...
@staticmethod
def CreateCustomTimeZone(id: str, baseUtcOffset: TimeSpan, displayName: str, standardDisplayName: str, daylightDisplayName: str, adjustmentRules: Array[TimeZoneInfo.AdjustmentRule], disableDaylightSavingTime: bool) -> TimeZoneInfo:...
@staticmethod
def CreateCustomTimeZone(id: str, baseUtcOffset: TimeSpan, displayName: str, standardDisplayName: str, daylightDisplayName: str, adjustmentRules: Array[TimeZoneInfo.AdjustmentRule]) -> TimeZoneInfo:...
@staticmethod
def CreateCustomTimeZone(id: str, baseUtcOffset: TimeSpan, displayName: str, standardDisplayName: str) -> TimeZoneInfo:...
@staticmethod
def FindSystemTimeZoneById(id: str) -> TimeZoneInfo:...
@staticmethod
def FromSerializedString(source: str) -> TimeZoneInfo:...
def GetAdjustmentRules(self) -> Array[TimeZoneInfo.AdjustmentRule]:...
def GetAmbiguousTimeOffsets(self, dateTime: DateTime) -> Array[TimeSpan]:...
def GetAmbiguousTimeOffsets(self, dateTimeOffset: DateTimeOffset) -> Array[TimeSpan]:...
@staticmethod
def GetSystemTimeZones() -> _n_2_t_0[TimeZoneInfo]:...
def GetUtcOffset(self, dateTime: DateTime) -> TimeSpan:...
def GetUtcOffset(self, dateTimeOffset: DateTimeOffset) -> TimeSpan:...
def HasSameRules(self, other: TimeZoneInfo) -> bool:...
def IsAmbiguousTime(self, dateTime: DateTime) -> bool:...
def IsAmbiguousTime(self, dateTimeOffset: DateTimeOffset) -> bool:...
def IsDaylightSavingTime(self, dateTime: DateTime) -> bool:...
def IsDaylightSavingTime(self, dateTimeOffset: DateTimeOffset) -> bool:...
def IsInvalidTime(self, dateTime: DateTime) -> bool:...
def ToSerializedString(self) -> str:...
    class AdjustmentRule(IEquatable[TimeZoneInfo.AdjustmentRule], _n_13_t_0, _n_13_t_1):
        # Auto-generated stub for System.TimeZoneInfo.AdjustmentRule: an
        # immutable daylight-saving adjustment rule (effective date range,
        # daylight delta and the two transition times). Bodies are
        # intentionally empty stub declarations.
        @property
        def DateEnd(self) -> DateTime:"""DateEnd { get; } -> DateTime"""
        @property
        def DateStart(self) -> DateTime:"""DateStart { get; } -> DateTime"""
        @property
        def DaylightDelta(self) -> TimeSpan:"""DaylightDelta { get; } -> TimeSpan"""
        @property
        def DaylightTransitionEnd(self) -> TimeZoneInfo.TransitionTime:"""DaylightTransitionEnd { get; } -> TimeZoneInfo.TransitionTime"""
        @property
        def DaylightTransitionStart(self) -> TimeZoneInfo.TransitionTime:"""DaylightTransitionStart { get; } -> TimeZoneInfo.TransitionTime"""
        @staticmethod
        def CreateAdjustmentRule(dateStart: DateTime, dateEnd: DateTime, daylightDelta: TimeSpan, daylightTransitionStart: TimeZoneInfo.TransitionTime, daylightTransitionEnd: TimeZoneInfo.TransitionTime) -> TimeZoneInfo.AdjustmentRule:...
    class TransitionTime(ValueType, IEquatable[TimeZoneInfo.TransitionTime], _n_13_t_0, _n_13_t_1):
        # Auto-generated stub for System.TimeZoneInfo.TransitionTime: a
        # value type describing when a DST transition occurs, either on a
        # fixed date (CreateFixedDateRule) or a floating "nth weekday of
        # month" rule (CreateFloatingDateRule). Bodies are intentionally
        # empty stub declarations.
        @property
        def Day(self) -> int:"""Day { get; } -> int"""
        @property
        def DayOfWeek(self) -> DayOfWeek:"""DayOfWeek { get; } -> DayOfWeek"""
        @property
        def IsFixedDateRule(self) -> bool:"""IsFixedDateRule { get; } -> bool"""
        @property
        def Month(self) -> int:"""Month { get; } -> int"""
        @property
        def TimeOfDay(self) -> DateTime:"""TimeOfDay { get; } -> DateTime"""
        @property
        def Week(self) -> int:"""Week { get; } -> int"""
        @staticmethod
        def CreateFixedDateRule(timeOfDay: DateTime, month: int, day: int) -> TimeZoneInfo.TransitionTime:...
        @staticmethod
        def CreateFloatingDateRule(timeOfDay: DateTime, month: int, week: int, dayOfWeek: DayOfWeek) -> TimeZoneInfo.TransitionTime:...
class TimeZoneNotFoundException(Exception, _n_13_t_0, _n_11_t_0):
    # Auto-generated stub for System.TimeZoneNotFoundException. The three
    # __init__ lines mirror the .NET constructor overloads; in plain Python
    # later defs shadow earlier ones (stub convention only).
    def __init__(self) -> TimeZoneNotFoundException:...
    def __init__(self, message: str, innerException: Exception) -> TimeZoneNotFoundException:...
    def __init__(self, message: str) -> TimeZoneNotFoundException:...
class Tuple(_n_0_t_2, _n_0_t_1, IComparable, ITupleInternal, typing.Generic[T1]):
    # Auto-generated stub for the one-element System.Tuple`1. The many
    # Deconstruct overloads (21 arities) and ToValueTuple are extension
    # methods from System.TupleExtensions surfaced on the instance; bodies
    # are intentionally empty stub declarations.
    @property
    def Item1(self) -> T1:"""Item1 { get; } -> T1"""
    def __init__(self, item1: T1) -> Tuple:...
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object, item16: object, item17: object, item18: object, item19: object, item20: object, item21: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object, item16: object, item17: object, item18: object, item19: object, item20: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object, item16: object, item17: object, item18: object, item19: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object, item16: object, item17: object, item18: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object, item16: object, item17: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object, item16: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object, item6: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object, item5: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object, item4: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object, item3: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object, item2: object):
        """Extension from: System.TupleExtensions"""
    def Deconstruct(self, item1: object):
        """Extension from: System.TupleExtensions"""
    def ToValueTuple(self) -> ValueTuple[T1]:
        """Extension from: System.TupleExtensions"""
class TupleExtensions(object):
@staticmethod
def Deconstruct(value: Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any]]], item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object, item16: object, item17: object, item18: object, item19: object, item20: object, item21: object):...
@staticmethod
def Deconstruct(value: Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any]]], item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object, item16: object, item17: object, item18: object, item19: object, item20: object):...
@staticmethod
def Deconstruct(value: Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any]]], item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object, item16: object, item17: object, item18: object, item19: object):...
@staticmethod
def Deconstruct(value: Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, Tuple[typing.Any, typing.Any, typing.Any, typing.Any]]], item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: object, item11: object, item12: object, item13: object, item14: object, item15: object, item16: object, item17: object, item18: object):...
@staticmethod
def Deconstruct(value: Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, Tuple[typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, typing.Any, Tuple[typing.Any, typing.Any, typing.Any]]], item1: object, item2: object, item3: object, item4: object, item5: object, item6: object, item7: object, item8: object, item9: object, item10: | |
l="Smooth Edges",
c=repeated_callback(smooth_edges_callback),
ann=Modeling.smooth_edges.__doc__,
bgc=color.color
)
pm.intSliderGrp(
'smooth_edges_iteration_intField',
v=100,
min=0,
max=100
)
color.change()
pm.button(
'create_curve_from_mesh_edges_button',
l="Curve From Mesh Edges",
c=repeated_callback(Modeling.create_curve_from_mesh_edges),
ann="Creates a curve from selected mesh edges",
bgc=color.color
)
color.change()
pm.button(
'vertex_aligned_locator_button',
l="Vertex Aligned Locator",
c=repeated_callback(Modeling.vertex_aligned_locator),
ann="Creates an aligned locator from selected vertices",
bgc=color.color
)
color.change()
with pm.rowLayout(nc=8, rat=(1, "both", 0), adj=1):
pm.text('set_pivot_text', l='Set Pivot', bgc=color.color)
pm.button(
'center_button',
l="C",
c=repeated_callback(
Modeling.set_pivot,
0
),
bgc=(0.8, 0.8, 0.8)
)
pm.button(
'minus_X_button',
l="-X",
c=repeated_callback(
Modeling.set_pivot,
1
),
bgc=(1.000, 0.500, 0.666)
)
pm.button(
'plus_X_button',
l="+X",
c=repeated_callback(
Modeling.set_pivot,
2
),
bgc=(1.000, 0.500, 0.666)
)
pm.button(
'minus_Y_button',
l="-Y",
c=repeated_callback(
Modeling.set_pivot,
3
),
bgc=(0.666, 1.000, 0.500)
)
pm.button(
'plus_Y_button',
l="+Y",
c=repeated_callback(
Modeling.set_pivot,
4
),
bgc=(0.666, 1.000, 0.500)
)
# Bug fix: the Z pivot buttons were labeled "-X"/"+X" (copy-paste from the
# X buttons above); they call set_pivot with the Z axis codes 5 and 6.
pm.button(
    'minus_Z_button',
    l="-Z",
    c=repeated_callback(
        Modeling.set_pivot,
        5
    ),
    bgc=(0.500, 0.666, 1.000)
)
pm.button(
    'plus_Z_button',
    l="+Z",
    c=repeated_callback(
        Modeling.set_pivot,
        6
    ),
    bgc=(0.500, 0.666, 1.000)
)
color.change()
with pm.rowLayout(nc=7, rat=(1, "both", 0), adj=1):
pm.text(l='Text. Res', bgc=color.color)
pm.button(
l="128",
c=repeated_callback(
Modeling.set_texture_res,
128
),
bgc=Color.colors[0]
)
pm.button(
l="256",
c=repeated_callback(
Modeling.set_texture_res,
256
),
bgc=Color.colors[1]
)
pm.button(
l="512",
c=repeated_callback(
Modeling.set_texture_res,
512
),
bgc=Color.colors[2]
)
pm.button(
l="1024",
c=repeated_callback(
Modeling.set_texture_res,
1024
),
bgc=Color.colors[3]
)
pm.button(
l='2048',
c=repeated_callback(
Modeling.set_texture_res,
2048
),
bgc=Color.colors[4]
)
pm.button(
l='4096',
c=repeated_callback(
Modeling.set_texture_res,
4096
),
bgc=Color.colors[5]
)
pm.text(l='========== UV Tools =============')
color.change()
pm.button(
    'fix_uvsets_button',
    l="Fix UVSets (DiffuseUV -> map1)",
    c=repeated_callback(Modeling.fix_uvsets),
    # fix: annotation was the function object itself; every sibling button
    # passes the docstring string
    ann=Modeling.fix_uvsets.__doc__,
    bgc=color.color
)
color.change()
pm.button(
'select_zero_uv_area_faces_button',
l="Filter Zero UV Area Faces",
c=repeated_callback(Modeling.select_zero_uv_area_faces),
ann="Selects faces with zero uv area",
bgc=color.color
)
color.change()
pm.button(
'create_auto_uvmap_button',
l='Create Auto UVMap',
c=repeated_callback(Modeling.create_auto_uvmap),
ann=Modeling.create_auto_uvmap.__doc__,
bgc=color.color
)
with pm.rowLayout(nc=6, adj=1):
def transfer_uvs_button_callback(*args, **kwargs):
    """Read the selected sample-space radio button and run transfer_uvs.

    The radio buttons defined below are named 'W', 'L', 'UV', 'C', 'T';
    the querying call returns the selected button's name, which is mapped
    to the corresponding sample-space id 0-4 (presumably Maya's
    transferAttributes sampleSpace codes — confirm in Modeling.transfer_uvs).
    """
    label_lut = {
        'W': 0,
        'L': 1,
        'UV': 2,
        'C': 3,
        'T': 4
    }
    # query (q=1) the selected (sl=1) radio button of the collection
    sample_space = label_lut[
        pm.radioCollection(
            'transfer_uvs_radio_collection',
            q=1, sl=1
        )
    ]
    Modeling.transfer_uvs(sample_space=sample_space)
pm.button('transfer_uvs_button',
l="Transfer UVs",
c=repeated_callback(transfer_uvs_button_callback),
ann="Transfers UVs from one group to other, use it"
"for LookDev -> Alembic",
bgc=color.color)
pm.radioCollection('transfer_uvs_radio_collection')
button_with = 40
pm.radioButton(
'W', w=button_with, al='left', ann='World'
)
pm.radioButton(
'L', w=button_with, al='left', ann='Local'
)
pm.radioButton(
'UV', w=button_with, al='left', ann='UV'
)
pm.radioButton(
'C', w=button_with, al='left', ann='Component', sl=1
)
pm.radioButton(
'T', w=button_with, al='left', ann='Topology'
)
color.change()
pm.text(l='======= Manipulator Tools =======')
pm.button('set_to_point_button',
l="Set To Point",
c=repeated_callback(pm.mel.eval, "manipMoveOrient 1;"),
ann="Set manipulator to the point",
bgc=color.color)
pm.button('set_to_edge_button',
l="Set To Edge",
c=repeated_callback(pm.mel.eval, "manipMoveOrient 2;"),
ann="Set manipulator to the edge",
bgc=color.color)
pm.button('set_to_face_button',
l="Set To Face",
c=repeated_callback(pm.mel.eval, "manipMoveOrient 3;"),
ann="Set manipulator to the face",
bgc=color.color)
color.change()
pm.button('create_bbox_from_selection_button',
l="Create BBOX from selection",
c=repeated_callback(Modeling.bbox_from_selection),
ann=Modeling.bbox_from_selection.__doc__,
bgc=color.color)
# store commands
__commands__.extend(modeling_column_layout.children())
# ----- RIGGING ------
rigging_columnLayout = pm.columnLayout(
'rigging_columnLayout',
adj=True, cal="center",
rs=row_spacing
)
with rigging_columnLayout:
color.reset()
pm.button(
'create_joints_on_curve_ui_button',
l="Create Joints On Curve UI",
c=repeated_callback(Rigging.create_joints_on_curve_ui),
ann=Rigging.create_joints_on_curve_ui.__doc__,
bgc=color.color
)
pm.button(
'mirror_transformation_button',
l="Mirror Transformation",
c=repeated_callback(Rigging.mirror_transformation),
ann=Rigging.mirror_transformation.__doc__,
bgc=color.color
)
color.change()
pm.button(
'IKFKLimbRigger_button',
l="IK/FK Limb Rigger",
c=repeated_callback(Rigging.ik_fk_limb_rigger),
ann=Rigging.ik_fk_limb_rigger.__doc__,
bgc=color.color
)
with pm.rowLayout(nc=2, adj=1):
def ik_fk_limb_rigger_callback():
    """Run the bendy IK/FK limb rigger with the subdivision count from the UI int field."""
    # NOTE(review): unlike sibling callbacks this takes no *args;
    # presumably repeated_callback swallows the button's own argument — confirm.
    subdivision = pm.intField('bendy_ik_fk_subdivision_count_field', q=1, v=1)
    Rigging.bendy_ik_fk_limb_rigger(subdivision=subdivision)
pm.button(
'bendy_ik_fk_limb_rigger_button',
l='IK/FK Limb Rigger (Bendy)',
c=repeated_callback(ik_fk_limb_rigger_callback),
ann=Rigging.bendy_ik_fk_limb_rigger.__doc__,
bgc=color.color
)
pm.intField('bendy_ik_fk_subdivision_count_field', min=0, v=2)
pm.button(
'ReverseFootRigger_button',
l="Reverse Foot Rigger",
c=repeated_callback(Rigging.reverse_foot_rigger),
ann=Rigging.reverse_foot_rigger.__doc__,
bgc=color.color
)
pm.button(
'squashStretchBendRigger_button',
l="Squash/Stretch/Bend Rigger",
c=repeated_callback(Rigging.squash_stretch_bend_rigger),
ann=Rigging.squash_stretch_bend_rigger.__doc__,
bgc=color.color
)
pm.button(
'setupStretchySplineIKCurve_button',
l="setup stretchy splineIK curve",
c=repeated_callback(Rigging.setup_stretchy_spline_ik_curve),
ann="connects necessary nodes to calculate arcLength "
"change in percent",
bgc=color.color
)
pm.button(
'selectJointsDeformingTheObject_button',
l="select joints deforming the object",
c=repeated_callback(Rigging.select_joints_deforming_object),
ann="select joints that deform the object",
bgc=color.color
)
color.change()
pm.button(
'create_axial_correction_group_button',
l="Create Axial Correction Groups",
c=repeated_callback(Rigging.axial_correction_group),
ann=Rigging.axial_correction_group.__doc__,
bgc=color.color
)
pm.button(
    'create_zv_parent_compatible_groups_button',
    l="Create ZV Parent Compatible Groups",
    c=repeated_callback(Rigging.create_zv_parent_compatible_groups),
    # fix: annotation showed axial_correction_group's docstring
    # (copy-paste from the button above)
    ann=Rigging.create_zv_parent_compatible_groups.__doc__,
    bgc=color.color
)
color.change()
pm.button(
'setClustersToAbsolute_button',
l="set selected clusters to absolute",
c=repeated_callback(Rigging.set_clusters_relative_state, 0),
ann="set Clusters to Absolute",
bgc=color.color
)
pm.button(
'setClustersToRelative_button',
l="set selected clusters to relative",
c=repeated_callback(
Rigging.set_clusters_relative_state, 1
),
ann="set Clusters to Relative",
bgc=color.color
)
color.change()
pm.button(
'addControllerShape_button',
l="add controller shape",
c=repeated_callback(Rigging.add_controller_shape),
ann="add the shape in the selected joint",
bgc=color.color
)
pm.button(
'replaceControllerShape_button',
l="replace controller shape",
c=repeated_callback(Rigging.replace_controller_shape),
ann="replaces the shape in the selected joint",
bgc=color.color
)
color.change()
def pin_controller_callback(color, *args):
    """Create a PinController on the currently selected vertex.

    :param color: RGB triplet (e.g. [1, 0, 0]) assigned to the controller.
    """
    from anima.env.mayaEnv import rigging
    # assumes the first selected item is a mesh vertex — TODO confirm/validate
    vertex = pm.ls(sl=1)[0]
    pc = rigging.PinController()
    pc.color = color
    pc.pin_to_vertex = vertex
    pc.setup()
# TODO: Give the user the ability of selecting custom colors
with pm.rowLayout(nc=4, adj=1):
pm.text(l="Pin Controller")
pm.button('pin_controller_red_button', l="R",
c=repeated_callback(pin_controller_callback, [1, 0, 0]),
ann=pin_controller_callback.__doc__,
bgc=[1, 0, 0])
pm.button('pin_controller_green_button', l="G",
c=repeated_callback(pin_controller_callback, [0, 1, 0]),
ann=pin_controller_callback.__doc__,
bgc=[0, 1, 0])
pm.button('pin_controller_blue_button', l="B",
c=repeated_callback(pin_controller_callback, [0, 0, 1]),
ann=pin_controller_callback.__doc__,
bgc=[0, 0, 1])
pm.button('rivet_button', l="create rivet",
c=repeated_callback(mel.eval, 'rivet'),
ann="create rivet",
bgc=color.color)
pm.button('oyAutoRivet_button', l="auto rivet",
c=repeated_callback(mel.eval, 'oyAutoRivet'),
ann="auto rivet",
bgc=color.color)
pm.button(
'oyAutoRivetFollicle_button',
l="auto rivet (Follicle)",
c=repeated_callback(auxiliary.auto_rivet),
ann="creates a rivet setup by using hair follicles",
bgc=color.color
)
pm.button(
'rivet_per_face_button',
l="rivet per face (Follicle)",
c=repeated_callback(auxiliary.rivet_per_face),
ann="creates a rivet setup per selected face by using hair "
"follicles",
bgc=color.color
)
pm.button('create_hair_from_curves_button',
l="Create Hair From Curves",
c=repeated_callback(auxiliary.hair_from_curves),
ann="creates hair from curves",
bgc=color.color)
color.change()
pm.button('artPaintSkinWeightsTool_button',
l="paint weights tool",
c=repeated_callback(mel.eval, 'ArtPaintSkinWeightsTool'),
ann="paint weights tool",
bgc=color.color)
def skin_tools_ui_caller(*args):
    """Open the SkinToolsUI window (Maya button callback; args ignored)."""
    from anima.env.mayaEnv.rigging import SkinToolsUI
    st = SkinToolsUI()
    st.ui()
pm.button('skin_tools_button', l="Skin Tools",
c=skin_tools_ui_caller,
ann="skin tools",
bgc=color.color)
pm.button('oyFixBoundJoint_button', l="fix_bound_joint",
c=repeated_callback(Rigging.fix_bound_joint),
ann="fix_bound_joint",
bgc=color.color)
pm.button('toggle_local_rotation_axes_button',
l="Toggle Local Rotation Axes",
c=repeated_callback(General.toggle_attributes, "displayLocalAxis"),
ann="Toggle Local Rotation Axes",
bgc=color.color)
pm.button('toggle_display_rotate_pivot_button',
l="Toggle Display Rotate Pivot",
c=repeated_callback(General.toggle_attributes, "displayRotatePivot"),
ann="Toggle Display Rotate Pivot",
bgc=color.color)
pm.button('seroBlendController_button',
l="seroBlendController",
c=repeated_callback(mel.eval, 'seroBlendController'),
ann="seroBlendController",
bgc=color.color)
pm.button('align_to_pole_vector_button',
l="Align To Pole Vector",
c=repeated_callback(auxiliary.align_to_pole_vector),
ann="align to pole vector",
bgc=color.color)
color.change()
pm.button('oyResetCharSet_button', l="oyResetCharSet",
c=repeated_callback(mel.eval, 'oyResetCharSet'),
ann="reset char set",
bgc=color.color)
pm.button('export_blend_connections_button',
l="Export blend connections",
c=repeated_callback(auxiliary.export_blend_connections),
ann="export blend connections",
bgc=color.color)
color.change()
pm.button('createFollicles_button', l="create follicles",
c=repeated_callback(Rigging.create_follicles),
ann="create follicles",
bgc=color.color)
color.change()
pm.button('oyResetTweaks_button', l="reset tweaks",
c=repeated_callback(Rigging.reset_tweaks),
ann="reset tweaks",
bgc=color.color)
color.change()
def add_cacheable_attribute_callback(*args):
    # NOTE: the docstring below is user-visible — it is wired to the
    # button's `ann` tooltip — so its wording is left untouched.
    """add <b>cacheable</b> attribute to the selected nodes
    """
    # *args absorbs whatever the Maya UI passes to callbacks, matching
    # the other callbacks in this layout (e.g. pin_controller_callback,
    # skin_tools_ui_caller).
    for node in pm.selected():
        Rigging.add_cacheable_attribute(node)
pm.button('add_cacheable_attr_button', l="add `cacheable` attribute",
c=repeated_callback(add_cacheable_attribute_callback),
ann=add_cacheable_attribute_callback.__doc__,
bgc=color.color)
# store commands
__commands__.extend(rigging_columnLayout.children())
# ----- RENDER ------
render_columnLayout = pm.columnLayout(
'render_columnLayout',
adj=True,
cal="center",
rs=row_spacing
)
with render_columnLayout:
color.reset()
color.change()
pm.button(
'update_render_settings_button',
l="Update Render Settings",
c=repeated_callback(Render.update_render_settings),
ann=Render.update_render_settings.__doc__,
bgc=color.color
)
color.change()
pm.button(
'delete_render_layers_button',
l="Delete Render Layers",
c=repeated_callback(Render.delete_render_layers),
ann=Render.delete_render_layers.__doc__,
bgc=color.color
)
pm.button(
'delete_display_layers_button',
l="Delete Display Layers",
c=repeated_callback(Render.delete_display_layers),
ann=Render.delete_display_layers.__doc__,
bgc=color.color
)
pm.button(
'delete_render_and_display_layers_button',
l="Delete Render and Display Layers",
c=repeated_callback(Render.delete_render_and_display_layers),
ann=Render.delete_render_and_display_layers.__doc__,
bgc=color.color
)
color.change()
pm.button(
'delete_unused_shading_nodes_button',
l="Delete Unused Shading Nodes",
c=repeated_callback(Render.delete_unused_shading_nodes),
ann=Render.delete_unused_shading_nodes.__doc__,
bgc=color.color
)
color.change()
pm.button(
'duplicate_input_graph_button',
l="Duplicate Input Graph",
c=repeated_callback(Render.duplicate_input_graph),
ann=Render.duplicate_input_graph.__doc__,
bgc=color.color
)
pm.button(
'duplicate_with_connections_button',
l="Duplicate With Connections To Network",
c=repeated_callback(Render.duplicate_with_connections),
ann=Render.duplicate_with_connections.__doc__,
bgc=color.color
)
color.change()
pm.text(l='=========== RedShift Tools ===========')
pm.button(
'generate_rs_from_selection_button',
l='Generate RSProxy From Selection',
c=repeated_callback(Render.generate_rsproxy_from_selection),
ann=Render.generate_rsproxy_from_selection.__doc__,
bgc=color.color
)
pm.button(
'generate_rs_from_selection_per_selection_button',
l='Generate RSProxy From Selection (Per Selection)',
c=repeated_callback(Render.generate_rsproxy_from_selection, True),
ann=Render.generate_rsproxy_from_selection.__doc__,
bgc=color.color
)
pm.button(
'set_rsproxy_to_bbox_button',
l='RSProxy -> Bounding Box',
c=repeated_callback(Render.rsproxy_to_bounding_box),
ann=Render.rsproxy_to_bounding_box.__doc__,
bgc=color.color
)
pm.button(
'set_rsproxy_to_preview_mesh_button',
l='RSProxy -> Preview Mesh',
c=repeated_callback(Render.rsproxy_to_preview_mesh),
ann=Render.rsproxy_to_preview_mesh.__doc__,
bgc=color.color
)
color.change()
pm.text(l='===== RedShift IC + IPC Bake =====')
pm.button(
'redshift_ic_ipc_bake_button',
l="Do Bake",
c=repeated_callback(Render.redshift_ic_ipc_bake),
ann=Render.redshift_ic_ipc_bake.__doc__,
bgc=color.color
)
pm.button(
'redshift_ic_ipc_bake_restore_button',
l="Restore Settings",
c=repeated_callback(Render.redshift_ic_ipc_bake_restore),
ann=Render.redshift_ic_ipc_bake_restore.__doc__,
bgc=color.color
)
pm.text(l='======================================')
color.change()
pm.button(
'submit_afanasy_button',
l="Afanasy Job Submitter",
c=repeated_callback(Render.afanasy_job_submitter),
ann=Render.afanasy_job_submitter.__doc__,
bgc=color.color
)
color.change()
pm.button(
'open_node_in_browser_button',
l="Open node in browser",
c=repeated_callback(Render.open_node_in_browser),
ann="Open node in browser",
bgc=color.color
)
color.change()
pm.button('auto_convert_to_redshift_button',
l="Auto Convert Scene To RedShift (BETA)",
c=repeated_callback(Render.auto_convert_to_redshift),
ann="Automatically converts the scene from Arnold to "
"Redshift, including materials and lights",
bgc=color.color)
pm.button('convert_nodes_to_redshift_button',
l="Convert Selected To RedShift (BETA)",
c=repeated_callback(Render.convert_nodes_to_redshift),
ann="Automatically converts the selected node from "
"Arnold to Redshift",
bgc=color.color)
def set_shape_attribute_wrapper(attr_name, value):
    """Call Render.set_shape_attribute with the current UI checkbox state.

    Args:
        attr_name: Name of the shape attribute to set.
        value: Value to assign to the attribute.

    NOTE(review): `apply_to_hierarchy_checkBox` and
    `disable_undo_queue_check_box` are closure variables created elsewhere
    in the enclosing UI-building scope — verify they exist by the time
    this callback can fire.
    """
    # Query the current state of the two option check boxes.
    apply_to_hierarchy = pm.checkBox(
        apply_to_hierarchy_checkBox,
        q=True,
        v=True
    )
    disable_undo = pm.checkBox(
        disable_undo_queue_check_box,
        q=True,
        v=True
    )
    Render.set_shape_attribute(
        attr_name,
        value,
        apply_to_hierarchy,
        disable_undo
    )
with pm.rowLayout(nc=3, rat=(1, "both", 0), adj=1):
pm.text('renderThumbnailUpdate_text',
l="renderThumbnailUpdate",
bgc=color.color)
pm.button('set_renderThumbnailUpdate_ON_button',
l="ON",
c=repeated_callback(pm.renderThumbnailUpdate, 1),
bgc=(0, 1, 0))
pm.button('set_renderThumbnailUpdate_OFF_button',
l="OFF",
c=repeated_callback(pm.renderThumbnailUpdate, 0),
bgc=(1, 0, 0))
color.change()
pm.button('replaceShadersWithLast_button',
l="replace shaders with last",
c=repeated_callback(Render.replace_shaders_with_last),
ann="replace shaders with last",
bgc=color.color)
color.change()
pm.button('createTextureRefObject_button',
l="create texture ref. object",
c=repeated_callback(Render.create_texture_ref_object),
ann="create texture ref. object",
bgc=color.color)
pm.text(l='========== Texture Tools =============')
color.change()
pm.button('assign_substance_textures_button',
l="Assign Substance Textures",
c=repeated_callback(Render.assign_substance_textures),
ann=Render.assign_substance_textures.__doc__,
bgc=color.color)
color.change()
pm.button('normalize_texture_paths_button',
l="Normalize Texture Paths (remove $)",
c=repeated_callback(Render.normalize_texture_paths),
ann=Render.normalize_texture_paths.__doc__,
bgc=color.color)
pm.button('unnormalize_texture_paths_button',
l="Unnormalize Texture Paths (add $)",
c=repeated_callback(Render.unnormalize_texture_paths),
ann=Render.unnormalize_texture_paths.__doc__,
bgc=color.color)
color.change()
pm.button('assign_random_material_color_button',
l="Assign Material with Random Color",
c=repeated_callback(Render.assign_random_material_color),
ann=Render.assign_random_material_color.__doc__,
bgc=color.color)
pm.button('randomize_material_color_button',
l="Randomize Material Color",
c=repeated_callback(Render.randomize_material_color),
ann=Render.randomize_material_color.__doc__,
bgc=color.color)
color.change()
pm.button('import_image_as_plane_button',
l="Import Image as Plane",
c=repeated_callback(Render.import_image_as_plane),
ann=Render.import_image_as_plane.__doc__,
bgc=color.color)
pm.text(l='============ Camera Tools ============')
color.change()
pm.button(
'CameraFilmOffsetTool_button',
l="Camera Film Offset Tool",
c=repeated_callback(
camera_tools.camera_film_offset_tool
),
ann="Camera Film Offset Tool",
bgc=color.color
)
def camera_focus_plane_tool_callback():
    """Run camera_focus_plane_tool on the first selected node.

    Raises IndexError if the selection is empty; assumes (unverified)
    that the first selected node is a camera.
    """
    camera = pm.ls(sl=1)[0]
    camera_tools.camera_focus_plane_tool(camera)
pm.button(
'CameraFocusPlaneTool_button',
l="Camera Focus Plane Tool",
c=repeated_callback(camera_focus_plane_tool_callback),
ann="Camera Film Offset Tool",
bgc=color.color
)
pm.button(
'lock_tracked_camera_channels_button',
l="Lock Tracked Camera Channels",
c=repeated_callback(camera_tools.lock_tracked_camera_channels),
ann=camera_tools.lock_tracked_camera_channels.__doc__,
bgc=color.color
)
color.change()
pm.text(l='===== Vertigo =====')
pm.button('vertigo_setup_look_at_button',
l="Setup -> Look At",
c=repeated_callback(Render.vertigo_setup_look_at),
ann="Setup Look At",
bgc=color.color)
| |
shape_cells=self.shape_cells, verb=0)
assert 'semicoarsening : True [1 2 1 3]' in var.__repr__()
var = solver.MGParameters(
cycle='F', sslsolver=False, semicoarsening=2,
linerelaxation=False, shape_cells=self.shape_cells, verb=0)
assert 'semicoarsening : True [2]' in var.__repr__()
with pytest.raises(ValueError, match='`semicoarsening` must be one o'):
solver.MGParameters(
cycle='F', sslsolver=False, semicoarsening=5,
linerelaxation=False, shape_cells=self.shape_cells, verb=0)
def test_linerelaxation(self):
    """Check `linerelaxation`: shorthand expansion into direction lists
    and rejection of invalid values."""
    var = solver.MGParameters(
        cycle='F', sslsolver=False, semicoarsening=False,
        linerelaxation=True, shape_cells=self.shape_cells, verb=0)
    # True expands to the default direction set (as shown by repr).
    assert 'linerelaxation : True [4 5 6]' in var.__repr__()
    var = solver.MGParameters(
        cycle='F', sslsolver=False, semicoarsening=False,
        linerelaxation=1247, shape_cells=self.shape_cells, verb=0)
    # An integer is split into its digits as individual directions.
    assert 'linerelaxation : True [1 2 4 7]' in var.__repr__()
    var = solver.MGParameters(
        cycle='F', sslsolver=False, semicoarsening=False,
        linerelaxation=1, shape_cells=self.shape_cells, verb=0,
        clevel=1)
    assert 'linerelaxation : True [1]' in var.__repr__()
    assert_allclose(var.clevel, 1)
    # Out-of-range values must raise.
    with pytest.raises(ValueError, match='`linerelaxation` must be one o'):
        solver.MGParameters(
            cycle='F', sslsolver=False, semicoarsening=False,
            linerelaxation=-9, shape_cells=self.shape_cells, verb=0)
def test_sslsolver_and_cycle(self):
    """Check the interplay of `sslsolver` and `cycle`, including input
    validation for both."""
    # At least one of `cycle` / `sslsolver` has to be given.
    with pytest.raises(ValueError, match='At least `cycle` or `sslsolve'):
        solver.MGParameters(
            cycle=None, sslsolver=False, semicoarsening=False,
            linerelaxation=False, shape_cells=self.shape_cells, verb=0)
    var = solver.MGParameters(
        cycle='F', sslsolver=True, semicoarsening=True,
        linerelaxation=False, shape_cells=self.shape_cells, verb=0,
        maxit=33)
    # sslsolver=True selects 'bicgstab'; maxit moves to the ssl solver
    # and the MG maxit becomes 3 (as asserted below).
    assert "sslsolver : 'bicgstab'" in var.__repr__()
    assert var.ssl_maxit == 33
    assert var.maxit == 3
    with pytest.raises(ValueError, match='`sslsolver` must be True'):
        solver.MGParameters(
            cycle='F', sslsolver='abcd', semicoarsening=0,
            linerelaxation=False, shape_cells=self.shape_cells, verb=0)
    with pytest.raises(ValueError, match='`sslsolver` must be True'):
        solver.MGParameters(
            cycle='F', sslsolver=4, semicoarsening=0,
            linerelaxation=False, shape_cells=self.shape_cells, verb=0)
    with pytest.raises(ValueError, match='`cycle` must be one of'):
        solver.MGParameters(
            cycle='G', sslsolver=False, semicoarsening=False,
            linerelaxation=False, shape_cells=self.shape_cells, verb=0)
# 4. Wrong grid size
def test_wrong_grid_size(self):
    """Too few cells in a direction must be rejected."""
    with pytest.raises(ValueError, match='Nr. of cells must be at least'):
        solver.MGParameters(
            cycle='F', sslsolver=False, semicoarsening=False,
            linerelaxation=False, shape_cells=(1, 2, 3), verb=0)
def test_bad_grid_size(self):
    """A warning note appears in repr when the cell counts are not well
    suited for multigrid coarsening."""
    inp = {'cycle': 'F', 'sslsolver': False, 'semicoarsening': False,
           'linerelaxation': False, 'verb': 0}
    txt = ":: Grid not optimal for MG solver ::"

    # One large lowest => warning.
    var = solver.MGParameters(shape_cells=(11*2**3, 2**5, 2**4), **inp)
    assert txt in var.__repr__()

    # Large lowest, but clevel smaller => no warning.
    var = solver.MGParameters(
        shape_cells=(11*2**5, 11*2**4, 11*2**5), clevel=4, **inp)
    assert txt not in var.__repr__()

    # Large lowest, clevel bigger => warning.
    var = solver.MGParameters(
        shape_cells=(11*2**5, 11*2**4, 11*2**5), clevel=5, **inp)
    assert txt in var.__repr__()

    # Only 2 times dividable => warning.
    var = solver.MGParameters(shape_cells=(2**3, 2**3, 2**3), **inp)
    assert txt in var.__repr__()
def test_cprint(self, capsys):
    """`cprint` must print and log a message only when its verbosity does
    not exceed the configured `verb` level (here verb=2)."""
    var = solver.MGParameters(
        cycle='F', sslsolver=False, semicoarsening=True, log=1,
        linerelaxation=False, shape_cells=self.shape_cells, verb=2)
    # verbosity 3 > verb 2: neither printed nor logged.
    var.cprint('test', 3)
    out, _ = capsys.readouterr()
    assert out == ""
    assert var.log_message == ""
    # verbosity 1 <= verb 2: printed and logged.
    var.cprint('test', 1)
    out, _ = capsys.readouterr()
    assert out == "test\n"
    assert var.log_message == "test\n"
def test_RegularGridProlongator():
    """Compare emg3d's RegularGridProlongator against a SciPy reference."""

    def prolon_scipy(grid, cgrid, efield, cefield):
        """Compute SciPy alternative."""
        # y-z node coordinates of the fine grid as (n, 2) points.
        # (The docstring above was previously misplaced after these
        # statements and therefore not a docstring at all.)
        CZ, CY = np.broadcast_arrays(grid.nodes_z, grid.nodes_y[:, None])
        yz = np.r_[CY.ravel('F'), CZ.ravel('F')].reshape(-1, 2, order='F')
        for ixc in range(cgrid.shape_cells[0]):
            # Bilinear interpolation in the y-z plane
            fn = si.RegularGridInterpolator(
                (cgrid.nodes_y, cgrid.nodes_z), cefield.fx[ixc, :, :],
                bounds_error=False, fill_value=None)
            hh = fn(yz).reshape(grid.shape_edges_x[1:], order='F')
            # Piecewise constant interpolation in x-direction
            efield[2*ixc, :, :] += hh
            efield[2*ixc+1, :, :] += hh
        return efield

    def prolon_emg3d(grid, cgrid, efield, cefield):
        """Compute emg3d alternative."""
        fn = solver.RegularGridProlongator(
            cgrid.nodes_y, cgrid.nodes_z, grid.nodes_y, grid.nodes_z)
        for ixc in range(cgrid.shape_cells[0]):
            # Bilinear interpolation in the y-z plane
            hh = fn(cefield.fx[ixc, :, :]).reshape(
                grid.shape_edges_x[1:], order='F')
            # Piecewise constant interpolation in x-direction
            efield[2*ixc, :, :] += hh
            efield[2*ixc+1, :, :] += hh
        return efield

    # Create fine grid. (Dead code removed: hx was first built as
    # 50*np.ones(2**7) and then immediately overwritten.)
    hx = np.array([4, 1.1, 2, 3])
    hy = np.array([2, 0.1, 20, np.pi])
    hz = np.array([1, 2, 5, 1])
    grid = emg3d.TensorMesh([hx, hy, hz], origin=np.array([0, 0, 0]))

    # Create coarse grid.
    chx = np.diff(grid.nodes_x[::2])
    cgrid = emg3d.TensorMesh([chx, chx, chx], origin=np.array([0, 0, 0]))

    # Create empty fine grid fields.
    efield1 = emg3d.Field(grid)
    efield2 = emg3d.Field(grid)

    # Create coarse grid field with some values.
    cefield = emg3d.Field(cgrid)
    cefield.fx = np.arange(cefield.fx.size)
    # NOTE(review): this second assignment overwrites the line above, so
    # the field ends up purely imaginary; if a complex field was intended
    # the line should read `cefield.fx += ...` — confirm.
    cefield.fx = 1j*np.arange(cefield.fx.size)/10

    # Compare
    out1 = prolon_scipy(grid, cgrid, efield1.fx, cefield)
    out2 = prolon_emg3d(grid, cgrid, efield2.fx, cefield)
    assert_allclose(out1, out2)
def test_current_sc_dir():
    """_current_sc_dir must adapt the semicoarsening direction to grids
    that are too small in one or more dimensions."""
    hx = np.ones(4)
    grid = emg3d.TensorMesh([hx, hx, hx], (0, 0, 0))  # Big enough
    # Big enough, no change.
    for sc_dir in range(4):
        assert sc_dir == solver._current_sc_dir(sc_dir, grid)
    # Small in all directions => always 6 (comment fixed; it used to
    # claim 0, contradicting the asserts below).
    grid = emg3d.TensorMesh([[2, 2], [2, 2], [2, 2]], (0, 0, 0))
    for sc_dir in range(4):
        assert 6 == solver._current_sc_dir(sc_dir, grid)
    # Small in y, z
    grid = emg3d.TensorMesh([hx, [2, 2], [2, 2]], (0, 0, 0))
    assert 4 == solver._current_sc_dir(0, grid)
    assert 6 == solver._current_sc_dir(1, grid)
    assert 4 == solver._current_sc_dir(2, grid)
    assert 4 == solver._current_sc_dir(3, grid)
    # Small in x, z
    grid = emg3d.TensorMesh([[2, 2], hx, [2, 2]], (0, 0, 0))
    assert 5 == solver._current_sc_dir(0, grid)
    assert 5 == solver._current_sc_dir(1, grid)
    assert 6 == solver._current_sc_dir(2, grid)
    assert 5 == solver._current_sc_dir(3, grid)
def test_current_lr_dir():
    """_current_lr_dir must adapt the line-relaxation direction to grids
    that are too small in one or more dimensions."""
    hx = np.ones(4)
    grid = emg3d.TensorMesh([hx, hx, hx], (0, 0, 0))  # Big enough
    # Big enough, no change.
    for lr_dir in range(8):
        assert lr_dir == solver._current_lr_dir(lr_dir, grid)
    # Small in all directions => always 0
    grid = emg3d.TensorMesh([[2, 2], [2, 2], [2, 2]], (0, 0, 0))
    for lr_dir in range(8):
        assert 0 == solver._current_lr_dir(lr_dir, grid)
    # Small in y, z
    grid = emg3d.TensorMesh([hx, [2, 2], [2, 2]], (0, 0, 0))
    for lr_dir in [0, 1]:
        assert lr_dir == solver._current_lr_dir(lr_dir, grid)
    for lr_dir in [2, 3, 4]:
        assert 0 == solver._current_lr_dir(lr_dir, grid)
    for lr_dir in [5, 6, 7]:
        assert 1 == solver._current_lr_dir(lr_dir, grid)
    # Small in z
    grid = emg3d.TensorMesh([hx, hx, [2, 2]], (0, 0, 0))
    for lr_dir in [0, 1, 2, 6]:
        assert lr_dir == solver._current_lr_dir(lr_dir, grid)
    assert 0 == solver._current_lr_dir(3, grid)
    assert 2 == solver._current_lr_dir(4, grid)
    assert 1 == solver._current_lr_dir(5, grid)
    assert 6 == solver._current_lr_dir(7, grid)
def test_terminate():
    """Exercise solver._terminate for every exit condition using a fake
    MGParameters object."""

    class MGParameters:
        """Fake MGParameters class."""

        def __init__(self, verb=0, sslsolver=None):
            self.verb = verb
            self.exit_message = ""
            self.sslsolver = sslsolver
            self.maxit = 5  # iteration cap checked by _terminate
            self.l2_refe = 1e-3
            self.tol = 1e-2

        def cprint(self, info, verbosity, **kwargs):
            # Capture the message instead of printing it.
            self.info = info

    # Converged
    var = MGParameters()
    out = solver._terminate(var, 1e-6, 5e-6, 1)
    assert out is True
    assert var.exit_message == "CONVERGED"
    assert " > " + var.exit_message in var.info

    # Diverged if it is 10x larger than last or not a number.
    var = MGParameters(verb=3)
    out = solver._terminate(var, np.inf, 5e-6, 1)
    assert out is True
    assert var.exit_message == "DIVERGED"
    assert " > " + var.exit_message in var.info

    # Stagnated if it is >= the stagnation value.
    var = MGParameters()
    out = solver._terminate(var, 1e-3, 1e-4, 3)
    assert out is True
    assert var.exit_message == "STAGNATED"
    assert " > " + var.exit_message in var.info
    out = solver._terminate(var, 1e-3, 1e-4, 1)  # Not on first iteration
    assert out is False
    # With an ssl solver, stagnation raises instead of returning.
    var.sslsolver = True
    with pytest.raises(solver._ConvergenceError):
        solver._terminate(var, 1e-3, 1e-4, 3)

    # Maximum iterations reached.
    # NOTE(review): the positional 5 sets `verb`, not `maxit` (maxit is
    # hard-coded to 5 in the fake class) — the test still works, but
    # confirm the argument was intended.
    var = MGParameters(5)
    out = solver._terminate(var, 1e-5, 1e-4, 5)
    assert out is True
    assert var.exit_message == "MAX. ITERATION REACHED, NOT CONVERGED"
    assert " > " + var.exit_message in var.info
def test_restrict_model_parameters():
    """Check that restriction sums cell parameters correctly for every
    semicoarsening mode (0-6)."""
    data = np.arange(1, 9).reshape((2, 2, 2), order='F')
    # array([[[1, 5],
    #         [3, 7]],
    #
    #        [[2, 6],
    #         [4, 8]]])
    # Mode 0 sums over all three dimensions; the other modes leave one
    # or two dimensions unrestricted, as the expected sums show.
    assert_allclose(solver._restrict_model_parameters(data, 0).ravel('F'),
                    [1+2+3+4+5+6+7+8])
    assert_allclose(solver._restrict_model_parameters(data, 1).ravel('F'),
                    [1+3+5+7, 2+4+6+8])
    assert_allclose(solver._restrict_model_parameters(data, 2).ravel('F'),
                    [1+5+2+6, 3+7+4+8])
    assert_allclose(solver._restrict_model_parameters(data, 3).ravel('F'),
                    [1+2+3+4, 5+6+7+8])
    assert_allclose(solver._restrict_model_parameters(data, 4).ravel('F'),
                    [1+2, 3+4, 5+6, 7+8])
    assert_allclose(solver._restrict_model_parameters(data, 5).ravel('F'),
                    [1+3, 2+4, 5+7, 6+8])
    assert_allclose(solver._restrict_model_parameters(data, 6).ravel('F'),
                    [1+5, 2+6, 3+7, 4+8])
def test_get_restriction_weights():
    """Check restriction weights against the simple example of equation 9
    in [Muld06]_, including the dummy weights used for axes that are not
    restricted."""
    x = [500, 700, 800, 1000]
    cx = [1200, 1800]
    y = [2, 2, 2, 2]
    cy = [4, 4]
    grid = emg3d.TensorMesh([x, y, x], (0, 0, 0))
    cgrid = emg3d.TensorMesh([cx, cy, cx], (0, 0, 0))

    # 1. Simple example following equation 9, [Muld06]_.
    wxl = np.array([350/250, 250/600, 400/900])
    wx0 = np.array([1., 1., 1.])
    wxr = np.array([350/600, 500/900, 400/500])
    wyl = np.array([1, 0.5, 0.5])
    wy0 = np.array([1., 1., 1.])
    wyr = np.array([0.5, 0.5, 1])
    wdl = np.array([0., 0., 0., 0., 0.])  # dummy
    wd0 = np.array([1., 1., 1., 1., 1.])  # dummy
    wdr = np.array([0., 0., 0., 0., 0.])  # dummy
    # Modes 5 and 6 leave some axes unrestricted; those axes receive the
    # dummy weights (checked via the else-branches below).
    for i in [0, 5, 6]:
        wx, wy, wz = solver._get_restriction_weights(grid, cgrid, i)
        if i not in [5, 6]:
            assert_allclose(wxl, wx[0])
            assert_allclose(wx0, wx[1])
            assert_allclose(wxr, wx[2])
        else:
            assert_allclose(wdl, wx[0])
            assert_allclose(wd0, wx[1])
            assert_allclose(wdr, wx[2])
        if i != 6:
            assert_allclose(wyl, wy[0])
            assert_allclose(wy0, wy[1])
            assert_allclose(wyr, wy[2])
        else:
            assert_allclose(wdl, wy[0])
            assert_allclose(wd0, wy[1])
            assert_allclose(wdr, wy[2])
        if i != 5:
            assert_allclose(wxl, wz[0])
            assert_allclose(wx0, wz[1])
            assert_allclose(wxr, wz[2])
        else:
            assert_allclose(wdl, wz[0])
            assert_allclose(wd0, wz[1])
            assert_allclose(wdr, wz[2])
def test_ConvergenceError():
    """_ConvergenceError must be raisable and catchable like any exception."""
    with pytest.raises(solver._ConvergenceError):
        raise solver._ConvergenceError
def test_print_cycle_info(capsys):
var = solver.MGParameters(
verb=4, cycle='F', sslsolver=False, linerelaxation=False,
semicoarsening=False, shape_cells=(16, 8, 2))
var.level_all = [0, 1, | |
JG : Initial commit.
2017-06-05 - HP : Modified default bp-value to be on diagonal.
2017-08-15 - HP : Added flag to leave mirror line on the diagonal.
Code will not line mirror unsquare data.
'''
def sym2d(F, n):
    """Enforce n-fold rotational symmetry by averaging F over its
    rotations (clockwise and counter-clockwise)."""
    step = 360.0/n
    acc = np.zeros_like(F)
    for k in range(n):
        for sign in (1, -1):
            acc += snd.rotate(F, sign*step*k, reshape=False)
    acc /= 2*n
    return acc
def linmirr(F, x1, y1):
    """Mirror F about the line through the image center and (x1, y1) and
    average with the original.

    Per the History note above, non-square data is not line-mirrored
    (returned unchanged). `diag` is a closure variable from the enclosing
    scope: if truthy, the mirror line is left on the diagonal (no
    back-rotation).
    """
    x0 = int(F.shape[0]/2.)
    y0 = int(F.shape[1]/2.)
    # NOTE(review): intended as a square-image check, but comparing the
    # integer half-sizes also matches some near-square shapes (e.g. 3x2).
    if x0 == y0:
        # angle between mirror line and diagonal line, unit in rad
        # NOTE(review): x1 == x0 gives a division by zero (inf for numpy
        # floats) — vertical mirror lines are not handled; confirm.
        alpha = 3*np.pi/4-np.arctan((y1-y0)/(x1-x0))
        # rotate the mirror line to be diagonal
        Fr = snd.rotate(F, -alpha/np.pi*180, reshape=False)
        Ff = Fr.T  # diagonal mirror (transpose)
        if diag:
            # Leave the mirror line on the diagonal.
            return (Ff+Fr)/2.0
        else:
            Ffr = snd.rotate(Ff, alpha/np.pi*180, reshape=False)  # rotate back
            return (Ffr+F)/2.0
    else:
        return F
p = np.array(bp, dtype=np.float64)
if len(data.shape) is 2:
return linmirr(sym2d(data, n), p[0], p[1])
if len(data.shape) is 3:
out = np.zeros_like(data)
for ix, layer in enumerate(data):
out[ix] = linmirr(sym2d(layer, n), p[0], p[1])
return out
else:
print('ERR: Input must be 2D or 3D numpy array.')
def gauss2d(x, y, p, symmetric=False):
    '''Evaluate a rotated two-dimensional Gaussian on the grid spanned
    by x and y.

    Inputs:
        x - Required : 1D array containing x values
        y - Required : 1D array containing y values. The funciton will
                       create a meshgrid from x and y, but should be called
                       like f(x, y, *args).
        p - Required : List of parameters that define the gaussian, in the
                       following order: [x0, y0, sigmax, sigmay, Amp, theta]
        symmetric - Optional : Boolean, if True this will add another
                               Gaussian at (cenx - x0, ceny - y0), which is
                               useful in frequency space.

    Returns:
        G - 2D array containing Gaussian.

    History:
        2018-03-30 - HP : Initial commit.
    '''
    x0, y0, sx, sy, A, theta = [float(val) for val in p]
    X, Y = np.meshgrid(x, y)
    theta = np.radians(theta)
    # Coefficients of the rotated quadratic form.
    a = np.cos(theta)**2/(2*sx**2) + np.sin(theta)**2/(2*sy**2)
    b = -np.sin(2*theta)/(4*sx**2) + np.sin(2*theta)/(4*sy**2)
    c = np.sin(theta)**2/(2*sx**2) + np.cos(theta)**2/(2*sy**2)

    def _lobe(cx, cy):
        # One Gaussian lobe centred at (cx, cy).
        return A*np.exp( - (a*(X-cx)**2 + 2*b*(X-cx)*(Y-cy) + c*(Y-cy)**2))

    G = _lobe(x0, y0)
    if symmetric:
        # Mirror the centre through the midpoint of the grid.
        G += _lobe(x[-1] + x[0] - x0, y[-1] + y[0] - y0)
    return G
def gauss_ring(x, y, major, sigma, minor=None, theta=0, x0=None, y0=None):
    '''
    Create a 2D ring with a gaussian cross section.

    Inputs:
        x - Required : 1D array containing x values
        y - Required : 1D array containing y values. The funciton will
                       create a meshgrid from x and y, but should be called
                       like f(x, y, *args).
        major - Required : Float. Radius of ring or major axis of ellipse.
        sigma - Required : Float. Width of gaussian cross section
        minor - Optional : Float. Radius of minor axis of ellipse
                           (default: major)
        theta - Optional : Float. Angle in degrees to rotate ring.
        x0 - Optional : Float. Center point of ring (default: center of x)
        y0 - Optional : Float. Center point of ring (default: center of y)

    Returns:
        G - 2D array containing Gaussian ring.

    History:
        2018-05-09 - HP : Initial commit.
        2018-05-10 - HP : Added center point.
    '''
    if minor is None:
        # Bug fix: previously defaulted to 1, contradicting the documented
        # default (minor == major, i.e. a circular ring).
        minor = major
    if x0 is None:
        x0 = (x[-1] + x[0])/2.0
    if y0 is None:
        y0 = (y[-1] + y[0])/2.0
    x, y = x[:,None], y[None,:]
    r = np.sqrt((x-x0)**2+(y-y0)**2)
    # Bug fix: the second arctan2 argument used `y - x0`; the polar angle
    # must be measured from the ring's own center, so it is `y - y0`.
    T = np.arctan2(x-x0,y-y0) - np.radians(theta)
    # Ellipse radius at angle T (standard polar form of an ellipse).
    R = major*minor / np.sqrt((minor*np.cos(T))**2 + (major*np.sin(T))**2)
    return np.exp(-(r-R)**2 / (2*sigma**2))
def gauss_theta(x, y, theta, sigma, x0=None, y0=None, symmetric=1):
    '''
    Create a radial wedge with a gaussian profile. For theta-dependent
    amplitude modulation of a signal.

    Inputs:
        x - Required : 1D array containing x values
        y - Required : 1D array containing y values. The function will
                       create a meshgrid from x and y, but should be called
                       like f(x, y, *args).
        theta - Required : Float. Angle in degrees for center of wedge.
        sigma - Required : Float. Width of gaussian cross section.
        x0 - Optional : Float. Center point of arc wedge (default: center of x)
        y0 - Optional : Float. Center point of arc wedge (default: center of y)
        symmetric - Optional : Integer. Gives the wedge n-fold rotational
                               symmetry (default:1).

    Returns:
        G - 2D array containing Gaussian wedge.

    History:
        2018-05-10 - HP : Initial commit.
    '''
    def reduce_angle(theta):
        '''Maps any angle in degrees to the interval -180 to 180'''
        t = theta % 360
        if t > 180:
            t -= 360
        return t
    if x0 is None:
        x0 = (x[-1] + x[0])/2.0
    if y0 is None:
        y0 = (y[-1] + y[0])/2.0
    t = np.radians(reduce_angle(theta))
    # NOTE(review): reduce_angle() is also applied to sigma, a *width* —
    # a sigma above 180 degrees would wrap to a negative value; confirm
    # this is intended.
    sig = np.radians(reduce_angle(sigma))
    # arctan2 is called as (dx, dy), so T is measured from the +y axis.
    T = np.arctan2(x[:,None]-x0, y[None,:]-y0)
    if -np.pi/2.0 < t <= np.pi/2.0:
        amp = np.exp(-(T-t)**2/(2*sig**2))
    else:
        # NOTE(review): for |t| > 90 deg the wedge is evaluated mirrored
        # and the array flipped along the last axis — presumably to avoid
        # the arctan2 discontinuity at +/-pi; verify.
        amp = np.exp(-(T-(np.sign(t)*np.pi-t))**2/(2*sig**2))[:,::-1]
    # Add the remaining (symmetric - 1) rotated copies of the wedge;
    # for symmetric=1 the linspace is empty and the loop is skipped.
    for deg in np.linspace(360.0/symmetric, 360, int(symmetric)-1, endpoint=False):
        amp += gauss_theta(x, y, np.degrees(t)+deg, sigma, x0=x0, y0=y0, symmetric=1)
    return amp
class ngauss1d(object):
    '''
    Fits a combination of n gaussians to 1d data. Output is an object
    containing various attributes pertaining to the fit. Includes the option
    to fix a number of parameters in the fit by providing an array of 1 and 0
    corresponding to each parameter: 1 - vary, 0 - fix.

    Inputs:
        x - x data
        y - normalized y data: divide by the end point and subtract 1:
            y = y_data / y_data[-1] - 1
        p0 - array of initial guess parameters in the form:
             [amp, mu, sigma, amp, mu, sigma, ...]
             len(p0) must be divisible by 3.
        vary - array with same length as p0 describing whether to vary or fix
               each parameter. Defaults to varying all.
        kwarg - additional keyword arguments passed to scipy.optimize.minimize

    Usage: result = ngauss1d(x, y, p0, vary=None, **kwarg)
    '''
    def __init__(self, x, y, p0, vary=None, **kwarg):
        if vary is None:
            vary = np.zeros(len(p0)) + 1
        if len(vary) != len(p0):
            # NOTE(review): this only warns — the fit proceeds with a
            # possibly inconsistent vary mask; confirm that is intended.
            print('Warning - Vary not specified for each parameter.')
        self._x = x
        self._yf = y
        # Indices of the parameters that are allowed to vary.
        # NOTE(review): `vary == 1` assumes vary is a numpy array; a plain
        # list would make the comparison a scalar False and fix everything.
        self._ix = np.where(vary == 1)[0]
        self._p0 = p0
        # Optimize only the varying parameters; the fixed ones are
        # re-inserted by _find_p on every objective evaluation.
        self.output = opt.minimize(self._chix, p0[self._ix], **kwarg)
        p = self._find_p(self.output.x)
        self.fit = self.gaussn(*p)
        # Parameters as a (3, n_peaks) array: rows are amp, mu, sigma.
        self.p_unsrt = p.reshape(int(len(p0)/3), 3).T
        mu = self.p_unsrt[1]
        # Same parameters with peaks sorted by center position mu.
        self.p = self.p_unsrt[:, mu.argsort()]
        self.peaks = np.zeros([self.p.shape[1], len(self._x)])
        self.peaks_unsrt = np.zeros([self.p.shape[1], len(self._x)])
        for ix, (peak, peak_u) in enumerate(zip(self.p.T, self.p_unsrt.T)):
            self.peaks[ix] = self.gaussn(*peak)
            self.peaks_unsrt[ix] = self.gaussn(*peak_u)

    def gaussn(self, *p):
        '''Sum of gaussians for flat parameters [amp, mu, sigma, ...].'''
        g = np.zeros_like(self._x)
        for i in range(0,len(p),3):
            # abs() keeps the amplitude positive even if the optimizer
            # wanders into negative values.
            amp = abs(float(p[i]))
            mu = float(p[i+1])
            sigma = float(p[i+2])
            g += amp * np.exp(-(self._x-mu)**2 / (2.0*sigma**2))
        return g

    def _find_p(self, p_vary):
        '''Merge the varying parameters back into the full vector.'''
        p = np.zeros([len(self._p0)])
        vix = 0
        for ix in range(len(self._p0)):
            if ix in self._ix:
                p[ix] = p_vary[vix]
                vix += 1
            else:
                p[ix] = self._p0[ix]
        return p

    def _chix(self, p_vary):
        '''Objective: log of the summed squared residuals.'''
        p = self._find_p(p_vary)
        gf = self.gaussn(*p)
        err = np.abs(gf - self._yf)
        return np.log(sum(err**2))
def track_peak(x, z, p0, **kwarg):
    '''
    Simple interface for ngauss1d that tracks peaks on a 2d map in the y
    direction.

    Inputs:
        x - x data
        z - 2d map with peaks in the y direction
        p0 - initial guess parameters for peaks
        kwarg - additional keyword arguments passed to ngauss1d. Check
                ngauss1d.__doc__ for details.

    Usage: mu = track_peak(x, z, p0, vary=vary, bounds=bounds)
    '''
    # Bug fix: len(p0)/3 is a float under Python 3 and np.zeros rejects
    # non-integer dimensions; use floor division instead.
    mu = np.zeros([len(p0)//3, z.shape[0]])
    for ix, yv in enumerate(z):
        # Normalize each row the way ngauss1d expects.
        y = yv/yv[-1] - 1
        result = ngauss1d(x, y, p0, **kwarg)
        # Row 1 of p_unsrt holds the (unsorted) peak centers mu.
        mu[:,ix] = result.p_unsrt[1,:]
    return mu
def plane_subtract(data, deg, X0=None):
'''
Subtracts a polynomial plane from an image. The polynomial does not keep
any cross terms, i.e. not xy, only x^2 and y*2. I think this is fine and
just doesn't keep any hyperbolic-like terms.
Inputs:
data - Required : A 2D or 3D numpy array containing data
deg - Required : Degree of polynomial to be removed.
X0 - Optional : Guess optimization parameters for
scipy.optimize.minimize.
Returns:
subtractedData - Data with a polynomial plane removed.
History:
2017-07-13 - HP : Fixed so that it works up to at least 3rd order.
'''
def plane(a):
x = np.arange(subtract2D.norm.shape[1])
y = np.arange(subtract2D.norm.shape[0])
x = x[None,:]
y = y[:,None]
z = np.zeros_like(subtract2D.norm) + a[0]
N = int((len(a)-1)/2)
for k in range(1, N+1):
z | |
support scaling operations currently,
this interface only supports simple operations that can be deduced by the front-end.
enable_compile_cache (bool): Whether to save or load the cache of the graph compiled by front-end.
After enable_compile_cache is set to True, during the first execution, a hardware-independent
compilation cache is generated and exported to a MINDIR file. When the network is executed again,
if enable_compile_cache is still set to True and the network scripts are not changed,
the compile cache is loaded. Note that only limited automatic detection for the changes of
python scripts is supported by now, which means that there is a correctness risk. Default: False.
This is an experimental prototype that is subject to change and/or deletion.
compile_cache_path (str): Path to save the cache of the graph compiled by front-end. Default: ".".
If the specified directory does not exist, the system will automatically create the directory.
The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
the ID of the current device in the cluster.
Raises:
ValueError: If input key is not an attribute in context.
Examples:
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> context.set_context(precompile_only=True)
>>> context.set_context(device_target="Ascend")
>>> context.set_context(device_id=0)
>>> context.set_context(save_graphs=True, save_graphs_path="./model.ms")
>>> context.set_context(enable_reduce_precision=True)
>>> context.set_context(enable_dump=True, save_dump_path=".")
>>> context.set_context(enable_graph_kernel=True)
>>> context.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
>>> context.set_context(reserve_class_name_in_scope=True)
>>> context.set_context(variable_memory_max_size="6GB")
>>> context.set_context(enable_profiling=True,
... profiling_options='{"output":"/home/data/output","training_trace":"on"}')
>>> context.set_context(check_bprop=True)
>>> context.set_context(max_device_memory="3.5GB")
>>> context.set_context(print_file_path="print.pb")
>>> context.set_context(enable_sparse=True)
>>> context.set_context(max_call_depth=80)
>>> context.set_context(env_config_path="./env_config.json")
>>> context.set_context(auto_tune_mode="GA,RL")
>>> context.set_context(grad_for_scalar=True)
>>> context.set_context(enable_compile_cache=True, compile_cache_path="./cache.ms")
>>> context.set_context(pynative_synchronize=True)
"""
ctx = _context()
# set device target first
if 'device_target' in kwargs:
ctx.set_device_target(kwargs['device_target'])
device = ctx.get_param(ms_ctx_param.device_target)
if not device.lower() in __device_target__:
raise ValueError(f"Error, package type {__package_name__} support device type {__device_target__}, "
f"but got device target {device}")
device = ctx.get_param(ms_ctx_param.device_target)
for key, value in kwargs.items():
if key in ('enable_profiling', 'profiling_options', 'enable_auto_mixed_precision',
'enable_dump', 'save_dump_path'):
logger.warning(f" '{key}' parameters will be deprecated."
"For details, please see the interface parameter API comments")
continue
if not _check_target_specific_cfgs(device, key):
continue
if hasattr(ctx, key):
setattr(ctx, key, value)
continue
if key in ctx.setters:
ctx.setters[key](ctx, value)
continue
# enum variables beginning with '_' are for internal use
if key in ms_ctx_param.__members__ and key[0] != '_':
ctx.set_param(ms_ctx_param.__members__[key], value)
continue
raise ValueError(f"For 'context.set_context', the keyword argument {key} is not recognized! For detailed "
f"usage of 'set_context', please refer to the Mindspore official website.")
def get_context(attr_key):
    """
    Get context attribute value according to the input key.

    If some attributes are not set, they will be automatically obtained.

    Args:
        attr_key (str): The key of the attribute.

    Returns:
        Object, The value of given attribute key.

    Raises:
        ValueError: If input key is not an attribute in context.

    Examples:
        >>> context.get_context("device_target")
        >>> context.get_context("device_id")
    """
    ctx = _context()
    # Warn (via the helper) if this key is not valid for the current device;
    # the return value is intentionally ignored.
    _ = _check_target_specific_cfgs(ctx.get_param(ms_ctx_param.device_target), attr_key)
    if hasattr(ctx, attr_key):
        return getattr(ctx, attr_key)
    # Enum parameters whose names begin with '_' are for internal use only.
    members = ms_ctx_param.__members__
    if attr_key in members and not attr_key.startswith('_'):
        return ctx.get_param(members[attr_key])
    raise ValueError(f"For 'context.get_context', the argument {attr_key} is not recognized! For detailed "
                     f"usage of 'get_context', please refer to the Mindspore official website.")
def _get_mode():
    """
    Get execution mode. Only for internal using.

    Returns:
        Object: The Value of execution mode.
    """
    return _context().get_mode()
class ParallelMode:
    """
    Parallel mode options.

    Five parallel modes are supported: "STAND_ALONE", "DATA_PARALLEL",
    "HYBRID_PARALLEL", "SEMI_AUTO_PARALLEL" and "AUTO_PARALLEL".
    Default: "STAND_ALONE".

    - STAND_ALONE: Only one processor is working.
    - DATA_PARALLEL: Distributes the data across different processors.
    - HYBRID_PARALLEL: Achieves data parallelism and model parallelism manually.
    - SEMI_AUTO_PARALLEL: Achieves data parallelism and model parallelism by setting parallel strategies.
    - AUTO_PARALLEL: Achieves parallelism automatically.

    MODE_LIST: The list of all supported parallel modes.
    """

    STAND_ALONE = "stand_alone"
    DATA_PARALLEL = "data_parallel"
    HYBRID_PARALLEL = "hybrid_parallel"
    SEMI_AUTO_PARALLEL = "semi_auto_parallel"
    AUTO_PARALLEL = "auto_parallel"

    # All supported modes, in documented order.
    MODE_LIST = [STAND_ALONE, DATA_PARALLEL, HYBRID_PARALLEL,
                 SEMI_AUTO_PARALLEL, AUTO_PARALLEL]
@args_type_check(enable_ps=bool)
def set_ps_context(**kwargs):
    """
    Set parameter server training mode context.

    Note:
        Parameter server training mode additionally relies on several
        environment variables, listed below:

        MS_SERVER_NUM: Server number

        MS_WORKER_NUM: Worker number

        MS_SCHED_HOST: Scheduler IP address

        MS_SCHED_PORT: Scheduler port

        MS_ROLE: The role of this process:

            MS_SCHED: represents the scheduler,

            MS_WORKER: represents the worker,

            MS_PSERVER: represents the Server

    Args:
        enable_ps (bool): Whether to enable parameter server training mode.
            Only after enable_ps is set True, the environment variables will be effective.
            Default: False.

    Raises:
        ValueError: If input key is not the attribute in parameter server training mode context.

    Examples:
        >>> context.set_ps_context(enable_ps=True)
    """
    # All validation and storage is delegated to the internal PS context helper.
    _set_ps_context(**kwargs)
def get_ps_context(attr_key):
    """
    Get parameter server training mode context attribute value according to the key.

    Args:
        attr_key (str): The key of the attribute:

            - enable_ps (bool): Whether to enable parameter server training mode.

    Returns:
        Returns attribute value according to the key.

    Raises:
        ValueError: If input key is not attribute in auto parallel context.

    Examples:
        >>> context.get_ps_context(enable_ps)
    """
    # Lookup is delegated to the internal PS context helper.
    return _get_ps_context(attr_key)
def reset_ps_context():
    """
    Reset parameter server training mode context attributes to the default values:

    - enable_ps: False.
    """
    _reset_ps_context()
def set_fl_context(**kwargs):
    """
    Set federated learning training mode context.

    Args:
        enable_fl (bool): Whether to enable federated learning training mode.
            Default: False.
        server_mode (str): Describe the server mode, which must be one of 'FEDERATED_LEARNING' and 'HYBRID_TRAINING'.
            Default: 'FEDERATED_LEARNING'.
        ms_role (str): The process's role in the federated learning mode,
            which must be one of 'MS_SERVER', 'MS_WORKER' and 'MS_SCHED'.
            Default: 'MS_SERVER'.
        worker_num (int): The number of workers. For current version, this must be set to 1 or 0.
        server_num (int): The number of federated learning servers. Default: 0.
        scheduler_ip (str): The scheduler IP. Default: '0.0.0.0'.
        scheduler_port (int): The scheduler port. Default: 6667.
        fl_server_port (int): The http port of the federated learning server.
            Normally for each server this should be set to the same value. Default: 6668.
        enable_fl_client (bool): Whether this process is federated learning client. Default: False.
        start_fl_job_threshold (int): The threshold count of startFLJob. Default: 1.
        start_fl_job_time_window (int): The time window duration for startFLJob in millisecond. Default: 3000.
        share_secrets_ratio (float): The ratio for computing the threshold count of share secrets. Default: 1.0.
        update_model_ratio (float): The ratio for computing the threshold count of updateModel. Default: 1.0.
        cipher_time_window (int): The time window duration for each cipher round in millisecond. Default: 300000.
        reconstruct_secrets_threshold (int): The threshold count of reconstruct threshold. Default: 0.
        update_model_time_window (int): The time window duration for updateModel in millisecond. Default: 3000.
        fl_name (string): The federated learning job name. Default: ''.
        fl_iteration_num (int): Iteration number of federated learning,
            which is the number of interactions between client and server. Default: 20.
        client_epoch_num (int): Client training epoch number. Default: 25.
        client_batch_size (int): Client training data batch size. Default: 32.
        client_learning_rate (float): Client training learning rate. Default: 0.001.
        worker_step_num_per_iteration (int): The worker's standalone training step number before communicating with
            server. Default: 65.
        dp_eps (float): Epsilon budget of differential privacy mechanism. The smaller the dp_eps, the better the
            privacy protection effect. Default: 50.0.
        dp_delta (float): Delta budget of differential privacy mechanism, which is usually equals the reciprocal of
            client number. The smaller the dp_delta, the better the privacy protection effect. Default: 0.01.
        dp_norm_clip (float): A factor used for clipping model's weights for differential mechanism. Its value is
            suggested to be 0.5~2. Default: 1.0.
        encrypt_type (string): Secure schema for federated learning, which can be 'NOT_ENCRYPT', 'DP_ENCRYPT',
            'PW_ENCRYPT' or 'STABLE_PW_ENCRYPT'. If 'DP_ENCRYPT', differential privacy schema would be applied
            for clients and the privacy protection effect would be determined by dp_eps, dp_delta and dp_norm_clip
            as described above. If 'PW_ENCRYPT', pairwise secure aggregation would be applied to protect clients'
            model from stealing in cross-device scenario. If 'STABLE_PW_ENCRYPT', pairwise secure aggregation would
            be applied to protect clients' model from stealing in cross-silo scenario. Default: 'NOT_ENCRYPT'.
        config_file_path (string): Configuration file path used by recovery. Default: ''.
        scheduler_manage_port (int): scheduler manage port used to scale out/in. Default: 11202.
        enable_ssl (bool): Set PS SSL mode enabled or disabled. Default: True.
        client_password (str): Password to decrypt the secret key stored in the client certificate.
        server_password (str): Password to decrypt the secret key stored in the server certificate.

    Raises:
        ValueError: If input key is not the attribute in federated learning mode context.

    Examples:
        >>> context.set_fl_context(enable_fl=True, server_mode='FEDERATED_LEARNING')
    """
    # Federated learning settings share the parameter-server context backend.
    _set_ps_context(**kwargs)
def get_fl_context(attr_key):
"""
Get federated learning | |
HeaderError if access is attempted
but no VEP CSQ or ANN field is present in the header.
'''
if self.__csq_label is None:
self.csq_fields
return self.__csq_label
@csq_label.setter
def csq_label(self, c):
    # Override the cached CSQ/ANN INFO field label (e.g. 'CSQ' or 'ANN').
    self.__csq_label = c
@property
def csq_fields(self):
    '''
    A list of CSQ field names in the order they are represented
    in CSQ INFO field entries. Set to None on initialization.
    Will raise a HeaderError if access is attempted but no VEP
    CSQ or ANN field is present in the header.
    '''
    # Parse lazily on first access; the result is cached in __csq_fields.
    if self.__csq_fields is None:
        if self.__csq_label is None:
            # No label chosen yet: prefer 'CSQ', fall back to 'ANN'.
            # The last ([-1]) header entry for the field is used in case
            # the header declares it more than once.
            try:
                csq_header = self.metadata['INFO']['CSQ'][-1]
                csq = 'CSQ'
            except KeyError:
                try:
                    csq_header = self.metadata['INFO']['ANN'][-1]
                    csq = 'ANN'
                except KeyError:
                    raise HeaderError("No CSQ or ANN field in INFO header - "+
                                      "unable to retrieve consequence fields.")
            self.csq_label = csq
        else:
            # Label already set (e.g. by caller); look it up directly.
            csq = self.__csq_label
            csq_header = self.metadata['INFO'][csq][-1]
        # The Description text embeds the '|'-separated field list;
        # _csq_format_re captures it in group(1).
        match = self._csq_format_re.match(csq_header['Description'])
        if match:
            self.__csq_fields = match.group(1).split('|')
        else:
            raise HeaderError("Could not parse {} Format in ".format(csq)
                              + "header. Unable to retrieve consequence "
                              + "annotations.")
    return self.__csq_fields
@csq_fields.setter
def csq_fields(self, csq):
    # Override the cached, ordered list of CSQ/ANN sub-field names.
    self.__csq_fields = csq
def _parse_metadata(self):
    '''
    Extract INFO, FORMAT, FILTER and contig information from VCF
    meta header and store in dicts.

    Raises:
        ParseError: if the first meta header line is not a valid
                    ##fileformat line.
    '''
    # Check first line is the essential fileformat line.
    ff_match = self._meta_re.match(self.meta_header[0])
    if not ff_match or ff_match.group(1) != 'fileformat':
        # The concatenated message previously read "fileformatmetaheader";
        # a separating space is required between the two fragments.
        raise ParseError("Error: First line of VCF should be fileformat " +
                         "metaheader (e.g. ##fileformat=VCFv4.2)")
    else:
        self.fileformat = ff_match.group(2)
    for h in self.meta_header:
        self._parse_header_line(h)
    for field_type in ['FORMAT', 'INFO']:
        try:
            for field in self.metadata[field_type]:
                self._set_field_translation(field_type, field)
        except KeyError:
            # Missing FORMAT is common (e.g. sites-only VCFs) and silently
            # tolerated; a missing INFO section is unusual enough to warn.
            if field_type == 'INFO':
                warnings.warn("No '{}' field in header!"
                              .format(field_type), stacklevel=5)
def _parse_header_line(self, h):
    '''
    Parse a metaheader line and assign to self.metadata dict where
    keys are the type of metadata line and values are dicts of IDs to
    lists of either dicts of key-value pairs or string values.

    Args:
        h: a single meta header line (starting '##'), without newline.

    Raises:
        HeaderError: if the line matches neither the structured
                     (##FIELD=<ID=...,...>) nor simple (##key=value)
                     metaheader format, or if a structured entry is
                     missing a required key for its field type.
    '''
    match_d = self._dict_re.match(h)
    match_m = self._meta_re.match(h)
    field = None
    fid = None
    if match_d:
        # Structured line, e.g. INFO/FORMAT/FILTER/ALT/contig with
        # multiple key=value pairs inside <...>.
        field = match_d.group(1)
        fid = match_d.group(2)
        rest = match_d.group(3) or ''
        # _subd_re.findall yields (key, value) tuples, which dict()
        # consumes directly - no intermediate list comprehension needed.
        d = dict(self._subd_re.findall(rest))
        if field not in self.metadata:
            self.metadata[field] = {fid: [d]}
        elif fid in self.metadata[field]:
            # Multiple entries for the same ID - extend the list.
            self.metadata[field][fid].append(d)
        else:
            self.metadata[field][fid] = [d]
    elif match_m:
        # Simple ##key=value line.
        field = match_m.group(1)
        fid = match_m.group(2)
        if field in self.metadata:
            self.metadata[field].append(fid)
        else:
            self.metadata[field] = [fid]
    else:
        raise HeaderError("Invalid metaheader line {}".format(h))
    if field in self._required_keys:
        # Ensure ALT/FORMAT/FILTER/INFO entries contain required keys;
        # only the entry just added needs checking.
        last = self.metadata[field][fid][-1]
        for k in self._required_keys[field]:
            if k not in last:
                raise HeaderError(
                    "Missing required key '{}' in metaheader line: {}"
                    .format(k, h))
def _set_field_translation(self, field_type, field):
    '''
    Record, for the given INFO or FORMAT field, a tuple of the
    value's Python class (int, float or str, or None for Flag) and
    whether the raw value requires splitting on commas.
    '''
    props = self.metadata[field_type][field][0]
    # Map the VCF 'Type' keyword onto a Python conversion class.
    # Flag fields carry no value, hence None.
    type_lookup = {'String': str, 'Character': str,
                   'Float': float, 'Integer': int, 'Flag': None}
    try:
        ctype = type_lookup[props['Type']]
    except KeyError:
        raise ParseError("Unrecognised FORMAT Type '{}' in header"
                         .format(props['Type']))
    # 'Number' values of 'A', 'G', 'R' or '.' (non-digits), or any
    # digit greater than 1, indicate a comma-separated list.
    number = props['Number']
    split = not number.isdigit() or int(number) > 1
    if field_type == 'INFO':
        translator = self._info_field_translater
    elif field_type == 'FORMAT':
        translator = self._format_field_translater
    else:
        raise ParseError("'{}' not recognised as a ".format(field_type) +
                         "field type for translation")
    translator[field] = (ctype, split)
def add_header_field(self, name, string=None, field_type=None,
                     dictionary=None):
    '''
    Add a header field with given name and optional field type,
    and dictionary of properties.

    Args:
        name: name of field to add

        string: string to add to field. Ignored if 'dictionary'
                is provided.

        field_type:
                type of field - e.g. if INFO/FILTER/FORMAT
                field. Required if providing a dictionary.

        dictionary:
                a dict of keys to values for the given field.
                If 'field_type' is specified, this arg must be
                provided and must contain all the essential keys
                for that field type. For example, an 'INFO'
                field must have 'Number', 'Type', and
                'Description' keys.

    Raises:
        Exception: if neither string nor dictionary is given, if a
                   dictionary is given without field_type (or vice
                   versa for structured field types), or if the
                   dictionary lacks a required key for field_type.
    '''
    if dictionary is None and string is None:
        raise Exception("Either dictionary or string argument is required")
    # Structured field types (those with required keys, e.g. INFO/FORMAT)
    # must be described by a dictionary, not a bare string.
    if field_type is not None and field_type in self._required_keys:
        if dictionary is None:
            raise Exception("Header type {} requires a dictionary.".format(
                field_type))
    if dictionary:
        if not field_type:
            raise Exception("field_type is required for use with " +
                            "dictionary")
        # NOTE(review): assumes self.metadata already has a key for
        # field_type - an unseen field_type would raise KeyError here.
        if name in self.metadata[field_type]:
            self.metadata[field_type][name].append(dictionary)
        else:
            self.metadata[field_type][name] = [dictionary]
        # Register value-conversion info so the new field parses correctly.
        self._set_field_translation(field_type, name)
        h_vals = []
        if field_type in self._required_keys:
            # First append required keys in their canonical order...
            for k in self._required_keys[field_type]:
                try:
                    h_vals.append(k + "=" + dictionary[k])
                except KeyError:
                    raise Exception("Header type '{}'".format(field_type) +
                                    " requires '{}' field." .format(k))
            # ...then append any additional keys.
            for k in dictionary:
                if k in self._required_keys[field_type]:
                    continue
                h_vals.append(k + "=" + dictionary[k])
        else:
            for k in dictionary:
                h_vals.append(k + "=" + dictionary[k])
        h_string = str.join(',', ['##' + field_type + "=<ID=" + name] +
                            h_vals) + ">"
    else:
        # Simple ##name=string header line.
        h_string = '##' + name + '=' + string
        if name in self.metadata:
            self.metadata[name].append(string)
        else:
            self.metadata[name] = [string]
    self.meta_header.append(h_string)
class VcfRecord(object):
    '''
    A single record from a Vcf created by parsing a non-header line
    from a VCF file. May contain multiple alternate alleles.
    '''

    # Splits genotype strings on '/' (unphased) or '|' (phased) separators.
    _gt_splitter = re.compile(r'[\/\|]')

    # __slots__ keeps per-record memory overhead low - a VCF can hold
    # millions of records. Double-underscore entries back lazy properties.
    __slots__ = ['header', 'cols', 'CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL',
                 'FILTER', 'INFO', 'FORMAT', '__SPAN', '__CSQ', 'samples',
                 '_sample_idx', '__CALLS', '__ALLELES', '__DECOMPOSED_ALLELES',
                 '__INFO_FIELDS', 'GT_FORMAT', '_SAMPLE_GTS', '_got_gts',
                 '_vep_allele', '_parsed_info', '_parsed_gts', '__is_sv']
def __init__(self, line, caller):
    '''
    VcfRecord objects require a line and a related VcfReader
    object for initialization.

    Args:
        line:   a non-header line from a VCF file without
                newline character

        caller: a VcfReader object (normally the same VcfReader
                object that read the input line). Metadata and
                sample information will be read from this object
                in order to initialize the VcfRecord object.

    Raises:
        ParseError: if the line has fewer than 8 tab-separated columns.
    '''
    # Only split the first 9 columns initially - splitting the whole
    # line of a VCF with many samples is costly; sample columns remain
    # joined in cols[9] until needed.
    self.cols = line.split("\t", 9)
    try:
        ( self.CHROM, pos, self.ID, self.REF, self.ALT,
          qual, self.FILTER, self.INFO ) = self.cols[:8]
    except ValueError as err:
        if len(self.cols) < 8:
            raise ParseError("Not enough columns for following line:\n{}"
                             .format(line))
        else:
            # Unexpected unpacking failure - propagate unchanged.
            raise err
    self.POS = int(pos)
    # QUAL may be '.' (missing) - keep the raw string in that case.
    try:
        self.QUAL = float(qual)
    except ValueError:
        self.QUAL = qual
    # Lazily-computed attributes start as None; their property getters
    # populate them on first access.
    self.SPAN = None
    self.INFO_FIELDS = None
    self.FORMAT = None
    self.GT_FORMAT = None
    self.CALLS = None
    self.DECOMPOSED_ALLELES = None
    self.ALLELES = None
    self.header = caller.header
    self.CSQ = None
    self.IS_SV = None
    self._SAMPLE_GTS = {}
    self._vep_allele = {}
    self._parsed_info = {}
    self._parsed_gts = defaultdict(dict)
    # Flag indicating whether we've already retrieved GT dicts for
    # every sample.
    self._got_gts = False
    if len(self.cols) > 8:
        self.FORMAT = self.cols[8]
        self.GT_FORMAT = self.FORMAT.split(':')
def __str__(self):
    '''
    Return the record formatted as a single VCF data line.
    Built from self.cols so that sample calls are not needlessly
    re-split before output.
    '''
    return "\t".join(self.cols)
def add_ids(self, ids, replace=False):
    '''
    Add the given IDs to this record's ID field. When the record
    already carries an ID (i.e. it is not '.') the new IDs are
    merged with the existing ones, unless replace is True.

    Args:
        ids:     A list of IDs to add.

        replace: If True, existing ID values are replaced,
                 otherwise the given IDs are added to.
                 Default = False.
    '''
    if replace or self.ID == '.':
        self.ID = ';'.join(ids)
    else:
        # De-duplicate against existing IDs; set ordering is unspecified.
        merged = set(ids + self.ID.split(';'))
        self.ID = ';'.join(merged)
    # Keep cols in sync so the change is reflected by __str__.
    self.cols[2] = self.ID
def add_filter(self, filters, replace=False):
    '''
    Add provided filters to FILTER field of VCF. If replace is
    False, filters will be added to existing FILTER annotations
    except for those annotated just 'PASS'.

    Args:
        filters: A list of filter strings to add.

        replace: If True replace any existing filter annotations.
    '''
    # Work on a copy: the original implementation appended to the
    # caller's list, mutating their argument as a side effect.
    new_filters = set(filters)
    if not replace and self.FILTER != 'PASS':
        new_filters.add(self.FILTER)
    # Sort for a deterministic, de-duplicated FILTER string.
    self.FILTER = ';'.join(sorted(new_filters))
    # Keep cols in sync so the change is reflected by __str__.
    self.cols[6] = self.FILTER
@property
def IS_SV(self):
    '''True if record represents a structural variant'''
    # Computed lazily from the presence of an SVTYPE INFO key and
    # cached in __is_sv for subsequent accesses.
    if self.__is_sv is None:
        self.__is_sv = 'SVTYPE' in self.INFO_FIELDS
    return self.__is_sv
@IS_SV.setter
| |
or
~azure.mgmt.apimanagement.models.VirtualNetworkType
:param api_version_constraint: Control Plane Apis version constraint for
the API Management service.
:type api_version_constraint:
~azure.mgmt.apimanagement.models.ApiVersionConstraint
"""
# Serialization constraints: readonly fields are server-populated and
# ignored on requests; max_length is enforced client-side.
_validation = {
    'notification_sender_email': {'max_length': 100},
    'provisioning_state': {'readonly': True},
    'target_provisioning_state': {'readonly': True},
    'created_at_utc': {'readonly': True},
    'gateway_url': {'readonly': True},
    'gateway_regional_url': {'readonly': True},
    'portal_url': {'readonly': True},
    'management_api_url': {'readonly': True},
    'scm_url': {'readonly': True},
    'developer_portal_url': {'readonly': True},
    'public_ip_addresses': {'readonly': True},
    'private_ip_addresses': {'readonly': True},
}

# Maps Python attribute names to wire (JSON) keys and msrest type strings;
# consumed by the msrest serializer/deserializer.
_attribute_map = {
    'notification_sender_email': {'key': 'notificationSenderEmail', 'type': 'str'},
    'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
    'target_provisioning_state': {'key': 'targetProvisioningState', 'type': 'str'},
    'created_at_utc': {'key': 'createdAtUtc', 'type': 'iso-8601'},
    'gateway_url': {'key': 'gatewayUrl', 'type': 'str'},
    'gateway_regional_url': {'key': 'gatewayRegionalUrl', 'type': 'str'},
    'portal_url': {'key': 'portalUrl', 'type': 'str'},
    'management_api_url': {'key': 'managementApiUrl', 'type': 'str'},
    'scm_url': {'key': 'scmUrl', 'type': 'str'},
    'developer_portal_url': {'key': 'developerPortalUrl', 'type': 'str'},
    'hostname_configurations': {'key': 'hostnameConfigurations', 'type': '[HostnameConfiguration]'},
    'public_ip_addresses': {'key': 'publicIPAddresses', 'type': '[str]'},
    'private_ip_addresses': {'key': 'privateIPAddresses', 'type': '[str]'},
    'virtual_network_configuration': {'key': 'virtualNetworkConfiguration', 'type': 'VirtualNetworkConfiguration'},
    'additional_locations': {'key': 'additionalLocations', 'type': '[AdditionalLocation]'},
    'custom_properties': {'key': 'customProperties', 'type': '{str}'},
    'certificates': {'key': 'certificates', 'type': '[CertificateConfiguration]'},
    'enable_client_certificate': {'key': 'enableClientCertificate', 'type': 'bool'},
    'disable_gateway': {'key': 'disableGateway', 'type': 'bool'},
    'virtual_network_type': {'key': 'virtualNetworkType', 'type': 'str'},
    'api_version_constraint': {'key': 'apiVersionConstraint', 'type': 'ApiVersionConstraint'},
}
def __init__(self, *, notification_sender_email: str=None, hostname_configurations=None, virtual_network_configuration=None, additional_locations=None, custom_properties=None, certificates=None, enable_client_certificate: bool=False, disable_gateway: bool=False, virtual_network_type="None", api_version_constraint=None, **kwargs) -> None:
    # Generated model initializer: only client-settable properties are
    # accepted as keyword arguments; readonly fields are initialized to
    # None and populated by the server on responses.
    super(ApiManagementServiceBaseProperties, self).__init__(**kwargs)
    self.notification_sender_email = notification_sender_email
    # Server-populated (readonly per _validation).
    self.provisioning_state = None
    self.target_provisioning_state = None
    self.created_at_utc = None
    self.gateway_url = None
    self.gateway_regional_url = None
    self.portal_url = None
    self.management_api_url = None
    self.scm_url = None
    self.developer_portal_url = None
    self.hostname_configurations = hostname_configurations
    # Server-populated (readonly per _validation).
    self.public_ip_addresses = None
    self.private_ip_addresses = None
    self.virtual_network_configuration = virtual_network_configuration
    self.additional_locations = additional_locations
    self.custom_properties = custom_properties
    self.certificates = certificates
    self.enable_client_certificate = enable_client_certificate
    self.disable_gateway = disable_gateway
    # Default "None" is the string enum value, not the Python None.
    self.virtual_network_type = virtual_network_type
    self.api_version_constraint = api_version_constraint
class ApiManagementServiceCheckNameAvailabilityParameters(Model):
    """Parameters supplied to the CheckNameAvailability operation.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name to check for availability.
    :type name: str
    """

    # 'name' must be supplied; enforced client-side before serialization.
    _validation = {
        'name': {'required': True},
    }

    # Attribute-name to wire-key/type mapping used by the msrest serializer.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, *, name: str, **kwargs) -> None:
        super(ApiManagementServiceCheckNameAvailabilityParameters, self).__init__(**kwargs)
        self.name = name
class ApiManagementServiceGetSsoTokenResult(Model):
    """The response of the GetSsoToken operation.

    :param redirect_uri: Redirect URL to the Publisher Portal containing the
     SSO token.
    :type redirect_uri: str
    """

    # Attribute-name to wire-key/type mapping used by the msrest serializer.
    _attribute_map = {
        'redirect_uri': {'key': 'redirectUri', 'type': 'str'},
    }

    def __init__(self, *, redirect_uri: str=None, **kwargs) -> None:
        super(ApiManagementServiceGetSsoTokenResult, self).__init__(**kwargs)
        self.redirect_uri = redirect_uri
class ApiManagementServiceIdentity(Model):
    """Identity properties of the Api Management service resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of identity used for the resource. The
     type 'SystemAssigned, UserAssigned' includes both an implicitly created
     identity and a set of user assigned identities. The type 'None' will
     remove any identities from the service. Possible values include:
     'SystemAssigned', 'UserAssigned', 'SystemAssigned, UserAssigned', 'None'
    :type type: str or ~azure.mgmt.apimanagement.models.ApimIdentityType
    :ivar principal_id: The principal id of the identity.
    :vartype principal_id: str
    :ivar tenant_id: The client tenant id of the identity.
    :vartype tenant_id: str
    :param user_assigned_identities: The list of user identities associated
     with the resource. The user identity
     dictionary key references will be ARM resource ids in the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/
     providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
    :type user_assigned_identities: dict[str,
     ~azure.mgmt.apimanagement.models.UserIdentityProperties]
    """

    # 'type' is required; principal/tenant ids are server-populated.
    _validation = {
        'type': {'required': True},
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }

    # Attribute-name to wire-key/type mapping used by the msrest serializer.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserIdentityProperties}'},
    }

    def __init__(self, *, type, user_assigned_identities=None, **kwargs) -> None:
        super(ApiManagementServiceIdentity, self).__init__(**kwargs)
        self.type = type
        # Readonly: populated by the server on responses.
        self.principal_id = None
        self.tenant_id = None
        self.user_assigned_identities = user_assigned_identities
class ApiManagementServiceNameAvailabilityResult(Model):
    """Response of the CheckNameAvailability operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name_available: True if the name is available and can be used to
     create a new API Management service; otherwise false.
    :vartype name_available: bool
    :ivar message: If reason == invalid, provide the user with the reason why
     the given name is invalid, and provide the resource naming requirements so
     that the user can select a valid name. If reason == AlreadyExists, explain
     that <resourceName> is already in use, and direct them to select a
     different name.
    :vartype message: str
    :param reason: Invalid indicates the name provided does not match the
     resource provider’s naming requirements (incorrect length, unsupported
     characters, etc.) AlreadyExists indicates that the name is already in use
     and is therefore unavailable. Possible values include: 'Valid', 'Invalid',
     'AlreadyExists'
    :type reason: str or
     ~azure.mgmt.apimanagement.models.NameAvailabilityReason
    """

    _validation = {
        'name_available': {'readonly': True},
        'message': {'readonly': True},
    }

    # NOTE(review): 'reason' uses the enum type name here, while other
    # enum-typed fields in this file serialize as 'str' - confirm
    # 'NameAvailabilityReason' is a type the serializer can resolve.
    _attribute_map = {
        'name_available': {'key': 'nameAvailable', 'type': 'bool'},
        'message': {'key': 'message', 'type': 'str'},
        'reason': {'key': 'reason', 'type': 'NameAvailabilityReason'},
    }

    def __init__(self, *, reason=None, **kwargs) -> None:
        super(ApiManagementServiceNameAvailabilityResult, self).__init__(**kwargs)
        # Readonly: populated by the server on responses.
        self.name_available = None
        self.message = None
        self.reason = reason
class ApimResource(Model):
    """The Resource definition.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type for API Management resource is set to
     Microsoft.ApiManagement.
    :vartype type: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    """

    # ARM identity fields are always server-assigned.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Attribute-name to wire-key/type mapping used by the msrest serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, *, tags=None, **kwargs) -> None:
        super(ApimResource, self).__init__(**kwargs)
        # Readonly: populated by the server on responses.
        self.id = None
        self.name = None
        self.type = None
        self.tags = tags
class ApiManagementServiceResource(ApimResource):
"""A single API Management service resource in List or Get response.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type for API Management resource is set to
Microsoft.ApiManagement.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param notification_sender_email: Email address from which the
notification will be sent.
:type notification_sender_email: str
:ivar provisioning_state: The current provisioning state of the API
Management service which can be one of the following:
Created/Activating/Succeeded/Updating/Failed/Stopped/Terminating/TerminationFailed/Deleted.
:vartype provisioning_state: str
:ivar target_provisioning_state: The provisioning state of the API
Management service, which is targeted by the long running operation
started on the service.
:vartype target_provisioning_state: str
:ivar created_at_utc: Creation UTC date of the API Management service.The
date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified
by the ISO 8601 standard.
:vartype created_at_utc: datetime
:ivar gateway_url: Gateway URL of the API Management service.
:vartype gateway_url: str
:ivar gateway_regional_url: Gateway URL of the API Management service in
the Default Region.
:vartype gateway_regional_url: str
:ivar portal_url: Publisher portal endpoint Url of the API Management
service.
:vartype portal_url: str
:ivar management_api_url: Management API endpoint URL of the API
Management service.
:vartype management_api_url: str
:ivar scm_url: SCM endpoint URL of the API Management service.
:vartype scm_url: str
:ivar developer_portal_url: DEveloper Portal endpoint URL of the API
Management service.
:vartype developer_portal_url: str
:param hostname_configurations: Custom hostname configuration of the API
Management service.
:type hostname_configurations:
list[~azure.mgmt.apimanagement.models.HostnameConfiguration]
:ivar public_ip_addresses: Public Static Load Balanced IP addresses of the
API Management service in Primary region. Available only for Basic,
Standard and Premium SKU.
:vartype public_ip_addresses: list[str]
:ivar private_ip_addresses: Private Static Load Balanced IP addresses of
the API Management service in Primary region which is deployed in an
Internal Virtual Network. Available only for Basic, Standard and Premium
SKU.
:vartype private_ip_addresses: list[str]
:param virtual_network_configuration: Virtual network configuration of the
API Management service.
:type virtual_network_configuration:
~azure.mgmt.apimanagement.models.VirtualNetworkConfiguration
:param additional_locations: Additional datacenter locations of the API
Management service.
:type additional_locations:
list[~azure.mgmt.apimanagement.models.AdditionalLocation]
:param custom_properties: Custom properties of the API Management
service.</br>Setting
`Microsoft.WindowsAzure.ApiManagement.Gateway.Security.Ciphers.TripleDes168`
will disable the cipher TLS_RSA_WITH_3DES_EDE_CBC_SHA for all TLS(1.0, 1.1
and 1.2).</br>Setting
`Microsoft.WindowsAzure.ApiManagement.Gateway.Security.Protocols.Tls11`
can be used to disable just TLS 1.1.</br>Setting
`Microsoft.WindowsAzure.ApiManagement.Gateway.Security.Protocols.Tls10`
can be used to disable TLS 1.0 on an API Management service.</br>Setting
`Microsoft.WindowsAzure.ApiManagement.Gateway.Security.Backend.Protocols.Tls11`
can be used to disable just TLS 1.1 for communications with
backends.</br>Setting
`Microsoft.WindowsAzure.ApiManagement.Gateway.Security.Backend.Protocols.Tls10`
can be used to disable TLS 1.0 for communications with
| |
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class TriggerList(ListResource):
""" """
def __init__(self, version, account_sid):
    """
    Initialize the TriggerList

    :param Version version: Version that contains the resource
    :param account_sid: A 34 character string that uniquely identifies this resource.

    :returns: twilio.rest.api.v2010.account.usage.trigger.TriggerList
    :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerList
    """
    super(TriggerList, self).__init__(version)

    # Path solution: values interpolated into the request URI below.
    self._solution = {'account_sid': account_sid}
    self._uri = '/Accounts/{account_sid}/Usage/Triggers.json'.format(account_sid=account_sid)
def create(self, callback_url, trigger_value, usage_category,
           callback_method=values.unset, friendly_name=values.unset,
           recurring=values.unset, trigger_by=values.unset):
    """
    Create a new TriggerInstance

    :param unicode callback_url: URL Twilio will request when the trigger fires
    :param unicode trigger_value: the value at which the trigger will fire
    :param TriggerInstance.UsageCategory usage_category: The usage category the trigger watches
    :param unicode callback_method: HTTP method to use with callback_url
    :param unicode friendly_name: A user-specified, human-readable name for the trigger.
    :param TriggerInstance.Recurring recurring: How this trigger recurs
    :param TriggerInstance.TriggerField trigger_by: The field in the UsageRecord that fires the trigger

    :returns: Newly created TriggerInstance
    :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
    """
    # values.of drops any parameters left at values.unset.
    form_params = values.of({
        'CallbackUrl': callback_url,
        'TriggerValue': trigger_value,
        'UsageCategory': usage_category,
        'CallbackMethod': callback_method,
        'FriendlyName': friendly_name,
        'Recurring': recurring,
        'TriggerBy': trigger_by,
    })

    response_payload = self._version.create(
        'POST',
        self._uri,
        data=form_params,
    )

    return TriggerInstance(
        self._version,
        response_payload,
        account_sid=self._solution['account_sid'],
    )
def stream(self, recurring=values.unset, trigger_by=values.unset,
usage_category=values.unset, limit=None, page_size=None):
"""
Streams TriggerInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param TriggerInstance.Recurring recurring: Filter by recurring
:param TriggerInstance.TriggerField trigger_by: Filter by trigger by
:param TriggerInstance.UsageCategory usage_category: Filter by Usage Category
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.trigger.TriggerInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
recurring=recurring,
trigger_by=trigger_by,
usage_category=usage_category,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, recurring=values.unset, trigger_by=values.unset,
usage_category=values.unset, limit=None, page_size=None):
"""
Lists TriggerInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param TriggerInstance.Recurring recurring: Filter by recurring
:param TriggerInstance.TriggerField trigger_by: Filter by trigger by
:param TriggerInstance.UsageCategory usage_category: Filter by Usage Category
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.trigger.TriggerInstance]
"""
return list(self.stream(
recurring=recurring,
trigger_by=trigger_by,
usage_category=usage_category,
limit=limit,
page_size=page_size,
))
def page(self, recurring=values.unset, trigger_by=values.unset,
usage_category=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of TriggerInstance records from the API.
Request is executed immediately
:param TriggerInstance.Recurring recurring: Filter by recurring
:param TriggerInstance.TriggerField trigger_by: Filter by trigger by
:param TriggerInstance.UsageCategory usage_category: Filter by Usage Category
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
"""
params = values.of({
'Recurring': recurring,
'TriggerBy': trigger_by,
'UsageCategory': usage_category,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return TriggerPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of TriggerInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return TriggerPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a TriggerContext
:param sid: Fetch by unique usage-trigger Sid
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
"""
return TriggerContext(self._version, account_sid=self._solution['account_sid'], sid=sid)
def __call__(self, sid):
"""
Constructs a TriggerContext
:param sid: Fetch by unique usage-trigger Sid
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
"""
return TriggerContext(self._version, account_sid=self._solution['account_sid'], sid=sid)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.TriggerList>'
class TriggerPage(Page):
    """One page of TriggerInstance results returned by the API."""

    def __init__(self, version, response, solution):
        """
        Initialize the TriggerPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: Path solution dict carrying the account_sid

        :returns: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
        :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
        """
        super(TriggerPage, self).__init__(version, response)

        # Retain the path solution so get_instance() can bind each record
        # to the correct account.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of TriggerInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
        :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
        """
        return TriggerInstance(
            self._version,
            payload,
            account_sid=self._solution['account_sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.TriggerPage>'
class TriggerContext(InstanceContext):
    """Context addressing a single usage trigger by account_sid and sid."""

    def __init__(self, version, account_sid, sid):
        """
        Initialize the TriggerContext

        :param Version version: Version that contains the resource
        :param account_sid: The account_sid
        :param sid: Fetch by unique usage-trigger Sid

        :returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
        :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
        """
        super(TriggerContext, self).__init__(version)

        # Path solution used to expand the resource URI template.
        self._solution = {'account_sid': account_sid, 'sid': sid}
        self._uri = '/Accounts/{account_sid}/Usage/Triggers/{sid}.json'.format(**self._solution)

    def fetch(self):
        """
        Fetch a TriggerInstance

        :returns: Fetched TriggerInstance
        :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
        """
        payload = self._version.fetch('GET', self._uri, params=values.of({}))

        return TriggerInstance(
            self._version,
            payload,
            account_sid=self._solution['account_sid'],
            sid=self._solution['sid'],
        )

    def update(self, callback_method=values.unset, callback_url=values.unset,
               friendly_name=values.unset):
        """
        Update the TriggerInstance

        :param unicode callback_method: HTTP method to use with callback_url
        :param unicode callback_url: URL Twilio will request when the trigger fires
        :param unicode friendly_name: A user-specified, human-readable name for the trigger.

        :returns: Updated TriggerInstance
        :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
        """
        # Unset optional fields are stripped from the body by values.of().
        payload = self._version.update('POST', self._uri, data=values.of({
            'CallbackMethod': callback_method,
            'CallbackUrl': callback_url,
            'FriendlyName': friendly_name,
        }))

        return TriggerInstance(
            self._version,
            payload,
            account_sid=self._solution['account_sid'],
            sid=self._solution['sid'],
        )

    def delete(self):
        """
        Deletes the TriggerInstance

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete('delete', self._uri)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ['{}={}'.format(key, val) for key, val in self._solution.items()]
        return '<Twilio.Api.V2010.TriggerContext {}>'.format(' '.join(details))
class TriggerInstance(InstanceResource):
""" """
class UsageCategory(object):
ANSWERING_MACHINE_DETECTION = "answering-machine-detection"
AUTHY_AUTHENTICATIONS = "authy-authentications"
AUTHY_CALLS_OUTBOUND = "authy-calls-outbound"
AUTHY_MONTHLY_FEES = "authy-monthly-fees"
AUTHY_PHONE_INTELLIGENCE = "authy-phone-intelligence"
AUTHY_PHONE_VERIFICATIONS = "authy-phone-verifications"
AUTHY_SMS_OUTBOUND = "authy-sms-outbound"
CALL_PROGESS_EVENTS = "call-progess-events"
CALLERIDLOOKUPS = "calleridlookups"
CALLS = "calls"
CALLS_CLIENT = "calls-client"
CALLS_GLOBALCONFERENCE = "calls-globalconference"
CALLS_INBOUND = "calls-inbound"
CALLS_INBOUND_LOCAL = "calls-inbound-local"
CALLS_INBOUND_MOBILE = "calls-inbound-mobile"
CALLS_INBOUND_TOLLFREE = "calls-inbound-tollfree"
CALLS_OUTBOUND = "calls-outbound"
CALLS_RECORDINGS = "calls-recordings"
CALLS_SIP = "calls-sip"
CALLS_SIP_INBOUND = "calls-sip-inbound"
CALLS_SIP_OUTBOUND = "calls-sip-outbound"
CARRIER_LOOKUPS = "carrier-lookups"
CONVERSATIONS = "conversations"
CONVERSATIONS_API_REQUESTS = "conversations-api-requests"
CONVERSATIONS_CONVERSATION_EVENTS = "conversations-conversation-events"
CONVERSATIONS_ENDPOINT_CONNECTIVITY = "conversations-endpoint-connectivity"
CONVERSATIONS_EVENTS = "conversations-events"
CONVERSATIONS_PARTICIPANT_EVENTS = "conversations-participant-events"
CONVERSATIONS_PARTICIPANTS = "conversations-participants"
CPS = "cps"
GROUP_ROOMS = "group-rooms"
GROUP_ROOMS_DATA_TRACK = "group-rooms-data-track"
GROUP_ROOMS_ENCRYPTED_MEDIA_RECORDED = "group-rooms-encrypted-media-recorded"
GROUP_ROOMS_MEDIA_DOWNLOADED = "group-rooms-media-downloaded"
GROUP_ROOMS_MEDIA_RECORDED = "group-rooms-media-recorded"
GROUP_ROOMS_MEDIA_ROUTED = "group-rooms-media-routed"
GROUP_ROOMS_MEDIA_STORED = "group-rooms-media-stored"
GROUP_ROOMS_PARTICIPANT_MINUTES = "group-rooms-participant-minutes"
GROUP_ROOMS_RECORDED_MINUTES = "group-rooms-recorded-minutes"
IP_MESSAGING = "ip-messaging"
IP_MESSAGING_COMMANDS = "ip-messaging-commands"
IP_MESSAGING_DATA_STORAGE = "ip-messaging-data-storage"
IP_MESSAGING_DATA_TRANSFER = "ip-messaging-data-transfer"
IP_MESSAGING_ENDPOINT_CONNECTIVITY = "ip-messaging-endpoint-connectivity"
LOOKUPS = "lookups"
MARKETPLACE = "marketplace"
MARKETPLACE_ALGORITHMIA_NAMED_ENTITY_RECOGNITION = "marketplace-algorithmia-named-entity-recognition"
MARKETPLACE_DIGITAL_SEGMENT_BUSINESS_INFO = "marketplace-digital-segment-business-info"
MARKETPLACE_GOOGLE_SPEECH_TO_TEXT = "marketplace-google-speech-to-text"
MARKETPLACE_IBM_WATSON_MESSAGE_INSIGHTS = "marketplace-ibm-watson-message-insights"
MARKETPLACE_IBM_WATSON_MESSAGE_SENTIMENT = "marketplace-ibm-watson-message-sentiment"
MARKETPLACE_IBM_WATSON_RECORDING_ANALYSIS = "marketplace-ibm-watson-recording-analysis"
MARKETPLACE_ICEHOOK_SYSTEMS_SCOUT = "marketplace-icehook-systems-scout"
MARKETPLACE_INFOGROUP_DATAAXLE_BIZINFO = "marketplace-infogroup-dataaxle-bizinfo"
MARKETPLACE_CADENCE_TRANSCRIPTION = "marketplace-cadence-transcription"
MARKETPLACE_CADENCE_TRANSLATION = "marketplace-cadence-translation"
MARKETPLACE_CAPIO_SPEECH_TO_TEXT = "marketplace-capio-speech-to-text"
MARKETPLACE_FACEBOOK_OFFLINE_CONVERSIONS = "marketplace-facebook-offline-conversions"
MARKETPLACE_KEEN_IO_CONTACT_CENTER_ANALYTICS = "marketplace-keen-io-contact-center-analytics"
MARKETPLACE_MARCHEX_CLEANCALL = "marketplace-marchex-cleancall"
MARKETPLACE_MARCHEX_SENTIMENT_ANALYSIS_FOR_SMS = "marketplace-marchex-sentiment-analysis-for-sms"
MARKETPLACE_MARKETPLACE_NEXTCALLER_SOCIAL_ID = "marketplace-marketplace-nextcaller-social-id"
MARKETPLACE_MOBILE_COMMONS_OPT_OUT_CLASSIFIER = "marketplace-mobile-commons-opt-out-classifier"
MARKETPLACE_NEXIWAVE_VOICEMAIL_TO_TEXT = "marketplace-nexiwave-voicemail-to-text"
MARKETPLACE_NEXTCALLER_ADVANCED_CALLER_IDENTIFICATION = "marketplace-nextcaller-advanced-caller-identification"
MARKETPLACE_NOMOROBO_SPAM_SCORE = "marketplace-nomorobo-spam-score"
MARKETPLACE_PAYFONE_TCPA_COMPLIANCE = "marketplace-payfone-tcpa-compliance"
MARKETPLACE_TELO_OPENCNAM = "marketplace-telo-opencnam"
MARKETPLACE_TRUECNAM_TRUE_SPAM = "marketplace-truecnam-true-spam"
MARKETPLACE_TWILIO_CALLER_NAME_LOOKUP_US = "marketplace-twilio-caller-name-lookup-us"
MARKETPLACE_TWILIO_CARRIER_INFORMATION_LOOKUP = "marketplace-twilio-carrier-information-lookup"
MARKETPLACE_VOICEBASE_PCI = "marketplace-voicebase-pci"
MARKETPLACE_VOICEBASE_TRANSCRIPTION = "marketplace-voicebase-transcription"
MARKETPLACE_WHITEPAGES_PRO_CALLER_IDENTIFICATION = "marketplace-whitepages-pro-caller-identification"
MARKETPLACE_WHITEPAGES_PRO_PHONE_INTELLIGENCE = "marketplace-whitepages-pro-phone-intelligence"
MARKETPLACE_WHITEPAGES_PRO_PHONE_REPUTATION = "marketplace-whitepages-pro-phone-reputation"
MARKETPLACE_WOLFRAM_SHORT_ANSWER = "marketplace-wolfram-short-answer"
MARKETPLACE_WOLFARM_SPOKEN_RESULTS = "marketplace-wolfarm-spoken-results"
MARKETPLACE_DEEPGRAM_PHRASE_DETECTOR = "marketplace-deepgram-phrase-detector"
MARKETPLACE_CONVRIZA_ABABA = "marketplace-convriza-ababa"
MARKETPLACE_IBM_WATSON_TONE_ANALYZER = "marketplace-ibm-watson-tone-analyzer"
MARKETPLACE_REMEETING_AUTOMATIC_SPEECH_RECOGNITION = "marketplace-remeeting-automatic-speech-recognition"
MARKETPLACE_TCPA_DEFENSE_SOLUTIONS_BLACKLIST_FEED = "marketplace-tcpa-defense-solutions-blacklist-feed"
MARKETPLACE_VOICEBASE_TRANSCRIPTION_CUSTOM_VOCABULARY = "marketplace-voicebase-transcription-custom-vocabulary"
MARKETPLACE_YTICA_CONTACT_CENTER_REPORTING_ANALYTICS = "marketplace-ytica-contact-center-reporting-analytics"
MEDIASTORAGE = "mediastorage"
MMS = "mms"
MMS_INBOUND = | |
################################################################################
##### Module with lmfit models for Gaussian and Hyper-EMG distributions
##### Author: <NAME>
##### Import dependencies
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import lmfit as fit
from .config import *
from .emg_funcs import *
upper_bound_taus = 5e-02 # keeps minimizer from running towards virtually flat tails #TODO: Consider moving to config for user control
rel_var_mus = 1e-05 # allows centroids of underlying Gaussians (`mu`) to vary within x_pos +- rel_var_mus*x_pos
def create_default_init_pars(mass_number=100): #TODO: Consider moving to config for user control
    """
    Scale default parameters to mass of interest and return parameter dictionary.

    Parameters
    ----------
    mass_number : int, optional
        Atomic mass number of peaks of interest, defaults to 100.

    Returns
    -------
    dict
        Dictionary with default initial parameters (scaled to `mass_number`).

    Notes
    -----
    **The default parameters were defined for mass 100**, to obtain suitable
    parameters at other masses all mass-dependent parameters (i.e. shape
    parameters & `amp`) are multiplied by the scaling factor `mass_number`/100.
    """
    # Mass scaling factor relative to the reference mass of 100.
    scl = mass_number/100

    # Mass-dependent quantities (amp, sigma, taus) are scaled by `scl`;
    # the dimensionless mixing weights (theta, etas) are not.
    return {
        'amp': 0.45*scl,
        'mu': None,
        'sigma': 0.00014*scl,    # [u]
        'theta': 0.5,
        'eta_m1': 0.85,
        'eta_m2': 0.10,
        'eta_m3': 0.05,
        'tau_m1': 50e-06*scl,    # [u]
        'tau_m2': 500e-06*scl,   # [u]
        'tau_m3': 1000e-06*scl,  # [u]
        'eta_p1': 0.85,
        'eta_p2': 0.10,
        'eta_p3': 0.05,
        'tau_p1': 50e-06*scl,    # [u]
        'tau_p2': 600e-06*scl,   # [u]
        'tau_p3': 1000e-06*scl,  # [u]
    }
pars_dict = create_default_init_pars()
################################################################################
##### Define emgfit fit models
def Gaussian(peak_index, x_pos, amp, init_pars=pars_dict,
             vary_shape_pars=True, index_first_peak=None):
    """
    Gaussian lmfit model (single-peak Gaussian fit model)

    Parameters
    ----------
    peak_index : int
        Index of peak to fit.
    x_pos : float
        Initial guess of peak centroid.
    amp : float
        Initial guess of peak amplitude.
    init_pars : dict
        Initial parameters for fit ('amp' and 'mu' parameters in `init_pars`
        dictionary are overwritten by the given `amp` and `x_pos` arguments)
    vary_shape_pars : bool
        Whether to vary or fix peak shape parameters (i.e. sigma, theta,
        eta's and tau's).
    index_first_peak : int
        Index of the first peak to be fit in a multi-peak-fit. Only use this
        during peak shape determination to enforce common shape parameters
        for all peaks to be fitted. (For a regular fit with
        ``vary_shape_pars = False`` this is irrelevant.)

    Returns
    -------
    :class:`lmfit.model.Model`
        `lmfit` model object
    """
    # Define model function (normalized Gaussian scaled by `amp`)
    def Gaussian(x, amp, mu, sigma):
        return amp/(sigma*np.sqrt(2*np.pi)) * np.exp(-(x-mu)**2/(2*sigma**2))

    pref = 'p{0}_'.format(peak_index)  # set prefix for respective peak
    model = fit.Model(Gaussian, prefix=pref, nan_policy='propagate')

    # Add parameter bounds or restrictions and define starting values.
    # `mu` is only allowed to vary within a small relative window around x_pos.
    model.set_param_hint(pref+'amp', value=amp, min=0)
    model.set_param_hint(pref+'mu', value=x_pos,
                         min=x_pos*(1-rel_var_mus), max=x_pos*(1+rel_var_mus))
    model.set_param_hint(pref+'sigma', value=init_pars['sigma'], min=0,
                         max=init_pars['sigma']+0.005, vary=vary_shape_pars)

    # Enforce common shape parameters for all peaks by tying this peak's
    # sigma to the first peak's via an `expr` constraint (only needed
    # during peak-shape calibration).
    # Fixed: compare to None with `is not None` instead of `!= None` (PEP 8).
    if index_first_peak is not None and peak_index != index_first_peak:
        first_pref = 'p{0}_'.format(index_first_peak)
        model.set_param_hint(pref+'sigma', value=init_pars['sigma'], min=0,
                             max=init_pars['sigma']+0.005, expr=first_pref+'sigma')
    return model
def emg01(peak_index, x_pos, amp, init_pars=pars_dict,
          vary_shape_pars=True, index_first_peak=None):
    """
    Hyper-EMG(0,1) lmfit model (single-peak fit model with one exponential tail
    on the right)

    Parameters
    ----------
    peak_index : int
        Index of peak to fit.
    x_pos : float
        Initial guess of peak centroid.
    amp : float
        Initial guess of peak amplitude.
    init_pars : dict
        Initial parameters for fit ('amp' and 'mu' parameters in `init_pars`
        dictionary are overwritten by the given `amp` and `x_pos` arguments)
    vary_shape_pars : bool
        Whether to vary or fix peak shape parameters (i.e. sigma, theta,
        eta's and tau's).
    index_first_peak : int
        Index of the first peak to be fit in a multi-peak-fit. Only use this
        during peak shape determination to enforce common shape parameters
        for all peaks to be fitted. (For a regular fit with
        ``vary_shape_pars = False`` this is irrelevant.)

    Returns
    -------
    :class:`lmfit.model.Model`
        `lmfit` model object
    """
    # Define model function: pure right-tailed hyper-EMG (theta = 0,
    # no left-tail components).
    def emg01(x, amp, mu, sigma, tau_p1):
        return amp*h_emg(x, mu, sigma, 0, (0,), (0,), (1,), (tau_p1,))

    pref = 'p{0}_'.format(peak_index)  # set prefix for respective peak
    model = fit.Model(emg01, prefix=pref, nan_policy='propagate')

    # Add parameter bounds or restrictions and define starting values.
    model.set_param_hint(pref+'amp', value=amp, min=1e-20)
    model.set_param_hint(pref+'mu', value=x_pos,
                         min=x_pos*(1-rel_var_mus), max=x_pos*(1+rel_var_mus))
    model.set_param_hint(pref+'sigma', value=init_pars['sigma'], min=0,
                         max=init_pars['sigma']+0.005, vary=vary_shape_pars)
    model.set_param_hint(pref+'tau_p1', value=init_pars['tau_p1'], min=1e-12,
                         max=upper_bound_taus, vary=vary_shape_pars)

    # Enforce common shape parameters for all peaks by tying them to the
    # first fitted peak via `expr` constraints (only needed during
    # peak-shape calibration).
    # Fixed: compare to None with `is not None` instead of `!= None` (PEP 8).
    if index_first_peak is not None and peak_index != index_first_peak:
        first_pref = 'p{0}_'.format(index_first_peak)
        model.set_param_hint(pref+'sigma', value=init_pars['sigma'], min=0,
                             max=init_pars['sigma']+0.005, expr=first_pref+'sigma')
        model.set_param_hint(pref+'tau_p1', value=init_pars['tau_p1'], min=1e-12,
                             max=upper_bound_taus, expr=first_pref+'tau_p1')
    return model
def emg10(peak_index, x_pos, amp, init_pars=pars_dict,
          vary_shape_pars=True, index_first_peak=None):
    """
    Hyper-EMG(1,0) lmfit model (single-peak fit model with one exponential tail
    on the left)

    Parameters
    ----------
    peak_index : int
        Index of peak to fit.
    x_pos : float
        Initial guess of peak centroid.
    amp : float
        Initial guess of peak amplitude.
    init_pars : dict
        Initial parameters for fit ('amp' and 'mu' parameters in `init_pars`
        dictionary are overwritten by the given `amp` and `x_pos` arguments)
    vary_shape_pars : bool
        Whether to vary or fix peak shape parameters (i.e. sigma, theta,
        eta's and tau's).
    index_first_peak : int
        Index of the first peak to be fit in a multi-peak-fit. Only use this
        during peak shape determination to enforce common shape parameters
        for all peaks to be fitted. (For a regular fit with
        ``vary_shape_pars = False`` this is irrelevant.)

    Returns
    -------
    :class:`lmfit.model.Model`
        `lmfit` model object
    """
    # Define model function: pure left-tailed hyper-EMG (theta = 1,
    # no right-tail components).
    def emg10(x, amp, mu, sigma, tau_m1):
        return amp*h_emg(x, mu, sigma, 1, (1,), (tau_m1,), (0,), (0,))

    pref = 'p{0}_'.format(peak_index)  # set prefix for respective peak
    model = fit.Model(emg10, prefix=pref, nan_policy='propagate')

    # Add parameter bounds or restrictions and define starting values.
    model.set_param_hint(pref+'amp', value=amp, min=1e-20)
    model.set_param_hint(pref+'mu', value=x_pos,
                         min=x_pos*(1-rel_var_mus), max=x_pos*(1+rel_var_mus))
    model.set_param_hint(pref+'sigma', value=init_pars['sigma'], min=0,
                         max=init_pars['sigma']+0.005, vary=vary_shape_pars)
    model.set_param_hint(pref+'tau_m1', value=init_pars['tau_m1'], min=1e-12,
                         max=upper_bound_taus, vary=vary_shape_pars)

    # Enforce common shape parameters for all peaks by tying them to the
    # first fitted peak via `expr` constraints (only needed during
    # peak-shape calibration).
    # Fixed: compare to None with `is not None` instead of `!= None` (PEP 8).
    if index_first_peak is not None and peak_index != index_first_peak:
        first_pref = 'p{0}_'.format(index_first_peak)
        model.set_param_hint(pref+'sigma', value=init_pars['sigma'], min=0,
                             max=init_pars['sigma']+0.005, expr=first_pref+'sigma')
        model.set_param_hint(pref+'tau_m1', value=init_pars['tau_m1'], min=1e-12,
                             max=upper_bound_taus, expr=first_pref+'tau_m1')
    return model
def emg11(peak_index, x_pos, amp, init_pars=pars_dict,
          vary_shape_pars=True, index_first_peak=None):
    """
    Hyper-EMG(1,1) lmfit model (single-peak fit model with one exponential tail
    on the left and one exponential tail on the right)

    Parameters
    ----------
    peak_index : int
        Index of peak to fit.
    x_pos : float
        Initial guess of peak centroid.
    amp : float
        Initial guess of peak amplitude.
    init_pars : dict
        Initial parameters for fit ('amp' and 'mu' parameters in `init_pars`
        dictionary are overwritten by the given `amp` and `x_pos` arguments)
    vary_shape_pars : bool
        Whether to vary or fix peak shape parameters (i.e. sigma, theta,
        eta's and tau's).
    index_first_peak : int
        Index of the first peak to be fit in a multi-peak-fit. Only use this
        during peak shape determination to enforce common shape parameters
        for all peaks to be fitted. (For a regular fit with
        ``vary_shape_pars = False`` this is irrelevant.)

    Returns
    -------
    :class:`lmfit.model.Model`
        `lmfit` model object
    """
    # Define model function: one left and one right exponential tail,
    # mixed by weight `theta`.
    def emg11(x, amp, mu, sigma, theta, tau_m1, tau_p1):
        return amp*h_emg(x, mu, sigma, theta, (1,), (tau_m1,), (1,), (tau_p1,))  # from emg_funcs.py

    pref = 'p{0}_'.format(peak_index)  # set prefix for respective peak
    model = fit.Model(emg11, prefix=pref, nan_policy='propagate')

    # Add parameter bounds or restrictions and define starting values.
    model.set_param_hint(pref+'amp', value=amp, min=1e-20)
    model.set_param_hint(pref+'mu', value=x_pos,
                         min=x_pos*(1-rel_var_mus), max=x_pos*(1+rel_var_mus))
    model.set_param_hint(pref+'sigma', value=init_pars['sigma'], min=0,
                         max=init_pars['sigma']+0.005, vary=vary_shape_pars)
    model.set_param_hint(pref+'theta', value=init_pars['theta'], min=0, max=1,
                         vary=vary_shape_pars)
    model.set_param_hint(pref+'tau_m1', value=init_pars['tau_m1'], min=1e-12,
                         max=upper_bound_taus, vary=vary_shape_pars)
    model.set_param_hint(pref+'tau_p1', value=init_pars['tau_p1'], min=1e-12,
                         max=upper_bound_taus, vary=vary_shape_pars)

    # Enforce common shape parameters for all peaks by tying them to the
    # first fitted peak via `expr` constraints (only needed during
    # peak-shape calibration).
    # Fixed: compare to None with `is not None` instead of `!= None` (PEP 8).
    if index_first_peak is not None and peak_index != index_first_peak:
        first_pref = 'p{0}_'.format(index_first_peak)
        model.set_param_hint(pref+'sigma', value=init_pars['sigma'], min=0,
                             max=init_pars['sigma']+0.005, expr=first_pref+'sigma')
        model.set_param_hint(pref+'theta', value=init_pars['theta'], min=0, max=1,
                             expr=first_pref+'theta')
        model.set_param_hint(pref+'tau_m1', value=init_pars['tau_m1'], min=1e-12,
                             max=upper_bound_taus, expr=first_pref+'tau_m1')
        model.set_param_hint(pref+'tau_p1', value=init_pars['tau_p1'], min=1e-12,
                             max=upper_bound_taus, expr=first_pref+'tau_p1')
    return model
def emg12(peak_index, x_pos, amp, init_pars=pars_dict,
vary_shape_pars=True, index_first_peak=None):
"""
Hyper-EMG(1,2) lmfit model (single-peak fit model with one exponential tail
on the left and two exponential tails on the right)
Parameters
----------
peak_index : int
Index of peak to fit.
x_pos : float
Initial guess of peak centroid.
amp : float
Initial | |
251, 257, 263, 269, 271, 277, 281, \
283, 293, 307, 311, 313, 317, 331, 337, 347, 349, \
353, 359, 367, 373, 379, 383, 389, 397, 401, 409, \
419, 421, 431, 433, 439, 443, 449, 457, 461, 463, \
467, 479, 487, 491, 499, 503, 509, 521, 523, 541, \
547, 557, 563, 569, 571, 577, 587, 593, 599, 601, \
607, 613, 617, 619, 631, 641, 643, 647, 653, 659, \
661, 673, 677, 683, 691, 701, 709, 719, 727, 733, \
739, 743, 751, 757, 761, 769, 773, 787, 797, 809, \
811, 821, 823, 827, 829, 839, 853, 857, 859, 863, \
877, 881, 883, 887, 907, 911, 919, 929, 937, 941, \
947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, \
1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, \
1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, \
1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, \
1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, \
1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, \
1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, \
1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, \
1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, \
1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, \
1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, \
1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, \
1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, \
1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, \
1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, \
2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, \
2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, \
2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, \
2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, \
2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, \
2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, \
2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, \
2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, \
2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, \
2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, \
2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, \
2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, \
3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, \
3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, \
3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, \
3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, \
3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, \
3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, \
3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, \
3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, \
3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, \
3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, \
3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907, \
3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, \
4001, 4003, 4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057, \
4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, \
4153, 4157, 4159, 4177, 4201, 4211, 4217, 4219, 4229, 4231, \
4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, \
4327, 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, \
4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481, 4483, 4493, \
4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, \
4591, 4597, 4603, 4621, 4637, 4639, 4643, 4649, 4651, 4657, \
4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, \
4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, \
4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933, 4937, \
4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, \
5009, 5011, 5021, 5023, 5039, 5051, 5059, 5077, 5081, 5087, \
5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, \
5189, 5197, 5209, 5227, 5231, 5233, 5237, 5261, 5273, 5279, \
5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387, \
5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443, \
5449, 5471, 5477, 5479, 5483, 5501, 5503, 5507, 5519, 5521, \
5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, \
5641, 5647, 5651, 5653, 5657, 5659, 5669, 5683, 5689, 5693, \
5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, \
5801, 5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, \
5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923, 5927, 5939, \
5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, \
6067, 6073, 6079, 6089, 6091, 6101, 6113, 6121, 6131, 6133, \
6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, \
6229, 6247, 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, \
6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361, 6367, \
6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, \
6481, 6491, 6521, 6529, 6547, 6551, 6553, 6563, 6569, 6571, \
6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, \
6679, 6689, 6691, 6701, 6703, 6709, 6719, 6733, 6737, 6761, \
6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833, \
6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, \
6947, 6949, 6959, 6961, 6967, 6971, 6977, 6983, 6991, 6997, \
7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, \
7109, 7121, 7127, 7129, 7151, 7159, 7177, 7187, 7193, 7207, \
7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, \
7307, 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, \
7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487, 7489, 7499, \
7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, \
7573, 7577, 7583, 7589, 7591, 7603, 7607, 7621, 7639, 7643, \
7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, \
7727, 7741, 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, \
7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907, 7919, \
7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, \
8039, 8053, 8059, 8069, 8081, 8087, 8089, 8093, 8101, 8111, \
8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, \
8221, 8231, 8233, 8237, 8243, 8263, 8269, 8273, 8287, 8291, \
8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387, \
8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, \
8513, 8521, 8527, 8537, 8539, 8543, 8563, 8573, 8581, 8597, \
8599, 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, \
8681, 8689, 8693, 8699, 8707, 8713, 8719, 8731, 8737, 8741, \
8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, \
8837, 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, \
8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001, 9007, 9011, \
9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, \
9127, 9133, 9137, 9151, 9157, 9161, 9173, 9181, 9187, 9199, \
9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, \
9293, 9311, 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, \
9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437, 9439, \
9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, \
9539, 9547, 9551, 9587, 9601, 9613, 9619, 9623, 9629, 9631, \
9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, \
9739, 9743, 9749, 9767, 9769, 9781, 9787, 9791, 9803, 9811, \
9817, 9829, 9833, 9839, 9851, 9857, 9859, | |
from Player import Player
import numpy as np
class FuziyPlayer(Player):
    def name(self):
        """Return the human-readable display name of this player."""
        return "Fuziy Player"
def max_value(self, board, action, alpha, beta, player_code, p):
if p == 0:
result = self.evaluate(player_code, board), action
return result
sucessors = self.sucessores(player_code, board)
for s in sucessors:
mv, ac = self.min_value(s['board'], s['action'], alpha, beta, player_code, p-1)
if (mv > alpha):
alpha = mv
action = ac
if (alpha >= beta):
return alpha, action
return alpha, action
def min_value(self, board, action, alpha, beta, player_code, p):
if p == 0:
result = self.evaluate(player_code, board), action
return result
sucessors = self.sucessores(player_code, board)
for s in sucessors:
mv, ac = self.max_value(s['board'], s['action'], alpha, beta, player_code, p-1)
if (mv < beta):
beta = mv
action = ac
if (beta <= alpha):
return beta, action
return beta, action
    def move(self, player_code, board):
        """Pick a column to play, returning a ``(None, column)`` pair.

        The decision cascade, in priority order:
        1. a depth-5 alpha-beta search proposes a baseline ``action``;
        2. if the opponent has a live three-in-a-row threat, play any move
           that keeps their evaluation above the 70000 panic threshold;
        3. block an imminent opponent win found by ``next_move``;
        4. take our own imminent win found by ``next_move``;
        5. fall back to the first column with any empty cell.

        NOTE(review): the first tuple element is always None — callers
        presumably only consume the column; confirm against Player.move.
        """
        # Baseline choice from minimax (alpha/beta seeded with sentinel infinities).
        _, action = self.max_value(board, None, -999999, 999999, player_code, 5)
        if (self.emergency(board, player_code)):
            # Opponent threatens: look one enemy move ahead and pre-empt any
            # placement that would score above the panic threshold.
            sucessores = self.sucessores(self.enemy(player_code), board)
            for s in sucessores:
                result = self.evaluate(self.enemy(player_code), s['board'])
                if (result > 70000):
                    print("EMERGENCY")
                    return None, s['action']
        # Block the opponent's immediate win, if any.
        near_lost, defence_position = self.next_move(self.enemy(player_code), board)
        if near_lost:
            print("BLOQUEIO APENAS")
            return None, defence_position
        # Take our own immediate win, if any.
        near_win, win_position = self.next_move(player_code, board)
        if near_win:
            print("VITORIA APENAS")
            return None, win_position
        if action is None:
            # Search produced no action (e.g. no successors): play the first
            # column that still has an empty cell.
            for i in range(6):
                for j in range(7):
                    if board[i,j] == 0:
                        return None, j
        return None, action
def sucessores(self, player_code, board):
sucessors = []
for i in range(0,7):
b = self.movement(player_code, board, i)
if(b is not None):
sucessors.append({'board':b, 'action':i})
return sucessors
def enemy(self, player):
if player == 1:
return 2
else:
return 1
def evaluate(self, player, board):
lines = self.count_row_line(player, board)
cols = self.count_row_column(player, board)
diags = self.count_row_diag(player, board)
diags2 = self.count_row_diag(player, board[::-1])
possible_path = lines['2'] + cols['2'] + diags['2'] + diags2['2']
near_to_win = lines['3'] + cols['3'] + diags['3'] + diags2['3']
almost_win = lines['4'] + cols['4'] + diags['4'] + diags2['4']
win = 100000*almost_win + 1000*near_to_win + possible_path
enemy = self.enemy(player)
enemy_lines = self.count_row_line(enemy, board)
enemy_cols = self.count_row_column(enemy, board)
enemy_digs = self.count_row_diag(enemy, board)
enemy_digs2 = self.count_row_diag(enemy, board[::-1])
possible_path_lost = enemy_lines['2'] + enemy_cols['2'] + enemy_digs['2'] + enemy_digs2['2']
near_to_lost = enemy_lines['3'] + enemy_cols['3'] + enemy_digs['3'] + enemy_digs2['3']
almost_lost = enemy_lines['4'] + enemy_cols['4'] + enemy_digs['4'] + enemy_digs2['4']
lost = 100000*almost_lost + 1000*near_to_lost + possible_path_lost
return (win - lost)
def count_row_line(self, player, board):
retorno = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
for i in range(6):
counter = 0
for j in range(6):
if ((board[i, j] == player) and (board[i, j] == board[i, j + 1])):
counter = counter + 1
else:
counter = 0
if (counter==1):
retorno['2'] = retorno['2'] + 1
if (counter==2):
retorno['3'] = retorno['3'] + 1
if (counter==3):
retorno['4'] = retorno['4'] + 1
return retorno
def count_row_column(self, player, board):
retorno = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
for i in range(7):
counter = 0
for j in range(5):
if ((board[j, i] == player) and (board[j,i] == board[j+1,i])):
counter = counter + 1
else:
counter = 0
if (counter==1):
retorno['2'] = retorno['2'] + 1
if (counter==2):
retorno['3'] = retorno['3'] + 1
if (counter==3):
retorno['4'] = retorno['4'] + 1
return retorno
def count_row_diag(self, player, board):
retorno = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
for k in range(-2,4):
counter = 0
x = np.diag(board, k=k)
for i in range(0,len(x)-1):
if ((x[i] == player) and (x[i] == x[i+1])):
counter = counter + 1
else:
counter = 0
if (counter==1):
retorno['2'] = retorno['2'] + 1
if (counter==2):
retorno['3'] = retorno['3'] + 1
if (counter==3):
retorno['4'] = retorno['4'] + 1
return retorno
def count_last_line(self, player, board):
counter = 0
for i in range(6):
if (board[5, i] == player):
counter = counter + 1
return counter
def emergency(self, board, player_code):
enemy = self.enemy(player_code)
enemy_lines = self.count_row_line(enemy, board)
enemy_cols = self.count_row_column(enemy, board)
enemy_digs = self.count_row_diag(enemy, board)
enemy_digs2 = self.count_row_diag(enemy, board[::-1])
if (enemy_cols['3'] > 0 or enemy_lines['3'] > 0 or enemy_digs['3'] > 0 or enemy_digs2['3']> 0):
return True
return False
def next_move(self, player, board):
next_position = 0
#horizontal
for i in range(6):
stay = 0
for j in range(6):
if i == 5:
if j == 3:
if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i, j] == player)):
stay += 1
next_position = j-1
return True, next_position
if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i, j-1] == player) and (board[i, j] == player)):
stay += 1
next_position = j-2
return True, next_position
if ((board[i, j+3] == player) and (board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i, j] == player)):
stay += 1
next_position = j+1
return True, next_position
if ((board[i, j+3] == player) and (board[i, j+2] == 0) and (board[i, j+1] == player) and (board[i, j] == player)):
stay += 1
next_position = j+2
return True, next_position
if j == 4:
if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i, j] == player)):
stay += 1
next_position = j-1
return True, next_position
if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i, j-1] == player) and (board[i, j] == player)):
stay += 1
next_position = j-2
return True, next_position
if ((board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i, j] == player) and (board[i, j-1] == player)):
stay += 1
next_position = j+1
return True, next_position
if j >= 5:
if ((board[i, j-1] == 0) and (board[i, j-2] == player) and (board[i, j-3] == player) and (board[i, j] == player)):
stay += 1
next_position = j-1
return True, next_position
if ((board[i, j-1] == player) and (board[i, j-2] == 0) and (board[i, j-3] == player) and (board[i, j] == player)):
stay += 1
next_position = j-2
return True, next_position
else:
if j == 3:
if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i+1, j-1] != 0) and (board[i, j] == player)):
stay += 1
next_position = j-1
return True, next_position
if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i+1, j-2] != 0) and (board[i, j-1] == player) and (board[i, j] == player)):
stay += 1
next_position = j-2
return True, next_position
if ((board[i, j+3] == player) and (board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i+1, j+1] != 0) and (board[i, j] == player)):
stay += 1
next_position = j+1
return True, next_position
if ((board[i, j+3] == player) and (board[i, j+2] == 0) and (board[i+1, j+2] != 0) and (board[i, j+1] == player) and (board[i, j] == player)):
stay += 1
next_position = j+2
return True, next_position
if j == 4:
if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i+1, j-1] != 0) and (board[i, j] == player)):
stay += 1
next_position = j-1
return True, next_position
if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i+1, j-2] != 0) and (board[i, j-1] == player) and (board[i, j] == player)):
stay += 1
next_position = j-2
return True, next_position
if ((board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i+1, j+1] != 0) and (board[i, j] == player) and (board[i, j-1] == player)):
stay += 1
next_position = j+1
return True, next_position
if j >= 5:
if ((board[i, j-1] == 0) and (board[i+1, j-1] != 0) and (board[i, j-2] == player) and (board[i, j-3] == player) and (board[i, j] == player)):
stay += 1
next_position = j-1
return True, next_position
if ((board[i, j-1] == player) and (board[i, j-2] == 0) and (board[i+1, j-2] != 0) and (board[i, j-3] == player) and (board[i, j] == player)):
stay += 1
next_position = j-2
return True, next_position
#vertical
for i in range(7):
end = 0
for j in range(5):
if ((board[j, i] | |
# <gh_stars>1-10
import random
from . import boosts as b
from .techniques import Tech
# Registry of every Style ever instantiated, keyed by style name
# (populated as a side effect of Style.__init__).
all_styles = {}
# Fallback level -> move-string mapping, copied into any Style created with
# move_str_dict=None.
DEFAULT_STYLE_MOVE_DICT = {2: '1', 4: '2', 6: '3', 8: '4', 10: '5'}
class Style(object):
    """A martial-arts style: level-gated techniques plus move strings.

    Instantiating a Style registers it in the module-level ``all_styles``
    mapping as a side effect.
    """
    def __init__(self, name, techs_dict, move_str_dict):
        # Display name; also the registration key in all_styles.
        self.name = name
        # Mapping of character level -> Tech unlocked at that level.
        self.techs = techs_dict
        if self.techs:
            self.is_tech_style = True
        else:
            self.is_tech_style = False
        # Deduplicated short descriptions of this style's techniques,
        # in iteration (level) order; self.features aliases the same list.
        features = []
        self.features = features
        for lv, t in self.techs.items():
            if t.descr_short not in features:
                features.append(t.descr_short)
        # NOTE(review): self.descr stays empty while descr_short carries the
        # feature summary, yet __str__ interpolates self.descr — the two look
        # swapped; confirm before relying on str(style).
        self.descr = ''
        self.descr_short = f"({', '.join(features)})"
        # Side effect: global registration.
        all_styles[self.name] = self
        # An explicit move dict (even an empty one) wins; only None falls
        # back to a copy of DEFAULT_STYLE_MOVE_DICT.
        self.move_strings = (
            move_str_dict if move_str_dict is not None else DEFAULT_STYLE_MOVE_DICT.copy()
        )
    def __str__(self):
        # NOTE(review): self.descr is always '' (see __init__), so this
        # renders as "Name ()".
        return f'{self.name} ({self.descr})'
    def __repr__(self):
        return f'{self.__class__.__name__}({self.name}, {self.techs}, {self.move_strings})'
# todo to txt file, then load? less syntax?
# Styles available by default.  Evaluated at import time; each Style(...)
# call also registers the style in ``all_styles`` (see Style.__init__).
default_styles = [
    Style(
        'Bag<NAME>',
        {
            3: Tech('Bagua Zhang I', dodge_mult=b.EVADE1),
            5: Tech('Bagua Zhang II', qp_gain_mult=b.QP_GAIN1),  # todo replace this
            7: Tech('Bagua Zhang III', in_fight_impro_wp_chance=b.IN_FIGHT_IMPRO_WP_CH1),
        },
        {1: 'Throw', 2: '1,palm', 4: '2,punch', 6: '3,kick', 8: '4,palm'},
    ),
    Style(
        'Balanced Fist',
        {
            3: Tech('Balanced Fist I', dist2_bonus=b.DIST2_MULT1),
            5: Tech('Balanced Fist II', atk_mult=b.ATTACK_HALF, dfs_mult=b.DEFENSE_HALF),
            7: Tech('Balanced Fist III', atk_mult=b.ATTACK1, dfs_mult=b.DEFENSE1),
        },
        {
            1: 'Sweep',  # deprives opponent of balance
            2: '1,mid-range',
            4: '2,mid-range',
            6: '3,long-range,charging',
            8: '4,mid-range',
        },
    ),
    Style(
        'Centipede',
        {
            3: Tech('Centipede I', agility_mult=b.AGILITY1),
            5: Tech('Centipede II', speed_mult=b.SPEED1),
            7: Tech(
                'A Hundred Arms',
                punch_strike_mult=b.STRIKE_MULT_HALF,
                palm_strike_mult=b.STRIKE_MULT_HALF,
            ),
        },
        {1: 'Short Palm', 2: '1,punch', 4: '2,palm', 6: '3,punch', 8: '4,palm'},
    ),
    Style(
        '<NAME>',
        {
            3: Tech('Choy Li Fut I', atk_mult=b.ATTACK1),
            5: Tech('Choy Li Fut II', block_mult=b.BLOCK1),
            7: Tech('Choy Li Fut III', dfs_penalty_step=b.DFS_PEN2),
        },
        {1: 'Short Punch', 2: '1,grappling', 4: '2,kick', 6: '3,punch', 8: '4,kick'},
    ),
    Style(
        'Dragon',
        {
            3: Tech('Dragon I', unblock_chance=b.UNBLOCK_CHANCE1),
            5: Tech('Dragon II', dodge_mult=b.EVADE1),
            7: Tech('Dragon III', qp_max=b.QP_MAX1, qp_start=b.QP_START1),
        },
        {2: 'Dragon Claw', 4: '2,kick', 6: '3,punch', 8: '4,energy,kick'},
    ),
    Style(
        'Drunken Boxing',
        # todo for drunken: no fall damage, falling restores qp, off-balance gives bonus to atk&dfs
        {
            3: Tech('Drunken Boxing I', agility_mult=b.AGILITY1),
            5: Tech('Drunken Boxing II', exotic_strike_mult=b.RARE_STRIKE_MULT1),
            7: Tech('Drunken Boxing III', flying_strike_mult=b.RARE_STRIKE_MULT1),
        },
        {2: '1,grappling', 4: 'Trick Punch', 6: '3,trick', 8: '4,trick'},  # todo 'Drunken Punch'
    ),
    Style(
        'Eagle Claw',
        # todo jumps cost less stamina; jumps restore qp?; reduced complexity for jumps
        # todo jump feature is called flying and it's hard to change
        {
            3: Tech('Eagle Claw I', dist3_bonus=b.DIST3_MULT1),
            5: Tech('Eagle Claw II', stun_chance=b.STUN_CH1),
            7: Tech('Eagle Claw III', critical_chance_mult=b.CRIT_CH1,
                    critical_mult=b.CRIT_M1),
        },
        {
            1: ('Leap Forward', 'Leap Back'),
            2: '1,grappling',
            4: '2,charging,punch',
            6: '3,flying',
            8: '4,flying',
        },
    ),
    Style(
        'Eight Extremities Fist',
        # todo opens the opponent's arms forcibly? fast movement?
        {
            3: Tech('Eight Extremities Fist I', dist1_bonus=b.DIST1_MULT1),
            5: Tech('Eight Extremities Fist II', elbow_strike_mult=b.RARE_STRIKE_MULT1),
            7: Tech('Eight Extremities Fist III', speed_mult=b.SPEED1),
        },
        {
            1: ('Elbow', 'Charging Step'),
            2: '1,knee',
            4: '2,punch',
            6: '3,elbow',
            8: '4,close-range',
        },
    ),
    Style(
        'Gecko',
        # todo an emphasis on speed and gravity? walls? implement 'cornered' status?
        {
            3: Tech('Gecko I', dist3_bonus=b.DIST3_MULT1),
            5: Tech('Gecko II', environment_chance=b.ENVIRONMENT_CH1),
            7: Tech('Gecko III', flying_strike_mult=b.STRIKE_MULT1),
        },
        {
            1: 'Leap Back',
            2: '1,long-range',
            4: '2,long-range',
            6: '3,extra long-range',
            8: '4,long-range',
        },
    ),
    Style(
        'Hung Ga',
        {
            3: Tech('Hung Ga I', punch_strike_mult=b.STRIKE_MULT1),
            5: Tech(
                'Hung Ga II', stamina_max_mult=b.STAM_MAX1, stamina_gain_mult=b.STAM_RESTORE1
            ),
            7: Tech('Hung Ga III', strength_mult=b.STRENGTH1),
        },
        {1: 'Short Punch', 2: '1,punch', 4: '2,punch', 6: '3,punch', 8: 'No-Shadow Kick'},
    ),
    Style(
        'Leopard',
        # todo counters
        {
            3: Tech('Leopard I', speed_mult=b.SPEED1),
            5: Tech('Leopard II', agility_mult=b.AGILITY1),
            7: Tech('Leopard III', guard_while_attacking=b.GUARD_WHILE_ATTACKING1),
        },
        {
            1: ('Leap Forward', 'Leap Back'),
            2: 'Leopard Punch',
            4: '2,elbow',
            6: '3,knee',
            8: '4,shocking',
        },
    ),
    Style(
        'Long Fist',
        # todo acrobatics bonus - more damage with complexity and no penalty
        {
            3: Tech('Long Fist I', dist3_bonus=b.DIST3_MULT1),
            5: Tech('Whirlwind Kicks', kick_strike_mult=b.STRIKE_MULT1),
            7: Tech('Long Fist III', atk_mult=b.ATTACK1),
        },
        {1: 'Leap Back', 2: 'Long Punch', 4: '2,long,kick', 6: '3,acrobatic', 8: '4,acrobatic'},
    ),
    Style(
        'Monkey',
        # todo reduce 'flying' stam_cost and complexity; can also give acrobatics bonus
        # todo very good ground defense
        {
            3: Tech('Monkey I', dodge_mult=b.EVADE1),
            5: Tech('Monkey II', agility_mult=b.AGILITY1),
            7: Tech('Monkey III', flying_strike_mult=b.STRIKE_MULT1),
        },
        {
            1: ('Leap Forward', 'Leap Back'),
            2: '1,palm',
            4: '2,claw',
            6: '3,flying,punch',
            8: '4,flying',
        },
    ),
    Style(
        'Poking Foot',
        # todo a tech: if a strike connects, give a speed boost (combo)
        {
            3: Tech('Poking Foot I', kick_strike_mult=b.STRIKE_MULT1),
            5: Tech('Poking Foot II', punch_strike_mult=b.STRIKE_MULT1),
            7: Tech('Falling Meteorites', speed_mult=b.SPEED1),
        },
        {
            1: 'Short Punch',
            2: '1,fast,kick',
            4: '2,lightning,kick',
            6: '3,lightning,punch',
            8: '4,shocking,kick',
        },
    ),
    Style(
        'Praying Mantis',
        # todo claw mult instead of atk?
        {
            3: Tech('Praying Mantis I', block_mult=b.BLOCK1),
            5: Tech('Praying Mantis II', stun_chance=b.STUN_CH1),
            7: Tech('Praying Mantis III', atk_mult=b.ATTACK1),
        },
        {1: 'Claw', 2: '1,claw', 4: 'Mantis Hook', 6: '3,fast,kick', 8: '4,shocking,claw'},
    ),
    Style(
        'Scorpion',
        {
            3: Tech('Scorpion I', kick_strike_mult=b.STRIKE_MULT1),
            5: Tech('Scorpion II', kick_strike_mult=b.STRIKE_MULT1),
            7: Tech('Scorpion III', kick_strike_mult=b.STRIKE_MULT1),
        },
        {2: '1,kick', 4: '2,kick', 6: '3,kick,shocking', 8: '4,kick'},
    ),
    Style(
        'Shaolin Fist',
        {
            3: Tech('Shaolin Fist I', hp_gain_mult=b.HP_GAIN1),
            5: Tech('Shaolin Fist II', palm_strike_mult=b.STRIKE_MULT1),
            7: Tech('Shaolin Fist III', palm_strike_mult=b.STRIKE_MULT1),
        },
        {2: '1,palm', 4: '2,palm', 6: '3,palm,surprise', 8: '4,palm'},  # todo 'Buddha Palm'
    ),
    Style(
        'Snake',
        {
            3: Tech('Snake I', dodge_mult=b.EVADE1),
            5: Tech('Snake II', critical_chance_mult=b.CRIT_CH1, critical_dam_mult=b.CRIT_M1),
            7: Tech('Snake III', qp_max=b.QP_MAX1, qp_start=b.QP_START1),
        },
        {
            2: '1,claw',
            4: 'Trick Claw',  # todo 'Snake Strike'
            6: '3,shocking,claw',
            8: '4,claw',
        },  # todo 'Poisonous Snake'
    ),
    Style(
        'Taiji',
        {
            3: Tech('Taiji I', guard_dfs_bonus=b.GUARD_DFS1),
            5: Tech('Taiji II', qp_gain_mult=b.QP_GAIN1),
            7: Tech('Taiji III', health_mult=b.HEALTH1),
        },
        {2: '1,palm', 4: '2,palm', 6: '3,energy', 8: '4,energy'},
    ),
    Style(
        'Tiger',
        {
            3: Tech('Tiger I', atk_mult=b.ATTACK1),
            5: Tech('Tiger II', strength_mult=b.STRENGTH1),
            7: Tech(
                'Tiger III', stamina_max_mult=b.STAM_MAX1, stamina_gain_mult=b.STAM_RESTORE1
            ),
        },
        {
            2: '1,claw',  # todo 'Tiger Claw' causes bleeding and not at lv2
            4: '2,power,palm',
            6: '3,power',
            8: '4,kick',
        },  # todo "Tiger's Tail"
    ),
    Style(
        'Toad',
        {
            3: Tech('Toad I', strength_mult=b.STRENGTH1),
            5: Tech('Toad II', dam_reduc=b.DAM_REDUC1),
            7: Tech('Toad III', dam_reduc=b.DAM_REDUC1),
        },
        {2: '1,palm', 4: '2,heavy,punch', 6: '3,power,palm', 8: '4,punch'},
    ),
    Style(
        'White Crane',
        {
            3: Tech('White Crane I', dfs_mult=b.DEFENSE1),
            5: Tech('White Crane II', dist1_bonus=b.DIST1_MULT1),
            7: Tech('White Crane III', block_mult=b.BLOCK1),
        },
        {2: '1,claw', 4: '2,claw', 6: '3,close-range', 8: '4,kick'},  # todo "Crane's Beak"
    ),
    Style(
        '<NAME>',
        {
            3: Tech('Wing Chun I', punch_strike_mult=b.STRIKE_MULT1),
            5: Tech('Wing Chun II', dist1_bonus=b.DIST1_MULT1),
            7: Tech('Wing Chun III', speed_mult=b.SPEED1),
        },
        {
            1: 'Charging Step',
            2: 'Short Fast Punch',  # todo Wing Chun Punch
            4: '2,elbow',
            6: '3,short',
            8: '4,short,punch',
        },  # todo Chain of Punches
    ),
    Style(
        'Xing Yi',
        {
            3: Tech('Xing Yi I', speed_mult=b.SPEED1),
            5: Tech('Xing Yi II', critical_chance_mult=b.CRIT_CH1, critical_dam_mult=b.CRIT_M1),
            7: Tech('Xing Yi III', qp_gain_mult=b.QP_GAIN1),
        },
        {
            2: '1,fast,mid-range',
            4: '2,surprise,close-range',
            6: '3,lightning,punch',
            8: '4,lightning,mid-range',
        },
    ),
]
# Special-purpose styles tied to specific characters / story events rather
# than picked from default_styles.  Instantiation still registers each in
# ``all_styles``.
BEGGAR_STYLE = Style(
    'Beggar\'s Fist',
    {
        3: Tech('Beggar\'s Fist I', dfs_mult=b.DEFENSE1),
        5: Tech('Beggar\'s Fist II', palm_strike_mult=b.RARE_STRIKE_MULT1),
        7: Tech('Beggar\'s Fist III', qp_gain_mult=b.QP_GAIN1, hp_gain_mult=b.HP_GAIN1),
    },
    {2: '1,palm', 4: '2,palm', 6: '3,energy', 8: '4,energy'},
)
THIEF_STYLE = Style(
    'Thief\'s Shadow',
    {
        3: Tech('Thief\'s Shadow I', speed_mult=b.SPEED1),
        5: Tech('Thief\'s Shadow II', dodge_mult=b.EVADE1),
        7: Tech('Thief\'s Shadow III', atk_mult=b.ATTACK1),
    },
    {2: '1,fast', 4: '2,surprise', 6: '3,shocking', 8: '4,trick'},
)
DRUNKARD_STYLE = Style(
    'Drunken Dragon',
    {
        3: Tech('Drunken Dragon I', agility_mult=b.AGILITY1),
        5: Tech('Drunken Dragon II', exotic_strike_mult=b.RARE_STRIKE_MULT1),
        7: Tech('Drunken Dragon III', unblock_chance=b.UNBLOCK_CHANCE1),
    },
    {2: '1,punch', 4: '2,energy', 6: '5,trick', 8: '6,trick'},  # 'Drunken Punch'?
)
TURTLE_NUNJUTSU = Style(
    'Turtle Ninjutsu',
    {
        3: Tech('Cowabunga I', agility_mult=b.AGILITY1),
        5: Tech('Cowabunga II', flying_strike_mult=b.RARE_STRIKE_MULT1),
        7: Tech('Cowabunga III', weapon_strike_mult=b.WP_STRIKE_MULT1),
    },
    {2: '1,punch', 4: '2,kick', 6: '3,flying', 8: '4,flying'},
)
# Tech-less styles: passing {} (not None) for the move dict means these get
# NO move strings rather than the DEFAULT_STYLE_MOVE_DICT fallback.
FLOWER_KUNGFU = Style('Flower Kung-fu', {}, {})
DIRTY_FIGHTING = Style(
    'Dirty Fighting',
    {},  # todo techs for dirty fighting?
    {2: '1,heavy', 4: '2,power', 6: '3,shocking', 8: '4,surprise'},
)
POLICE_KUNGFU = Style('Police Kung-fu', {}, {})
MONSTER_KUNGFU = Style('Monster Kung-fu', {}, {})
ZENS_STYLE = Style('Savant', {}, {})
NO_STYLE = Style('No Style', {}, {})
# todo add more foreign styles
# NB! when adding new foreign style, add names to names.py as well!
FOREIGN_STYLES = {
'England': Style(
'English Boxing',
{
3: Tech('English Boxing I', punch_strike_mult=b.STRIKE_MULT1),
5: Tech('English Boxing II', block_mult=b.BLOCK1),
7: Tech('English Boxing III', punch_strike_mult=b.STRIKE_MULT1),
},
{1: ('Long Punch', 'Short Punch'), 2: '1,punch', 4: '2,punch', 6: '3,punch', 8: '4,punch'},
),
'Germany': Style(
'Wrestling',
# todo add grabbing to wreslers (a defense move)
{
3: Tech('Wrestling I', grappling_strike_mult=b.STRIKE_MULT1),
5: Tech('Wrestling II', strength_mult=b.STRENGTH1),
7: Tech('Wrestling III', hp_gain_mult=b.HP_GAIN1),
},
{
1: 'Throw',
2: '1,grappling',
4: '2,grappling',
6: '3,grappling',
8: '4,grappling',
},
),
'Japan': Style(
'Karate',
| |
if category not in shops:
return await ctx.send(f"{category} category not found!")
# check if item exists
if item_name not in shops[category]:
return await ctx.send(f"{item_name} item not found!")
if "desc" in shops[category][item_name]:
overwrite = "overwritten"
else:
overwrite = "set"
shops[category][item_name]["desc"] = description
await ctx.send(f"Description has been {overwrite} for {item_name} in the {category} category")
@_datashopset.command(name="delitem")
async def delete_data_item(self, ctx, shop_name, item_name):
"""Delete an item from a shop, whether it has options or not"""
async with self.config.datashops() as shops:
# check if shop exists
if shop_name not in shops:
return await ctx.send(f"{shop_name} shop not found!")
# check if item exists
elif item_name not in shops[shop_name]:
return await ctx.send(f"{item_name} item not found!")
else:
del shops[shop_name][item_name]
return await ctx.tick()
@_datashopset.command(name="addoption")
async def add_data_item_option(self, ctx, shop_name, item_name, option, price):
"""Add an option to an existing item in the data shop"""
async with self.config.datashops() as shops:
# check if shop exists
if shop_name not in shops:
return await ctx.send(f"{shop_name} shop not found!")
# check if item exists
elif item_name not in shops[shop_name]:
return await ctx.send(f"{item_name} item not found!")
# check if option exists
elif option in shops[shop_name][item_name]["options"]:
return await ctx.send(f"{option} option already exists!")
else:
shops[shop_name][item_name]["options"][option] = price
return await ctx.tick()
@_datashopset.command(name="deloption")
async def del_data_item_option(self, ctx, shop_name, item_name, option):
"""Delete an option from an existing item in the data shop"""
async with self.config.datashops() as shops:
# check if shop exists
if shop_name not in shops:
return await ctx.send(f"{shop_name} shop not found!")
# check if item exists
elif item_name not in shops[shop_name]:
return await ctx.send(f"{item_name} item not found!")
# check if option exists
elif option not in shops[shop_name][item_name]["options"]:
return await ctx.send(f"{option} option not found!")
else:
del shops[shop_name][item_name]["options"][option]
return await ctx.tick()
    @commands.command(name="setcluster")
    @commands.guild_only()
    async def set_cluster(self, ctx):
        """
        Set the cluster you play on
        This is so the cog knows where to send your data
        """
        # The ArkTools cog supplies the cluster list; bail out quietly if it
        # is not loaded (self.arktools presumably reports the error itself —
        # TODO confirm).
        arktools = await self.arktools(ctx)
        if not arktools:
            return
        clusters = await arktools.config.guild(ctx.guild).clusters()
        # Build a backtick-formatted list of cluster names for the prompt.
        clist = ""
        for clustername in clusters:
            clist += f"`{clustername}`\n"
        if clist == "":
            return await ctx.send("No clusters have been created!")
        embed = discord.Embed(
            description=f"**Type one of the cluster names below.**\n"
                        f"{clist}"
        )
        msg = await ctx.send(embed=embed)
        # Only accept a reply from the invoking user in the same channel.
        def check(message: discord.Message):
            return message.author == ctx.author and message.channel == ctx.channel
        try:
            reply = await self.bot.wait_for("message", timeout=60, check=check)
        except asyncio.TimeoutError:
            return await msg.edit(embed=discord.Embed(description="You took too long :yawning_face:"))
        if reply.content.lower() not in clusters:
            return await msg.edit(embed=discord.Embed(description="Cluster doesn't exist!"))
        else:
            # NOTE(review): ctx.author.id is an int key; Config stores keys as
            # strings — confirm that readers look the entry up the same way.
            async with self.config.guild(ctx.guild).users() as users:
                users[ctx.author.id] = reply.content.lower()
            embed = discord.Embed(
                description=f"Cluster has been set for {ctx.author.name}!",
                color=discord.Color.green()
            )
            return await msg.edit(embed=embed)
@_rconshopset.command(name="addcategory")
async def add_rcon_category(self, ctx, shop_name):
"""Add an rcon shop category"""
async with self.config.guild(ctx.guild).shops() as shops:
if shop_name in shops:
return await ctx.send(f"{shop_name} shop already exists!")
else:
shops[shop_name] = {}
return await ctx.send(f"{shop_name} shop created!")
@_rconshopset.command(name="delcategory")
async def delete_rcon_category(self, ctx, shop_name):
"""Delete an rcon shop category"""
async with self.config.guild(ctx.guild).shops() as shops:
if shop_name in shops:
del shops[shop_name]
return await ctx.send(f"{shop_name} shop removed!")
else:
return await ctx.send(f"{shop_name} shop doesn't exist!")
@_rconshopset.command(name="renamecategory")
async def rename_rcon_category(self, ctx, current_name, new_name):
"""Rename an rcon shop category"""
async with self.config.guild(ctx.guild).shops() as shops:
if current_name in shops:
shops[new_name] = shops.pop(current_name)
return await ctx.send(f"{current_name} shop has been renamed to {new_name}!")
else:
return await ctx.send(f"{current_name} shop doesn't exist!")
    @_rconshopset.command(name="additem")
    async def add_rcon_item(self, ctx, category, item_name, price=None):
        """
        Add an item to an rcon shop category
        Use quotes if item name has spaces
        """
        async with self.config.guild(ctx.guild).shops() as shops:
            # check if shop exists
            if category not in shops:
                return await ctx.send(f"{category} category not found!")
            # check if item exists
            if item_name in shops[category]:
                return await ctx.send(f"{item_name} item already exists!")
            if price:
                # Priced item: prompt for the blueprint paths interactively.
                shops[category][item_name] = {"price": price, "options": {}, "paths": []}
                msg = await ctx.send(
                    "Type the full blueprint paths including quantity/quality/blueprint numbers below.\n"
                    "Separate each full path with a new line for multiple items in one pack.\n"
                    "Type `cancel` to cancel the item.")
                # Only accept a reply from the invoking user in this channel.
                def check(message: discord.Message):
                    return message.author == ctx.author and message.channel == ctx.channel
                try:
                    reply = await self.bot.wait_for("message", timeout=240, check=check)
                    if reply.content.lower() == "cancel":
                        return await ctx.send("Item add canceled.")
                    if reply.attachments:
                        # Paths supplied as an attached text file: download
                        # and split on CRLF line endings.
                        attachment_url = reply.attachments[0].url
                        async with aiohttp.ClientSession() as session:
                            async with session.get(attachment_url) as resp:
                                paths = await resp.text()
                                paths = paths.split("\r\n")
                    else:
                        # Paths typed directly: one per message line.
                        paths = reply.content.split("\n")
                    shops[category][item_name]["paths"] = paths
                    return await ctx.send(f"Item paths set!")
                except asyncio.TimeoutError:
                    return await msg.edit(embed=discord.Embed(description="You took too long :yawning_face:"))
            else:
                # No price given: create a priceless shell item; price comes
                # from per-option entries added later.
                shops[category][item_name] = {"price": False, "options": {}, "paths": []}
                return await ctx.send(f"Item added, please add options to it with `{ctx.prefix}shopset rcon addoption`")
@_rconshopset.command(name="description")
async def add_rcon_description(self, ctx, category, item_name, *, description: str):
"""Add a description to an RCON shop item"""
async with self.config.guild(ctx.guild).shops() as shops:
# check if shop exists
if category not in shops:
return await ctx.send(f"{category} category not found!")
# check if item exists
if item_name not in shops[category]:
return await ctx.send(f"{item_name} item not found!")
if "desc" in shops[category][item_name]:
overwrite = "overwritten"
else:
overwrite = "set"
shops[category][item_name]["desc"] = description
await ctx.send(f"Description has been {overwrite} for {item_name} in the {category} category")
@_rconshopset.command(name="delitem")
async def delete_rcon_item(self, ctx, shop_name, item_name):
"""
Delete an item from an rcon shop category
"""
async with self.config.guild(ctx.guild).shops() as shops:
# check if shop exists
if shop_name not in shops:
return await ctx.send(f"{shop_name} shop not found!")
# check if item exists
elif item_name not in shops[shop_name]:
return await ctx.send(f"{item_name} item not found!")
else:
del shops[shop_name][item_name]
return await ctx.tick()
@_rconshopset.command(name="addoption")
async def add_rcon_item_option(self, ctx, shop_name, item_name, option, price):
"""
Add an option to an existing item in the rcon shop
When it asks for paths, be sure to include the FULL blueprint path and <quantity> <quality> <BP T/F> identifiers
for BP identifier: 1=True and 0=False
"""
async with self.config.guild(ctx.guild).shops() as shops:
# check if shop exists
if shop_name not in shops:
return await ctx.send(f"{shop_name} shop not found!")
# check if item exists
elif item_name not in shops[shop_name]:
return await ctx.send(f"{item_name} item not found!")
# check if option exists
elif option in shops[shop_name][item_name]["options"]:
return await ctx.send(f"{option} option already exists!")
else:
msg = await ctx.send(
"Type the full blueprint paths including quantity/quality/blueprint numbers below.\n"
"Separate each full path with a new line for multiple items in one option.\n"
"Type `cancel` to cancel the option.")
def check(message: discord.Message):
return message.author == ctx.author and message.channel == ctx.channel
try:
reply = await self.bot.wait_for("message", timeout=240, check=check)
if reply.content.lower() == "cancel":
return await ctx.send("Option add canceled.")
if reply.attachments:
attachment_url = reply.attachments[0].url
async with aiohttp.ClientSession() as session:
async with session.get(attachment_url) as resp:
paths = await resp.text()
paths = paths.split("\r\n")
else:
paths = reply.content.split("\n")
shops[shop_name][item_name]["options"][option] = {"price": price, "paths": paths}
return await ctx.send(f"Option set!")
except asyncio.TimeoutError:
return await msg.edit("You took too long :yawning_face:")
@_rconshopset.command(name="deloption")
async def del_rcon_item_option(self, ctx, shop_name, item_name, option):
"""Delete an option from an existing item in the rcon shop"""
async with self.config.guild(ctx.guild).shops() as shops:
# check if shop exists
if shop_name not in shops:
return await ctx.send(f"{shop_name} shop not found!")
# check if item exists
elif item_name not in shops[shop_name]:
return await ctx.send(f"{item_name} item not found!")
# check if option exists
elif option not in shops[shop_name][item_name]["options"]:
return await ctx.send(f"{option} option not found!")
else:
del shops[shop_name][item_name]["options"][option]
return await ctx.tick()
@_rconshopset.command(name="checkitem")
async def check_rcon_item(self, ctx, shop_name, item_name):
"""Check the blueprint strings in an item"""
shops = await self.config.guild(ctx.guild).shops()
# check if shop exists
if shop_name not in shops:
return await ctx.send(f"{shop_name} shop not found!")
# check if item exists
elif item_name not in shops[shop_name]:
return await ctx.send(f"{item_name} item not found!")
else:
pathmsg = ""
for path in shops[shop_name][item_name]["paths"]:
pathmsg += f"`{path}`\n"
return await ctx.send(pathmsg)
# USER COMMANDS
    @commands.command(name="shopstats")
    async def shop_stats(self, ctx):
        """View all items purchased from all shops"""
        logs = await self.config.guild(ctx.guild).logs()
        if logs["items"] == {}:
            return await ctx.send("No logs yet!")
        # NOTE(review): this resolves to the module-level shop_stats() helper
        # (same name as this command method, found via global lookup), which
        # presumably builds one embed page per item — confirm at the import.
        embeds = await shop_stats(logs)
        await menu(ctx, embeds, DEFAULT_CONTROLS)
@commands.command(name="shoplb")
async def shop_leaderboard(self, ctx):
"""Open the shop leaderboard"""
logs = await self.config.guild(ctx.guild).logs()
if logs["users"] == {}:
return await ctx.send("No logs yet!")
shop_logs = {}
for user_id in logs["users"]:
count = 0
for item in logs["users"][user_id]:
purchased = logs["users"][user_id][item]["count"]
count += purchased
shop_logs[user_id] = count
sorted_items = | |
# <reponame>j-suchard/amqp-rpc-server<filename>src/amqp_rpc_server/basic_consumer.py
"""The basic consumer which consumes messages and relays them to an executor function"""
import json
import logging
import secrets
import sys
from typing import Optional, Callable
import pika
import pika.channel
import pika.exceptions
import pika.exchange_type
import pika.frame
class BasicConsumer:
"""The basic consumer handling the connection to the message broker and the running of the
executor"""
def __init__(
self,
amqp_dsn: str,
exchange_name: str,
executor: Callable[[bytes], bytes],
content_validator: Optional[Callable[[bytes], bool]] = None,
queue_name: str = secrets.token_urlsafe(nbytes=32),
exchange_type: pika.exchange_type.ExchangeType = pika.exchange_type.ExchangeType.fanout
):
"""
Initialize a new BasicConsumer
This consumer will handle the connection to the message broker and will handle incoming
and outgoing messages.
:param amqp_dsn: The Data Source Name pointing to a AMQPv0-9-1 enabled message broker
:type amqp_dsn: str
:param exchange_name: The name of the exchange which this consumer will bind on and
listen for new incoming messages. If this exchange does not exist, the exchange will
be created as a fanout exchange allowing multiple consumers reading all messages
:type exchange_name: str
:param executor: The function which shall be called if a new message was received and
acknowledged
:type executor: Callable[[bytes], bytes]
:param content_validator: A callable which returns if the content of the message is
valid for the executor. If the callable returns `False` the message will be rejected.
If no content_validator is supplied the message will always be acknowledged
:type content_validator: Callable[[bytes], bool], optional
:param queue_name: The name of the queue which shall be bound to the exchange by this
consumer. If no queue name is supplied a name will be autogenerated
:type queue_name: str, optional
:param exchange_type: The type of the exchange which is used if the exchange does not
exist. If the exchange already exists, the exchange_type needs to match the one which
has already been declared
:type exchange_type: pika.exchange_type.ExchangeType, optional
:type queue_name: str, optional
"""
# Check if the AMQP Data Source Name is not None or emtpy
if amqp_dsn is None:
raise ValueError('The amqp_dsn is a required parameter may not be None')
if amqp_dsn.strip() == '':
raise ValueError('The amqp_dsn is a required parameter and may not be empty')
# Check if the exchange name is None or empty
if exchange_name is None:
raise ValueError('The exchange_name is a required parameter and may not be None')
if exchange_name.strip() == '':
raise ValueError('The exchange_name is a required parameter and may not be empty')
# Check if the executor is set correctly
if executor is None:
raise ValueError('The executor is a required parameter and may not be None')
# Store the properties to the object
self._amqp_dsn = amqp_dsn
self._exchange_name = exchange_name
self._exchange_type = exchange_type
self._queue_name = queue_name
self._executor = executor
self._content_validator = content_validator
# Create a logger for the consumer
self._logger = logging.getLogger('amqp_rpc_server.basic_consumer.BasicConsumer')
# Initialize some attributes which are needed later and apply typing to them
self._connection: Optional[pika.SelectConnection] = None
self._channel: Optional[pika.channel.Channel] = None
self._qos_prefetch_count = 0
self._consumer_tag = None
self._is_consuming = False
self._is_closing = False
self.may_reconnect = False
    def start(self):
        """Start the consumer by connecting to the message broker.

        Opens the asynchronous connection and enters its ioloop; this call
        blocks the calling thread until the ioloop is stopped (see ``stop``).
        """
        self._connection = self._connect()
        self._connection.ioloop.start()
    def stop(self):
        """Stop the consumer and shutdown the connection to the message broker cleanly.

        When currently consuming, sends a cancel request and restarts the
        ioloop so the broker's confirmation frames can still be processed;
        otherwise the ioloop is stopped directly.
        """
        if not self._is_closing:
            self._is_closing = True
            self._logger.info('The consumer is stopping and closing the connection to the message '
                              'broker')
            if self._is_consuming:
                self._logger.debug('Stopping the consumer')
                self._stop_consuming()
                # Try to start the ioloop if necessary
                # (the loop must run for the Basic.Cancel / close frames to be
                # exchanged with the broker before we can finish shutting down)
                try:
                    self._logger.debug('Restarting the ioloop')
                    self._connection.ioloop.start()
                except Exception as error:  # pylint: disable=broad-except
                    self._logger.debug('IOLoop restart not necessary')
                    pass
            else:
                self._logger.debug('Currently not consuming')
                # Best effort: the loop may already be stopped
                try:
                    self._connection.ioloop.stop()
                except Exception:  # pylint: disable=broad-except
                    pass
            self._logger.info('Stopped the consumer and closed the connection to the message '
                              'broker')
def _stop_consuming(self):
"""Stop the consumption of messages"""
if self._channel:
self._logger.debug('Cancelling the active channel to the message broker')
self._channel.basic_cancel(self._consumer_tag, self._cb_channel_cancelled)
    def _cb_channel_cancelled(self, method_frame: pika.frame.Method):
        """
        Callback invoked if a channel has successfully been cancelled.

        Continues the shutdown sequence by closing the channel itself.

        :param method_frame: The result of the execution
        :type method_frame: pika.frame.Method
        """
        self._logger.debug('Successfully cancelled the channel at the message broker')
        self._close_channel()
def _close_channel(self):
"""Close the currently active channel"""
self._logger.debug('Closing the currently active channel to the message broker')
self._channel.close()
    def _connect(self) -> pika.SelectConnection:
        """Connect to the message broker

        Builds connection parameters from the stored DSN, attaches client
        properties (sent to the broker at connection time) and returns an
        asynchronous connection whose open/close results are routed to this
        object's callbacks.

        :return: The opened connection to the message broker
        :rtype: pika.SelectConnection
        """
        self._logger.info('Connecting to the message broker...')
        self._logger.debug('Connection DSN: %s',
                           self._amqp_dsn)
        # Build the connection parameters
        connection_parameters = pika.URLParameters(self._amqp_dsn)
        # Set the client properties
        connection_parameters.client_properties = {
            'connection_name': secrets.token_urlsafe(nbytes=16),
            'product': 'AMQP-RPC Server',
            'platform': f'Python {sys.version}',
            'information': 'Licensed under the 3-Clause BSD License. See the LICENSE file '
                           'supplied with this library',
            # NOTE(review): '<NAME>' looks like a scrubbed placeholder -- fill in the author
            'copyright': 'Copyright (c) <NAME>'
        }
        return pika.SelectConnection(
            parameters=connection_parameters,
            on_open_callback=self._cb_connection_opened,
            on_open_error_callback=self._cb_connection_open_failed,
            on_close_callback=self._cb_connection_closed
        )
def _cb_connection_open_failed(self, connection: pika.BaseConnection, reason: Exception):
"""
Callback for a failed connection attempt to a message broker
:param connection: The connection that failed
:type connection: pika.BaseConnection, unused
:param reason: The reason for the connection failure
:type reason: Exception
"""
self._logger.critical('Failed to establish a connection to the message broker: %s',
reason)
self.may_reconnect = True
self.stop()
return
    def _cb_connection_closed(self, connection: pika.connection.Connection, reason: Exception):
        """
        Callback for an unexpected connection closure

        :param connection: The connection that has been closed
        :type connection: pika.connection.Connection
        :param reason: The reason for the connection closure
        :type reason: Exception
        """
        # Unset the channel so no more messages can be sent
        self._channel = None
        if self._is_closing:
            # Deliberate shutdown (stop() was called): just end the ioloop
            self._connection.ioloop.stop()
        else:
            self._logger.error('The connection to the message broker was closed unexpectedly for '
                               'the following reason: %s',
                               reason)
            # Signal the owner that a reconnect attempt is reasonable
            self.may_reconnect = True
    def _cb_connection_opened(self, connection: pika.BaseConnection):
        """Handle an opened connection

        :param connection: The connection that has been opened
        :type connection: pika.SelectConnection
        """
        self._logger.debug('Connected to the message broker')
        # NOTE(review): this logs the *client* properties we sent, but labels
        # them "Server properties" -- confirm which was intended
        self._logger.debug('Server properties: %s',
                           connection.params.client_properties)
        # Call for opening a channel
        self._open_channel()
def _open_channel(self):
"""Try to open a new channel to the message broker"""
self._logger.debug('Opening a new channel between the message broker and the server...')
self._connection.channel(on_open_callback=self._cb_channel_opened)
    def _cb_channel_opened(self, channel: pika.channel.Channel):
        """Store the newly opened channel, watch for its closure and declare the exchange.

        :param channel: The channel the broker just opened
        :type channel: pika.channel.Channel
        """
        self._logger.debug('Opened a channel between the message broker and the server')
        self._logger.debug('Channel number: %s',
                           channel.channel_number)
        # Save the opened channel to the consumer
        self._channel = channel
        # Add a callback for a closed channel
        self._channel.add_on_close_callback(self._cb_channel_closed)
        # Set up the exchange
        self._setup_exchange()
    def _cb_channel_closed(self, _channel: pika.channel.Channel, reason: Exception):
        """Callback for how to handle a closed channel

        :param _channel: The channel that was closed (unused)
        :type _channel: pika.channel.Channel
        :param reason: Exception describing why the channel was closed
        :type reason: Exception
        """
        if isinstance(reason, pika.exceptions.ChannelClosedByBroker):
            # Broker-initiated closure: treat as fatal but allow a reconnect
            self._logger.critical('The message broker closed the currently active channel')
            self.may_reconnect = True
            self._is_closing = True
            self._close_connection()
        elif isinstance(reason, pika.exceptions.ChannelClosedByClient):
            # Expected during our own shutdown sequence
            self._logger.info('The server closed the connection to the message broker')
            self._close_connection()
        else:
            # Anything else is unanticipated: shut down but permit a reconnect
            self._logger.critical('The channel was closed for an not handled error: %s',
                                  reason)
            self._is_closing = True
            self.may_reconnect = True
            self._close_connection()
    def _setup_exchange(self):
        """Set up the binding of the exchange and the possible creation of the exchange.

        Continues asynchronously in ``_cb_exchange_declared``.  The declared
        type must match an already-existing exchange of the same name (see
        the ``exchange_type`` parameter documentation on ``__init__``).
        """
        self._logger.debug('Declaring an exchange on the message broker...')
        self._logger.debug('Exchange Name: %s',
                           self._exchange_name)
        self._channel.exchange_declare(
            exchange=self._exchange_name,
            exchange_type=self._exchange_type.value,
            callback=self._cb_exchange_declared
        )
    def _cb_exchange_declared(self, method_frame: pika.frame.Method):
        """
        Handle a successfully declared exchange

        This callback will initiate the setup of a queue used to read messages from the message
        broker

        :param method_frame: Status of the exchange declaration
        :type method_frame: pika.frame.Method, unused
        """
        self._logger.debug('Successfully declared an exchange on the message broker')
        self._logger.debug('Method Frame Contents: %s',
                           method_frame)
        self._setup_queue()
def _setup_queue(self):
"""Set up a queue which is attached to the exchange"""
self._logger.debug('Setting up a queue at the message broker...')
self._channel.queue_declare(
self._queue_name,
passive=False,
exclusive=True,
auto_delete=True,
durable=False,
callback=self._cb_queue_declared
)
    def _cb_queue_declared(self, method_frame: pika.frame.Method):
        """
        Callback for a successfully created queue

        This callback will initiate the binding of the queue to the exchange

        :param method_frame: Status of the Method
        :type method_frame: pika.frame.Method, unused
        """
        self._logger.debug('Successfully set up a queue at the message broker')
        self._logger.debug('Method Frame Contents: %s',
                           method_frame)
        self._logger.debug('Binding the queue to the specified/created exchange...')
        # Currently, there will be no routing key set since I'm not sure on how I want to
        # implement the different exchange types
        self._channel.queue_bind(
            queue=self._queue_name,
            exchange=self._exchange_name,
            callback=self._cb_queue_bound
        )
def _cb_queue_bound(self, method_frame: pika.frame.Method):
"""
Callback for a successful execution of the queue binding
This will trigger a setup for the quality of service on this consumer
:param method_frame: The result of the execution
:type method_frame: pika.frame.Method
"""
self._logger.debug('Successfully bound the queue to the exchange')
self._logger.debug('Method Frame Contents: | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 12:07:57 2019
@author: johnmount
"""
from abc import ABC
import math
import pprint
import warnings
import numpy
import pandas
import vtreat.util
import vtreat.transform
def ready_data_frame(d):
    """Coerce *d* to a pandas.DataFrame for treatment.

    numpy.ndarray inputs are wrapped in a DataFrame with stringified column
    names; DataFrames pass through unchanged.

    :param d: a pandas.DataFrame or numpy.ndarray
    :return: (data frame, original type) so callers can convert back later
    :raises TypeError: if *d* is neither a DataFrame nor an ndarray
    """
    orig_type = type(d)
    # idiom fix: identity (`is`) is the correct comparison for type objects
    if orig_type is numpy.ndarray:
        d = pandas.DataFrame(d)
        d.columns = [str(c) for c in d.columns]
    if not isinstance(d, pandas.DataFrame):
        raise TypeError("not prepared to process type " + str(orig_type))
    return d, orig_type
def back_to_orig_type_data_frame(d, orig_type):
    """Convert the DataFrame *d* back to *orig_type*; return (data, column names)."""
    if not isinstance(d, pandas.DataFrame):
        raise TypeError("Expected result to be a pandas.DataFrame, found: " + str(type(d)))
    columns = list(d.columns)
    result = numpy.asarray(d) if orig_type == numpy.ndarray else d
    return result, columns
class VarTransform:
    """Base class for all variable treatments.

    Subclasses override ``transform`` to map the incoming column onto one or
    more derived columns.
    """

    def __init__(self, incoming_column_name, derived_column_names, treatment):
        # name of the source column this treatment reads
        self.incoming_column_name_ = incoming_column_name
        # defensive copy: callers may mutate their list afterwards
        self.derived_column_names_ = list(derived_column_names)
        # short string naming the treatment kind (e.g. "clean_copy")
        self.treatment_ = treatment
        # set True by y-aware transforms that need cross-validated refitting
        self.need_cross_treatment_ = False
        self.refitter_ = None

    def transform(self, data_frame):
        """Produce the derived columns for *data_frame*; subclasses must override."""
        raise NotImplementedError("base method called")
class MappedCodeTransform(VarTransform):
    """Treatment that replaces levels of a column via a lookup table (code book)."""

    def __init__(self, incoming_column_name, derived_column_name, treatment, code_book):
        VarTransform.__init__(
            self, incoming_column_name, [derived_column_name], treatment
        )
        # DataFrame keyed by the incoming column, carrying the derived values
        self.code_book_ = code_book

    def transform(self, data_frame):
        """Map the incoming column through the code book; bad/unseen levels become 0."""
        in_col = self.incoming_column_name_
        out_col = self.derived_column_names_[0]
        working = pandas.DataFrame({in_col: data_frame[in_col]})
        # fold missing/invalid entries into the sentinel level used at fit time
        working.loc[vtreat.util.is_bad(working[in_col]), in_col] = "_NA_"
        # left merge preserves the row order of `working`
        # (could also try pandas .map())
        merged = pandas.merge(
            working, self.code_book_, on=[in_col], how="left", sort=False
        )
        result = merged[[out_col]].copy()
        # levels unseen at fit time merge to NaN; treat them as zero effect
        result.loc[vtreat.util.is_bad(result[out_col]), out_col] = 0
        return result
class YAwareMappedCodeTransform(MappedCodeTransform):
    """Code-book transform that was fit against the outcome.

    Such transforms leak outcome information unless refit per cross-validation
    fold, hence ``need_cross_treatment_`` and the stored ``refitter_``.
    """

    def __init__(
        self,
        incoming_column_name,
        derived_column_name,
        treatment,
        code_book,
        refitter,
        extra_args,
        params,
    ):
        MappedCodeTransform.__init__(
            self,
            incoming_column_name=incoming_column_name,
            derived_column_name=derived_column_name,
            treatment=treatment,
            code_book=code_book,
        )
        self.need_cross_treatment_ = True
        # callable used to refit this transform on a cross-validation fold
        self.refitter_ = refitter
        self.extra_args_ = extra_args
        self.params_ = params
class CleanNumericTransform(VarTransform):
    """Numeric pass-through that fills missing values with a fixed replacement."""

    def __init__(self, incoming_column_name, replacement_value):
        # derived column keeps the incoming name: this is an in-place "clean copy"
        VarTransform.__init__(
            self, incoming_column_name, [incoming_column_name], "clean_copy"
        )
        self.replacement_value_ = replacement_value

    def transform(self, data_frame):
        """Return a one-column frame with bad entries replaced by the stored value."""
        values = vtreat.util.safe_to_numeric_array(data_frame[self.incoming_column_name_])
        values[vtreat.util.is_bad(values)] = self.replacement_value_
        return pandas.DataFrame({self.derived_column_names_[0]: values})
class IndicateMissingTransform(VarTransform):
    """Derives a 0.0/1.0 column flagging missing or invalid entries."""

    def __init__(self, incoming_column_name, derived_column_name):
        VarTransform.__init__(
            self, incoming_column_name, [derived_column_name], "missing_indicator"
        )

    def transform(self, data_frame):
        """Return a float indicator of bad entries in the incoming column."""
        is_missing = vtreat.util.is_bad(data_frame[self.incoming_column_name_])
        frame = pandas.DataFrame({self.derived_column_names_[0]: is_missing})
        return frame.astype(float)
def fit_clean_code(*, incoming_column_name, x, params, imputation_map):
    """Build a CleanNumericTransform for a numeric column, choosing the fill value.

    The replacement for missing entries is, in order of precedence: the
    per-column entry in *imputation_map*, else ``params['missingness_imputation']``.
    Either may be a numeric constant or a callable used to summarize *x*.

    :param incoming_column_name: name of the numeric column being treated
    :param x: the column's values
    :param params: treatment parameters; only 'missingness_imputation' is used here
    :param imputation_map: per-column overrides of the imputation strategy
    :return: a CleanNumericTransform, or None when x has no numeric range
    :raises TypeError: if the chosen replacement is neither numeric nor callable
    :raises ValueError: if the computed replacement is null/NaN/inf
    """
    # a column with no numeric spread carries no information worth copying
    if not vtreat.util.numeric_has_range(x):
        return None
    replacement = params['missingness_imputation']
    try:
        # per-column override wins over the global default
        replacement = imputation_map[incoming_column_name]
    except KeyError:
        pass
    if vtreat.util.can_convert_v_to_numeric(replacement):
        # arithmetic coerces the constant to a float
        replacement_value = 0.0 + replacement
    elif callable(replacement):
        replacement_value = vtreat.util.summarize_column(x, fn=replacement)
    else:
        raise TypeError("unexpected imputation type " + str(type(replacement)) + " (" + incoming_column_name + ")")
    # reject unusable fill values before baking them into the transform
    if pandas.isnull(replacement_value) or math.isnan(replacement_value) or math.isinf(replacement_value):
        raise ValueError("replacement was bad " + incoming_column_name + ": " + str(replacement_value))
    return CleanNumericTransform(
        incoming_column_name=incoming_column_name, replacement_value=replacement_value
    )
def fit_regression_impact_code(*, incoming_column_name, x, y, extra_args, params):
    """Fit an impact code (per-level deviation of mean y from the grand mean).

    Returns None when the column has at most one observed level.
    """
    stats = vtreat.util.grouped_by_x_statistics(x, y)
    if stats.shape[0] <= 1:
        return None
    # hierarchical (shrunken) estimate vs raw per-group mean, per caller's params
    if params["use_hierarchical_estimate"]:
        stats["_impact_code"] = stats["_hest"] - stats["_gm"]
    else:
        stats["_impact_code"] = stats["_group_mean"] - stats["_gm"]
    derived_name = incoming_column_name + "_impact_code"
    code_book = stats.loc[:, ["x", "_impact_code"]].copy()
    code_book.columns = [incoming_column_name, derived_name]
    return YAwareMappedCodeTransform(
        incoming_column_name=incoming_column_name,
        derived_column_name=derived_name,
        treatment="impact_code",
        code_book=code_book,
        refitter=fit_regression_impact_code,
        extra_args=extra_args,
        params=params,
    )
def fit_regression_deviation_code(*, incoming_column_name, x, y, extra_args, params):
    """Fit a deviation code (per-level standard deviation of y).

    Returns None when the column has at most one observed level.
    """
    stats = vtreat.util.grouped_by_x_statistics(x, y)
    if stats.shape[0] <= 1:
        return None
    # per-level spread of the outcome, reported as a standard deviation
    stats["_deviation_code"] = numpy.sqrt(stats["_var"])
    derived_name = incoming_column_name + "_deviation_code"
    code_book = stats.loc[:, ["x", "_deviation_code"]].copy()
    code_book.columns = [incoming_column_name, derived_name]
    return YAwareMappedCodeTransform(
        incoming_column_name=incoming_column_name,
        derived_column_name=derived_name,
        treatment="deviation_code",
        code_book=code_book,
        refitter=fit_regression_deviation_code,
        extra_args=extra_args,
        params=params,
    )
def fit_binomial_impact_code(*, incoming_column_name, x, y, extra_args, params):
    """Fit a logit code for a categorical column against a binomial outcome.

    y is recoded to 1.0 where it equals ``extra_args['outcome_target']`` and
    0.0 otherwise; each level is then coded as the smoothed log-ratio of its
    hit rate to the grand rate.

    :return: a YAwareMappedCodeTransform, or None when there is at most one level
    """
    # bug fix: a stray trailing comma previously made this a 1-tuple
    # (`(extra_args["outcome_target"],)`); numpy broadcasting hid the typo for
    # 1-d y, but the scalar is what is meant
    outcome_target = extra_args["outcome_target"]
    var_suffix = extra_args["var_suffix"]
    y = numpy.asarray(numpy.asarray(y) == outcome_target, dtype=float)
    sf = vtreat.util.grouped_by_x_statistics(x, y)
    if sf.shape[0] <= 1:
        return None
    # smoothing keeps the log-ratio finite for rates at 0
    eps = 1.0e-3
    if params["use_hierarchical_estimate"]:
        sf["_logit_code"] = numpy.log((sf["_hest"] + eps) / (sf["_gm"] + eps))
    else:
        sf["_logit_code"] = numpy.log((sf["_group_mean"] + eps) / (sf["_gm"] + eps))
    sf = sf.loc[:, ["x", "_logit_code"]].copy()
    newcol = incoming_column_name + "_logit_code" + var_suffix
    sf.columns = [incoming_column_name, newcol]
    return YAwareMappedCodeTransform(
        incoming_column_name=incoming_column_name,
        derived_column_name=newcol,
        treatment="logit_code",
        code_book=sf,
        refitter=fit_binomial_impact_code,
        extra_args=extra_args,
        params=params,
    )
class IndicatorCodeTransform(VarTransform):
    """One-hot (indicator) encoding restricted to a fixed set of levels."""

    def __init__(
        self,
        incoming_column_name,
        derived_column_names,
        levels,
        *,
        sparse_indicators=False
    ):
        VarTransform.__init__(
            self, incoming_column_name, derived_column_names, "indicator_code"
        )
        self.levels_ = levels
        # optionally store indicators as pandas sparse arrays to save memory
        self.sparse_indicators_ = sparse_indicators

    def transform(self, data_frame):
        """Return one 0.0/1.0 column per tracked level of the incoming column."""
        in_col = self.incoming_column_name_
        working = pandas.DataFrame({in_col: data_frame[in_col]})
        # fold missing/invalid entries into the sentinel level used at fit time
        working.loc[vtreat.util.is_bad(working[in_col]), in_col] = "_NA_"
        col = working[in_col]

        def indicator(idx):
            vals = numpy.asarray(col == self.levels_[idx]) + 0.0
            if self.sparse_indicators_:
                vals = pandas.arrays.SparseArray(vals, fill_value=0.0)
            return vals

        pieces = [
            pandas.DataFrame({self.derived_column_names_[i]: indicator(i)})
            for i in range(len(self.levels_))
        ]
        out = pandas.concat(pieces, axis=1, sort=False)
        out.reset_index(inplace=True, drop=True)
        return out
def fit_indicator_code(
    *, incoming_column_name, x, min_fraction, sparse_indicators=False
):
    """Build an IndicatorCodeTransform for the sufficiently frequent levels of x.

    Only levels covering at least ``min_fraction`` of rows are kept, which
    bounds the number of indicator columns.  Returns None when no level
    qualifies.
    """
    sf = pandas.DataFrame({incoming_column_name: x})
    # fold missing/invalid entries into the sentinel level
    bad_posns = vtreat.util.is_bad(sf[incoming_column_name])
    sf.loc[bad_posns, incoming_column_name] = "_NA_"
    counts = sf[incoming_column_name].value_counts()
    n = sf.shape[0]
    counts = counts[counts > 0]
    counts = counts[counts >= min_fraction * n]  # no more than 1/min_fraction symbols
    levels = [str(v) for v in counts.index]
    if not levels:
        return None
    return IndicatorCodeTransform(
        incoming_column_name,
        vtreat.util.build_level_codes(incoming_column_name, levels),
        levels=levels,
        sparse_indicators=sparse_indicators,
    )
def fit_prevalence_code(incoming_column_name, x):
    """Build a transform mapping each level of x to its observed frequency."""
    working = pandas.DataFrame({"x": x})
    # fold missing/invalid entries into the sentinel level
    working.loc[vtreat.util.is_bad(working["x"]), "x"] = "_NA_"
    working.reset_index(inplace=True, drop=True)
    n = working.shape[0]
    # count rows per level, then normalize to a frequency
    working["_ni"] = 1.0
    code_book = pandas.DataFrame(working.groupby("x")["_ni"].sum())
    code_book.reset_index(inplace=True, drop=False)
    code_book["_hest"] = code_book["_ni"] / n
    code_book = code_book.loc[:, ["x", "_hest"]].copy()
    newcol = incoming_column_name + "_prevalence_code"
    code_book.columns = [incoming_column_name, newcol]
    code_book[incoming_column_name] = code_book[incoming_column_name].astype(str)
    code_book.reset_index(inplace=True, drop=True)
    return MappedCodeTransform(
        incoming_column_name, newcol, treatment="prevalence_code", code_book=code_book
    )
# noinspection PyPep8Naming
def fit_numeric_outcome_treatment(
    *, X, y, var_list, outcome_name, cols_to_copy, params, imputation_map
):
    """Fit all variable treatments for a numeric (regression) outcome.

    :param X: explanatory data frame
    :param y: numeric outcome values
    :param var_list: candidate columns to treat (defaults to all non-copied columns)
    :param outcome_name: name of the outcome column (recorded in the plan)
    :param cols_to_copy: columns passed through untreated
    :param params: treatment parameters ('coders', 'indicator_min_fraction', ...)
    :param imputation_map: per-column missing-value imputation overrides
    :return: dict with 'outcome_name', 'cols_to_copy' and the fitted 'xforms'
    :raises ValueError: if no usable variables remain
    """
    if (var_list is None) or (len(var_list) <= 0):
        var_list = [co for co in X.columns]
    copy_set = set(cols_to_copy)
    var_list = [co for co in var_list if (not (co in copy_set))]
    v_counts = {v: vtreat.util.get_unique_value_count(X[v]) for v in var_list}
    # constant columns carry no signal
    # NOTE: this produces a set, so downstream iteration order is arbitrary
    var_list = {v for v in var_list if v_counts[v] > 1}
    if len(var_list) <= 0:
        raise ValueError("no variables")
    xforms = []
    n = X.shape[0]
    all_bad = []
    for vi in var_list:
        n_bad = sum(vtreat.util.is_bad(X[vi]))
        if n_bad >= n:
            # entirely-missing columns are dropped below
            all_bad = all_bad + [vi]
        if (n_bad > 0) and (n_bad < n):
            if "missing_indicator" in params["coders"]:
                # noinspection PyTypeChecker
                xforms = xforms + [
                    IndicateMissingTransform(
                        incoming_column_name=vi, derived_column_name=vi + "_is_bad"
                    )
                ]
    var_list = [co for co in var_list if (not (co in set(all_bad)))]
    num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
    cat_list = [co for co in var_list if co not in set(num_list)]
    # identifier-like columns (unique per row) cannot generalize
    id_like = [co for co in cat_list if v_counts[co] >= n]
    if len(id_like) > 0:
        warnings.warn("variable(s) " + ', '.join(id_like) + " have unique values per-row, dropping")
    # bug fix: this list was previously rebuilt from var_list, which re-admitted
    # the numeric columns into cat_list and categorically coded them
    cat_list = [co for co in cat_list if co not in set(id_like)]
    if "clean_copy" in params["coders"]:
        for vi in num_list:
            xform = fit_clean_code(incoming_column_name=vi, x=X[vi], params=params, imputation_map=imputation_map)
            if xform is not None:
                # noinspection PyTypeChecker
                xforms = xforms + [xform]
    for vi in cat_list:
        if "impact_code" in params["coders"]:
            # noinspection PyTypeChecker
            xforms = xforms + [
                fit_regression_impact_code(
                    incoming_column_name=vi,
                    x=numpy.asarray(X[vi]),
                    y=y,
                    extra_args=None,
                    params=params,
                )
            ]
        if "deviation_code" in params["coders"]:
            # noinspection PyTypeChecker
            xforms = xforms + [
                fit_regression_deviation_code(
                    incoming_column_name=vi,
                    x=numpy.asarray(X[vi]),
                    y=y,
                    extra_args=None,
                    params=params,
                )
            ]
        if "prevalence_code" in params["coders"]:
            # noinspection PyTypeChecker
            xforms = xforms + [
                fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
            ]
        if "indicator_code" in params["coders"]:
            # noinspection PyTypeChecker
            xforms = xforms + [
                fit_indicator_code(
                    incoming_column_name=vi,
                    x=numpy.asarray(X[vi]),
                    min_fraction=params["indicator_min_fraction"],
                    sparse_indicators=params["sparse_indicators"],
                )
            ]
    # the fitters above return None when a column is untreatable
    xforms = [xf for xf in xforms if xf is not None]
    for stp in params["user_transforms"]:
        stp.fit(X=X[var_list], y=y)
    return {
        "outcome_name": outcome_name,
        "cols_to_copy": cols_to_copy,
        "xforms": xforms,
    }
# noinspection PyPep8Naming
def fit_binomial_outcome_treatment(
*, X, y, outcome_target, var_list, outcome_name, cols_to_copy, params, imputation_map
):
if (var_list is None) or (len(var_list) <= 0):
var_list = [co for co in X.columns]
copy_set = set(cols_to_copy)
var_list = [co for co in var_list if (not (co in copy_set))]
v_counts = {v: vtreat.util.get_unique_value_count(X[v]) for v in var_list}
var_list = {v for v in var_list if v_counts[v] > 1}
if len(var_list) <= 0:
raise ValueError("no variables")
xforms = []
n = X.shape[0]
all_bad = []
for vi in var_list:
n_bad = sum(vtreat.util.is_bad(X[vi]))
if n_bad >= n:
all_bad = all_bad + [vi]
if (n_bad > 0) and (n_bad < n):
if "missing_indicator" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
IndicateMissingTransform(
incoming_column_name=vi, derived_column_name=vi + "_is_bad"
)
]
var_list = [co for co in var_list if (not (co in set(all_bad)))]
num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
cat_list = [co for co in var_list if co not in set(num_list)]
id_like = [co for co in cat_list if v_counts[co] >= n]
if len(id_like) > 0:
warnings.warn("variable(s) " + ', '.join(id_like) + " have unique values per-row, dropping")
cat_list = [co for co in var_list if co not in set(id_like)]
if "clean_copy" in params["coders"]:
for vi in num_list:
xform = fit_clean_code(incoming_column_name=vi, x=X[vi], params=params, imputation_map=imputation_map)
if xform is not None:
# noinspection PyTypeChecker
xforms = xforms + [xform]
extra_args = {"outcome_target": outcome_target, "var_suffix": ""}
for vi in cat_list:
if "logit_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_binomial_impact_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
y=y,
extra_args=extra_args,
params=params,
)
| |
be in use for the record to be valid!")
errorsFound += 1
# Make sure the rdesc is available
if (recordTags["keyword"] and recordTags["rdesc"] != 1):
out.error("The tag <rdesc> was expected to have a count of 1, but was found with a count of %d for record %s" %
(recordTags["rdesc"], recordName))
errorsFound += 1
return (errorsFound, recordName)
# Check the <keyword> XML to make sure the required elements are found
def checkElementsKeyword(keyword, recordName):
    """Validate the child tags of a <keyword> XML element.

    Either the regular tags (kwdesc/kwformat/kwlen/kwdata) must each appear
    exactly once, or a single <ktvpdfile> reference replaces them all.
    Returns (errorsFound, keywordName).

    NOTE(review): uses `basestring`, so this file targets Python 2.
    """
    errorsFound = 0
    # Define the expected tags at this level
    keywordTags = {"kwdesc" : 0, "kwformat" : 0, "kwlen" : 0, "kwdata" : 0, "ktvpdfile" : 0}
    # Make sure the keyword has a name attrib, save for later use
    keywordName = keyword.attrib.get("name")
    if (keywordName == None):
        out.error("<keyword> tag in record %s is missing the name attribute" % (recordName))
        errorsFound += 1
        keywordName = "INVALID" # Set the invalid name so the code below can use it without issue
    # Loop thru the tags defined for this keyword
    for child in keyword:
        # Comments aren't basestring tags
        if not isinstance(child.tag, basestring):
            continue
        # See if this is a tag we even expect
        if child.tag not in keywordTags:
            out.error("Unsupported tag <%s> found while parsing the <keyword> level for keyword %s in record %s" %
                      (child.tag, keywordName, recordName))
            errorsFound += 1
        # It was a supported tag
        else:
            keywordTags[child.tag] += 1
    # Done looping through tags
    # We've checked for unknown keyword tags, now make sure we have the right number of each
    # The default is we expect the regular keyword tags to be there, so default to 1
    keywordTagCount = 1
    # If we found a ktvpdfile tag, make sure we only had one of them
    if (keywordTags["ktvpdfile"] != 0):
        if (keywordTags["ktvpdfile"] > 1):
            out.error("The tag <ktvpdfile> is only allowed to be used once for keyword %s in record %s" %
                      (keywordName, recordName))
            errorsFound += 1
        # We had a ktvpdfile, now we don't want any of the regular keyword tags
        keywordTagCount = 0
    # Depending upon the state of ktvpdfile, check to ensure we are in the right state
    for tag in ["kwdesc", "kwformat", "kwlen", "kwdata"]:
        if (keywordTags[tag] != keywordTagCount):
            out.error("The tag <%s> was expected to have a count of %d, but was found with a count of %d for keyword %s in record %s" %
                      (tag, keywordTagCount, keywordTags[tag], keywordName, recordName))
            errorsFound += 1
    return (errorsFound, keywordName)
# Function to write properly packed/encoded data to the vpdFile
def writeDataToVPD(vpdFile, data, offset = None):
    """Write *data* to the VPD file, optionally at a specific *offset*.

    When an offset is given, the current file position is saved, the write is
    done at that offset, and the original position is restored afterwards.
    Always returns 0.
    """
    rc = 0
    savedOffset = None
    # Seek to the requested position, remembering where we were
    if (offset != None):
        savedOffset = vpdFile.tell()
        vpdFile.seekg(offset)
    vpdFile.write(data)
    # Put the file position back where the caller left it
    if (offset != None):
        vpdFile.seekg(savedOffset)
    return rc
# Turn the tvpd keyword data into packed binary data we can write to the file
def packKeyword(keyword, length, data, format):
    """Pack one keyword (name, length field, payload) into a bytearray.

    Keywords whose name starts with '#' carry a 2-byte little-endian length;
    all others a single byte.  'ascii' payloads are NUL padded to *length*
    bytes; 'hex' payloads have whitespace/newlines stripped and are zero
    padded to *length* bytes.  Returns None (after logging) for an unknown
    *format*.
    """
    packed = bytearray(keyword.encode())
    # Pound-prefixed keywords use a 2 byte little-endian length field
    if (keyword[0] == "#"):
        packed += struct.pack("<H", length)
    else:
        packed += struct.pack("<B", length)
    # If the caller provided less data than `length`, pad out the remainder
    if (format == "ascii"):
        packed += bytearray(data.ljust(length, '\0').encode())
    elif (format == "hex"):
        # Strip whitespace/newlines first so the padding math sees only nibbles
        cleaned = data.replace(" ", "").replace("\n", "")
        # * 2 converts the byte length into a nibble count
        packed += bytearray.fromhex(cleaned.ljust((length * 2), '0'))
    else:
        out.error("Unknown format type %s passed into packKeyword" % format)
        return None
    # The keyword is packed, send it back
    return packed
# Calculate the length of the PF record
def calcPadFill(record):
    """Return the number of PF pad bytes needed for *record*.

    The keyword section of a record must be at least 40 bytes long, and once
    past 40 it must be padded out to a word (4 byte) boundary.  The record
    passed in already contains 3 extra bytes (LR tag & record length) which
    happen to offset the 3 bytes of the PF keyword header, so len(record)
    can be used directly in the math.
    """
    fill = 40 - len(record)
    if (fill < 1):
        # Already >= 40 bytes: pad only up to the nearest word boundary
        fill = (4 - (len(record) % 4))
    return fill
# Check input hex data for proper formatting
def checkHexDataFormat(kwdata):
    """Validate that *kwdata* contains only hex digit characters.

    Whitespace and newlines are stripped before checking.  Returns
    (match, cleanedData) where match is the first non-hex span found
    (None when the data is clean); an error is logged when a bad span exists.
    """
    cleaned = kwdata.replace(" ", "").replace("\n", "")
    # Anything outside 0-9 / a-f / A-F is not valid hex payload
    match = re.search("([^0-9a-fA-F]+)", cleaned)
    if (match):
        out.error("A non hex character \"%s\" was found at %s in the kwdata" % (match.group(), match.span()))
    return (match, cleaned)
############################################################
# Main - Main - Main - Main - Main - Main - Main - Main
############################################################
rc = 0
################################################
# Command line options
# Create the argparser object
# We disable auto help options here and add them manually below. This is so we can get all the optional args in 1 group
parser = argparse.ArgumentParser(description='The VPD image creation tool', add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
Examples:
./createVpd.py -m examples/simple/simple.tvpd -o /tmp
./createVpd.py -m examples/rbinfile/rbinfile.tvpd -i examples/rbinfile -o /tmp
'''))
# Create our group of required command line args
reqgroup = parser.add_argument_group('Required Arguments')
reqgroup.add_argument('-m', '--manifest', help='The input file detailing all the records and keywords to be in the image', required=True)
reqgroup.add_argument('-o', '--outpath', help='The output path for the files created by the tool', required=True)
# Create our group of optional command line args
optgroup = parser.add_argument_group('Optional Arguments')
optgroup.add_argument('-h', '--help', action="help", help="Show this help message and exit")
optgroup.add_argument('-d', '--debug', help="Enables debug printing", action="store_true")
optgroup.add_argument('-c', '--record-mode', help="The input is a record only file. No output VPD binary created.", action="store_true")
optgroup.add_argument('-r', '--binary-records', help="Create binary files for each record in the template", action="store_true")
optgroup.add_argument('-k', '--binary-keywords', help="Create binary files for each keyword in the template", action="store_true")
optgroup.add_argument('-i', '--inpath', help="The search path to use for the files referenced in the manifest")
# We've got everything we want loaded up, now look for it
args = parser.parse_args()
# Get the manifest file and get this party started
clManifestFile = args.manifest
# Look for output path
clOutputPath = args.outpath
# Make sure the path exists, we aren't going to create it
if (os.path.exists(clOutputPath) != True):
out.error("The given output path %s does not exist!" % clOutputPath)
out.error("Please create the output directory and run again")
exit(1)
# Look for input path
clInputPath = args.inpath
# Make sure the path exists
if (clInputPath != None):
# Add the CWD onto the path so the local directory is always looked at
clInputPath += os.pathsep + "."
else:
# Set it the CWD since it will be used throughout the program and having it set to None breaks things
clInputPath = "."
# Debug printing
clDebug = args.debug
# Record only mode
clRecordMode = args.record_mode
# Create separate binary files for each record
clBinaryRecords = args.binary_records
# Create separate binary files for each keyword
clBinaryKeywords = args.binary_keywords
# We are going to do this in 3 stages
# 1 - Read in the manifest and any other referenced files. This will create a complete XML description of the VPD
# We will also check to make sure that all required tags are given and no extra tags exist
# 2 - Parse thru the now complete vpd | |
import os
import xml.etree.ElementTree as ET
import numpy as np
import copy
import cv2
from imgaug import augmenters as iaa
from boundbox import BoundBox
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _softmax(x, axis=-1, t=-100.):
x = x - np.max(x)
if np.min(x) < t:
x = x/np.min(x)*t
e_x = np.exp(x)
return e_x / e_x.sum(axis, keepdims=True)
def bbox_iou(box1, box2):
    """Intersection-over-union of two axis-aligned boxes.

    Boxes expose xmin/xmax/ymin/ymax attributes. The denominator is the
    union area (area1 + area2 - intersection).
    """
    overlap_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
    overlap_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
    intersection = overlap_w * overlap_h
    area1 = (box1.xmax - box1.xmin) * (box1.ymax - box1.ymin)
    area2 = (box2.xmax - box2.xmin) * (box2.ymax - box2.ymin)
    return float(intersection) / (area1 + area2 - intersection)
def parse_annotation(ann_dir, img_dir, labels=()):
    """Parse all PASCAL-VOC style XML annotation files in *ann_dir*.

    ann_dir: directory containing one XML file per image
    img_dir: prefix prepended to each <filename> to build the image path
    labels: optional collection of class names; when non-empty, objects with
        other names are dropped (they are still counted in seen_labels).
        Default is an immutable tuple (the original used a mutable [] default).

    Returns (all_imgs, seen_labels):
      all_imgs - list of dicts with 'filename', 'width', 'height' and
                 'object' (list of {'name', 'xmin', 'ymin', 'xmax', 'ymax'});
                 images without any kept object are omitted
      seen_labels - dict mapping every encountered name to its count
    """
    all_imgs = []
    seen_labels = {}
    for ann in sorted(os.listdir(ann_dir)):
        img = {'object': []}
        # os.path.join handles ann_dir both with and without a trailing
        # separator (the original required a trailing one).
        tree = ET.parse(os.path.join(ann_dir, ann))
        for elem in tree.iter():
            if 'filename' in elem.tag:
                img['filename'] = img_dir + elem.text
            if 'width' in elem.tag:
                img['width'] = int(elem.text)
            if 'height' in elem.tag:
                img['height'] = int(elem.text)
            if 'object' in elem.tag or 'part' in elem.tag:
                obj = {}
                for attr in list(elem):
                    if 'name' in attr.tag:
                        obj['name'] = attr.text
                        # count every label, even ones filtered out below
                        seen_labels[obj['name']] = seen_labels.get(obj['name'], 0) + 1
                        if len(labels) > 0 and obj['name'] not in labels:
                            # unwanted class: stop processing this object
                            break
                        else:
                            img['object'] += [obj]
                    if 'bndbox' in attr.tag:
                        for dim in list(attr):
                            if 'xmin' in dim.tag:
                                obj['xmin'] = int(round(float(dim.text)))
                            if 'ymin' in dim.tag:
                                obj['ymin'] = int(round(float(dim.text)))
                            if 'xmax' in dim.tag:
                                obj['xmax'] = int(round(float(dim.text)))
                            if 'ymax' in dim.tag:
                                obj['ymax'] = int(round(float(dim.text)))
        if len(img['object']) > 0:
            all_imgs += [img]
    return all_imgs, seen_labels
def generate_Xy(imgs, labels, anchors, n_grid, net_input_size, n_class, normalize, aug=True):
    """Build network input X and YOLO-style targets y for annotated images.

    imgs: list of image dicts as produced by parse_annotation
    labels: list of class names; a box's class index is its position here
    anchors: flat list [w0, h0, w1, h1, ...] of anchor sizes (grid-cell units)
    n_grid: number of grid cells per side of the output map
    net_input_size: square network input resolution in pixels
    normalize: callable applied to each resized image before storing in X
    aug: forwarded to aug_image to enable random augmentation

    Returns (X_train, y_train): X_train has shape
    (n_imgs, net_input_size, net_input_size, 3); y_train has shape
    (n_imgs, n_grid, n_grid, n_anchors, 4 + 1 + n_class) holding
    [x, y, w, h, objectness, one-hot class] per assigned anchor, with
    coordinates in grid-cell units.
    """
    # input images
    X_train = np.zeros((len(imgs), net_input_size, net_input_size, 3))
    # desired network output
    y_train = np.zeros((len(imgs), n_grid, n_grid, len(anchors)//2, 4 + 1 + n_class))
    # anchors as zero-anchored boxes so only width/height drive the IoU match
    anchor_boxes = [BoundBox(0, 0, anchors[2*i], anchors[2*i+1]) for i in range(int(len(anchors)//2))]
    n = 0
    for img in imgs:
        reshaped_image, all_objects = aug_image(img, net_input_size, aug=aug)
        # image_name = img['filename']
        # if '.jpg' not in image_name and '.png' not in image_name:
        #     image_name += '.jpg'
        # image = cv2.imread(image_name)
        # reshaped_image = cv2.resize(image, (net_input_size, net_input_size))
        # reshaped_image = reshaped_image[:,:,::-1]
        # all_objects = img['object']
        reshaped_image = normalize(reshaped_image)
        X_train[n] = reshaped_image
        for obj in all_objects:
            # skip degenerate boxes and unknown labels
            if obj['xmax'] > obj['xmin'] and obj['ymax'] > obj['ymin'] and obj['name'] in labels:
                # unit: grid cell
                center_x = .5*(obj['xmin'] + obj['xmax'])
                center_x = center_x / (float(net_input_size) / n_grid)
                center_y = .5*(obj['ymin'] + obj['ymax'])
                center_y = center_y / (float(net_input_size) / n_grid)
                # grid_x: row, grid_y: column
                grid_x = int(np.floor(center_x))
                grid_y = int(np.floor(center_y))
                if grid_x < n_grid and grid_y < n_grid:
                    obj_indx = labels.index(obj['name'])
                    # unit: grid cell
                    center_w = (obj['xmax'] - obj['xmin']) / (float(net_input_size) / n_grid)
                    center_h = (obj['ymax'] - obj['ymin']) / (float(net_input_size) / n_grid)
                    box = [center_x, center_y, center_w, center_h]
                    # find the anchor that best predicts this box
                    # (ties keep the first anchor because the comparison is strict)
                    best_anchor = -1
                    max_iou = -1
                    shifted_box = BoundBox(0,
                                           0,
                                           center_w,
                                           center_h)
                    for i in range(len(anchor_boxes)):
                        anchor = anchor_boxes[i]
                        iou = bbox_iou(shifted_box, anchor)
                        if max_iou < iou:
                            best_anchor = i
                            max_iou = iou
                    # assign ground truth x, y, w, h, confidence and class probs to y_batch
                    y_train[n, grid_y, grid_x, best_anchor, 0:4] = box
                    y_train[n, grid_y, grid_x, best_anchor, 4  ] = 1.
                    y_train[n, grid_y, grid_x, best_anchor, 5 + obj_indx] = 1
        n += 1
    return X_train, y_train
# Apply the wrapped augmenter to ~50% of images.
sometimes = lambda aug: iaa.Sometimes(0.5, aug)
# Photometric/blur augmentation pipeline used by aug_image when aug=True.
# Most geometric augmenters are disabled (commented out); the Affine below is
# constructed with defaults only, so it is effectively a placeholder.
aug_pipe = iaa.Sequential(
    [
        # apply the following augmenters to most images
        #iaa.Fliplr(0.5), # horizontally flip 50% of all images
        #iaa.Flipud(0.2), # vertically flip 20% of all images
        #sometimes(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
        sometimes(iaa.Affine(
            #scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
            #translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)
            #rotate=(-5, 5), # rotate by -45 to +45 degrees
            #shear=(-5, 5), # shear by -16 to +16 degrees
            #order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
            #cval=(0, 255), # if mode is constant, use a cval between 0 and 255
            #mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
        )),
        # execute 0 to 5 of the following (less important) augmenters per image
        # don't execute all of them, as that would often be way too strong
        iaa.SomeOf((0, 5),
            [
                #sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation
                iaa.OneOf([
                    iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0
                    iaa.AverageBlur(k=(2, 7)), # blur image using local means with kernel sizes between 2 and 7
                    iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 2 and 7
                ]),
                iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images
                #iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
                # search either for all edges or for directed edges
                #sometimes(iaa.OneOf([
                #    iaa.EdgeDetect(alpha=(0, 0.7)),
                #    iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0)),
                #])),
                iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images
                iaa.OneOf([
                    iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels
                    #iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                ]),
                #iaa.Invert(0.05, per_channel=True), # invert color channels
                iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)
                iaa.Multiply((0.5, 1.5), per_channel=0.5), # change brightness of images (50-150% of original value)
                iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast
                #iaa.Grayscale(alpha=(0.0, 1.0)),
                #sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                #sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))) # sometimes move parts of the image around
            ],
            random_order=True
        )
    ],
    random_order=True
)
def aug_image(img, net_input_size, aug):
    """Load an annotated image, optionally augment it, and resize to the net input.

    img: image dict from parse_annotation ('filename' and 'object' keys)
    net_input_size: square output resolution in pixels
    aug: when truthy, apply random scale / translate / flip plus aug_pipe

    Returns (image, objects): an RGB image of shape
    (net_input_size, net_input_size, 3) and a deep-copied object list whose
    box coordinates are remapped (and clipped) to the resized image.

    Raises IOError if the image file cannot be read.
    """
    image_name = img['filename']
    if '.jpg' not in image_name and '.png' not in image_name:
        image_name += '.jpg'
    image = cv2.imread(image_name)
    if image is None:
        # Fail fast with a clear error instead of crashing below with an
        # opaque AttributeError on image.shape (original printed and went on).
        raise IOError('Cannot find ' + image_name)
    h, w, c = image.shape
    # deep copy so the caller's annotation dicts are never mutated
    all_objs = copy.deepcopy(img['object'])
    if aug:
        ### scale the image by a random factor in [1.0, 1.1)
        scale = np.random.uniform() / 10. + 1.
        image = cv2.resize(image, (0,0), fx = scale, fy = scale)
        ### translate: crop an (h, w) window at a random offset
        max_offx = (scale-1.) * w
        max_offy = (scale-1.) * h
        offx = int(np.random.uniform() * max_offx)
        offy = int(np.random.uniform() * max_offy)
        image = image[offy : (offy + h), offx : (offx + w)]
        ### flip the image horizontally with probability 0.5
        flip = np.random.binomial(1, .5)
        if flip > 0.5: image = cv2.flip(image, 1)
        image = aug_pipe.augment_image(image)
    # resize the image to standard size and convert BGR -> RGB
    image = cv2.resize(image, (net_input_size, net_input_size))
    image = image[:,:,::-1]
    # fix object's position and size for the (augmented) resized image
    for obj in all_objs:
        for attr in ['xmin', 'xmax']:
            if aug: obj[attr] = int(obj[attr] * scale - offx)
            obj[attr] = int(obj[attr] * float(net_input_size) / w)
            obj[attr] = max(min(obj[attr], net_input_size), 0)
        for attr in ['ymin', 'ymax']:
            if aug: obj[attr] = int(obj[attr] * scale - offy)
            obj[attr] = int(obj[attr] * float(net_input_size) / h)
            obj[attr] = max(min(obj[attr], net_input_size), 0)
        if aug and flip > 0.5:
            # mirror the box around the vertical center line
            xmin = obj['xmin']
            obj['xmin'] = net_input_size - obj['xmax']
            obj['xmax'] = net_input_size - xmin
    return image, all_objs
def decode_netout(netout, anchors, nb_class, obj_threshold=0.1, nms_threshold=0.1):
    """Turn a raw YOLO output volume into a list of BoundBox detections.

    netout: array of shape (grid_h, grid_w, n_box, 4 + 1 + nb_class).
        NOTE: modified in place (sigmoid/softmax applied, low scores zeroed).
    anchors: flat list [w0, h0, w1, h1, ...] of anchor sizes (grid-cell units)
    nb_class: number of classes
    obj_threshold: minimum class score to keep a detection
    nms_threshold: IoU above which the lower-scoring box is suppressed

    Returns a list of BoundBox objects with coordinates normalized to [0, 1]
    of the image width/height.
    """
    grid_h, grid_w, nb_box = netout.shape[:3]
    boxes = []
    # decode the output by the network: objectness through a sigmoid, class
    # scores through softmax weighted by objectness, then hard-threshold
    netout[..., 4]  = _sigmoid(netout[..., 4])
    netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
    netout[..., 5:] *= netout[..., 5:] > obj_threshold
    for row in range(grid_h):
        for col in range(grid_w):
            for b in range(nb_box):
                # from 4th element onwards are confidence and class classes
                classes = netout[row,col,b,5:]
                if np.sum(classes) > 0:
                    # first 4 elements are x, y, w, and h
                    x, y, w, h = netout[row,col,b,:4]
                    x = (col + _sigmoid(x)) / grid_w # center position, unit: image width
                    y = (row + _sigmoid(y)) / grid_h # center position, unit: image height
                    w = anchors[2 * b + 0] * np.exp(w) / grid_w # unit: image width
                    h = anchors[2 * b + 1] * np.exp(h) / grid_h # unit: image height
                    confidence = netout[row,col,b,4]
                    box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, confidence, classes)
                    boxes.append(box)
    # suppress non-maximal boxes: per class, walk boxes in descending score
    # order and zero the class score of any later box overlapping too much
    for c in range(nb_class):
        sorted_indices = list(reversed(np.argsort([box.classes[c] for box in boxes])))
        for i in range(len(sorted_indices)):
            index_i = sorted_indices[i]
            if boxes[index_i].classes[c] == 0:
                continue
            else:
                for j in range(i+1, len(sorted_indices)):
                    index_j = sorted_indices[j]
                    if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_threshold:
                        boxes[index_j].classes[c] = 0
    # remove the boxes which are less likely than a obj_threshold
    boxes = [box for box in boxes if box.get_score() > obj_threshold]
    return boxes
def load_annotation(imgs, i, labels):
    """Return the ground-truth boxes of image *i* as an array.

    Each row is [xmin, ymin, xmax, ymax, class_index] where class_index is
    the position of the object's name in *labels*. An image without objects
    yields an array of shape (1, 0).
    """
    boxes = [
        [obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], labels.index(obj['name'])]
        for obj in imgs[i]['object']
    ]
    if not boxes:
        boxes = [[]]
    return np.array(boxes)
def compute_overlap(a, b):
    """
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
    Pairwise IoU between two sets of [x1, y1, x2, y2] boxes.
    Parameters
    ----------
    a: (N, 4) ndarray of float
    b: (K, 4) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    a_areas = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    b_areas = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    # broadcast a against b to get all (N, K) pairwise overlaps
    overlap_w = np.minimum(a[:, 2][:, None], b[:, 2]) - np.maximum(a[:, 0][:, None], b[:, 0])
    overlap_h = np.minimum(a[:, 3][:, None], b[:, 3]) - np.maximum(a[:, 1][:, None], b[:, 1])
    overlap_w = np.clip(overlap_w, 0, None)
    overlap_h = np.clip(overlap_h, 0, None)
    intersection = overlap_w * overlap_h
    union = a_areas[:, None] + b_areas - intersection
    # guard against division by zero for degenerate boxes
    union = np.maximum(union, np.finfo(float).eps)
    return intersection / union
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], | |
split
sampling_ratio: sampling ratios for train/val/test
ignore_clusters: do not use cluster information for train/test split
cluster_type: source of clustering information (uniref, cdhit05: cdhit threshold 0.5 similar to uniref procedure, cdhit04: cdhit with threshold 0.4)
bpe: apply BPE
bpe_vocab_size: vocabulary size (including original tokens) for BPE
sampling_method: sampling method for train test split as defined in dataset_utils
pick_representative_for_val_test: just select a single representative per cluster for validation and test set
mask_idx: index of the mask token (for BERT masked LM training) None for none
'''
print("Preparing netchop digested sprot LM data")
LM_PATH=Path(working_folder)
write_log_header(LM_PATH,locals())
if existing_netchop_peptides is not None:
peptides = proteins_from_fasta(data_path/existing_netchop_peptides)
df = sequences_to_df(peptides)
else:
peptides = netchop_digest(protein_fasta_file, netchop_path=netchop_path, threshold=0.7, repeats=netchop_repeats, min_length=netchop_min_length, max_length=netchop_max_length,verbose=True)
df = sequences_to_df(peptides)
# TODO
if(ignore_clusters is False):
df_cluster = load_cdhit(df,cluster_type,"netchop_peptides")
else:
df_cluster = None
self._preprocess_default(path=LM_PATH,df=df,df_cluster=df_cluster,pad_idx=pad_idx,sequence_len_min_aas=sequence_len_min_aas,sequence_len_max_aas=sequence_len_max_aas,sequence_len_max_tokens=sequence_len_max_tokens,exclude_aas=exclude_aas,nfolds=nfolds,sampling_ratio=sampling_ratio,
bpe=bpe,bpe_vocab_size=bpe_vocab_size,sampling_method_train=sampling_method_train,sampling_method_valtest=sampling_method_valtest,
randomize=randomize,random_seed=random_seed,mask_idx=mask_idx,pretrained_path=Path(pretrained_folder) if pretrained_folder !="" else None)
    def clas_mhc_kim(self,mhc_select,cv_type="gs",cv_fold=0,working_folder="./clas_mhc",pretrained_folder="./lm_mhc"):
        '''
        Prepares Kim14 data of one allele with BD09 as train set and Blind as test set.
        The dataset is available at http://tools.iedb.org/main/datasets/ under bulletpoint "Dataset size and composition impact the reliability of performance benchmarks for peptide-MHC binding predictions." as benchmark_reliability.tar.gz and should be saved in ../data
        Returns raw dataframe with columns (species,mhc,peptide_length,cv,sequence,inequality,meas,label,cluster_ID) and allele ranking as dataframe with columns ("mhc","rank") if mhc_select and working_folder are set to None.
        mhc_select: int between 0 and 52, choose allele by frequency rank in Binding Data 2009
        cv_type: string, strategy for 5-fold cross validation, options:
            - None: No cv-strategy, cv column is filled with 'TBD'
            - sr: removal of similar peptides seperatly in binder/ non-binder set, using similarity threshold of 80%, similarity found with 'Hobohm 1 like algorithm'
            - gs: grouping similar peptides in the same cv-partition
            - rnd: random partioning
        cv_fold: 0..4 selects the cv_fold to be used as validation set (in case cv_type!= None)
        '''
        if mhc_select is not None and working_folder is not None:
            print("Preparing mhc classification dataset #"+str(mhc_select)+" ...")
            CLAS_PATH = Path(working_folder)
            LM_PATH=Path(pretrained_folder) if pretrained_folder!="" else None
            write_log_header(CLAS_PATH,locals())
        df_clas = generate_mhc_kim(cv_type=cv_type, mhc_select=mhc_select, to_csv=False, filename=None, data_dir=data_path,regression=True,transform_ic50="log")
        #generate fake cluster df for train test split
        # cluster_ID encodes the split: 0 = train, 1 = val, 2 = test
        df_clas["cluster_ID"]=0 #mark everything as train
        if(not cv_type in ["sr","gs","rnd"]):#use blind set as val set (if no cv splits provided)
            df_clas.loc[df_clas.cv=="blind","cluster_ID"]=1
        else:#cv_type specified
            df_clas.loc[df_clas.cv=="blind","cluster_ID"]=2#assign blind to test
            df_clas.loc[df_clas.cv==cv_fold,"cluster_ID"]=1#assign selected cv_fold to val
        if mhc_select is not None and working_folder is not None:
            self._preprocess_default(path=CLAS_PATH,pretrained_path=LM_PATH,df=df_clas,df_cluster=df_clas,sampling_method_train=-1,sampling_method_valtest=-1,regression=True,ignore_pretrained_clusters=True)
        else:
            # no preprocessing requested: return the raw data plus the
            # allele frequency ranking of the non-blind portion
            return df_clas,df_clas[df_clas.cv!="blind"].groupby('mhc').size().sort_values(ascending=False).reset_index()["mhc"].reset_index().rename(columns={"index":"rank"})
def clas_mhc_flurry(self,mhc_select,working_folder="./clas_mhc_flurry",pretrained_folder="./lm_mhc_flurry",test_set=None,pad_idx=0,mask_idx=1,sequence_len_min_aas=0,sequence_len_max_aas=0,sequence_len_max_tokens=0,bpe=False,bpe_vocab_size=100,random_seed=42,regression=True,transform_ic50=None,label=False):
'''
test_set="hpv" available for alleles
['HLA-A*01:01','HLA-A*11:01','HLA-A*02:01','HLA-A*24:02',
'HLA-A*03:01','HLA-B*15:01','HLA-B*07:02']
'''
print("Preparing mhc classification dataset "+str(mhc_select)+" ...")
CLAS_PATH = Path(working_folder)
LM_PATH=Path(pretrained_folder) if pretrained_folder!="" else None
write_log_header(CLAS_PATH,locals())
df_clas = generate_mhc_flurry(ms='noMS', mhc_select=mhc_select, binder_threshold=500, filter_length=True, label=label,data_dir=data_path, regression=regression,transform_ic50=transform_ic50)
if(test_set=="abelin"):
df_test = generate_abelin(mhc_select=mhc_select,data_dir=data_path)
df_clas = pd.concat([df_clas,df_test], ignore_index=True,sort=True)
elif(test_set=="hpv"):
flurry_hpv_map = {'HLA-A*01:01':'HLAA1',
'HLA-A*11:01':'HLAA11',
'HLA-A*02:01':'HLAA2',
'HLA-A*24:02':'HLAA24',
'HLA-A*03:01':'HLAA3',
'HLA-B*15:01':'HLAB15',
'HLA-B*07:02':'HLAB7'}
df_test = prepare_hpv(flurry_hpv_map[mhc_select], data_dir="../git_data")
df_test["cluster_ID"]=2
df_clas = pd.concat([df_clas,df_test], ignore_index=True,sort=True)
#self._preprocess_default(path=CLAS_PATH,pretrained_path=LM_PATH,df=df_clas,df_cluster=df_clas,pad_idx=pad_idx,mask_idx=mask_idx,sequence_len_min_aas=sequence_len_min_aas,sequence_len_max_aas=sequence_len_max_aas,sequence_len_max_tokens=sequence_len_max_tokens,bpe=bpe,bpe_vocab_size=bpe_vocab_size,sampling_method_train=-1,sampling_method_valtest=-1,regression=regression)
self._preprocess_default(path=CLAS_PATH,pretrained_path=LM_PATH,df=df,df_cluster=df,sampling_method_train=-1,sampling_method_valtest=-1,regression=True,ignore_pretrained_clusters=True)
def clas_mhc_i_zhao(self, mhc_select, working_folder="./clas_mhc_i_zhao",pretrained_folder="./lm_mhc",train_set="MHCFlurry18"):
'''
Prepares one allele from IEDB16_I test data from https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006457:
Use either Kim14 or MHCFlurry18 data as training set.
Choose the allele by frequency rank in the selected training set.
Returns raw dataframe with columns ('ID', 'allele', 'cluster_ID', 'inequality', 'label', 'meas','measurement_source', 'measurement_type', 'original_allele','sequence']) and allele ranking as dataframe with columns ("allele","rank") if mhc_select snf working_folder are set to None.
mhc_select: integer between 0...29 or ['HLA-A-0205', 'HLA-B-2704', 'HLA-B-2706'], the latter are not in Kim data, thus MHCFlurry18 data is used
train_set: "MHCFlurry18" or "Kim14"
'''
assert train_set in ["MHCFlurry18","Kim14"], "no valid train set"
if mhc_select is not None and working_folder is not None:
print("Preparing mhc I dataset #"+str(mhc_select))
CLAS_PATH = Path(working_folder)
LM_PATH=Path(pretrained_folder) if pretrained_folder!="" else None
write_log_header(CLAS_PATH,locals())
df_test = prepare_mhci_pcbi()
df_test["cluster_ID"]=2
if train_set=="Kim14":
if mhc_select in ['HLA-A-0205', 'HLA-B-2704', 'HLA-B-2706']:
df_train = generate_mhc_flurry(ms='noMS', mhc_select=None, regression=True, transform_ic50="log", data_dir="../data")
df_train["allele"] = df_train["allele"].apply(lambda x: x.replace("*","-",1).replace(":","",1) if x.startswith("HLA") else x)
else:
tmp = generate_mhc_kim(cv_type="gs", mhc_select=None, regression=True, transform_ic50="log",data_dir="../data/benchmark_mhci_reliability/binding")
df_train = generate_mhc_kim(cv_type="gs", mhc_select=None, regression=True, transform_ic50="log",data_dir="../data/benchmark_mhci_reliability/binding").rename(columns={"mhc":"allele"})
df_train["cluster_ID"]=0 #mark everything as train
df_train.loc[df_train.cv==0,"cluster_ID"]=1 # validation set
train_alleles_also_in_test_set = df_train["allele"][df_train["allele"].isin(df_test["allele"].unique())].unique()
allele_ranking = df_train[df_train["allele"].isin(train_alleles_also_in_test_set)].groupby("allele").size().sort_values(ascending=False).index
elif train_set=="MHCFlurry18":
df_train = generate_mhc_flurry(ms='noMS', mhc_select=None, regression=True, transform_ic50="log", data_dir="../data")
df_train["allele"] = df_train["allele"].apply(lambda x: x.replace("*","-",1).replace(":","",1) if x.startswith("HLA") else x)
test_alleles = df_test.allele.unique()
df_train = df_train[df_train.allele.isin(test_alleles)]
print(test_alleles.shape, df_train.allele.nunique())
allele_ranking = df_train.groupby("allele").size().sort_values(ascending=False).index
df = concat([df_train,df_test],ignore_index=True,sort=True)
df.duplicated(["allele","sequence"],keep="last")
# Take sequences also present in test set out of the training set
df = df[~df.duplicated(["allele","sequence"],keep="last")]
if mhc_select is not None:
if train_set=="Kim14":
if mhc_select in range(30):
#print(allele_ranking[mhc_select])
df = df[df.allele==allele_ranking[mhc_select]]
elif mhc_select in ['HLA-A-0205', 'HLA-B-2704', 'HLA-B-2706']:
df = df[df.allele==mhc_select]
elif train_set=="MHCFlurry18":
df = df[df.allele==allele_ranking[mhc_select]]
if working_folder is not None:
self._preprocess_default(path=CLAS_PATH,pretrained_path=LM_PATH,df=df,df_cluster=df,sampling_method_train=-1,sampling_method_valtest=-1,regression=True,ignore_pretrained_clusters=True)
else:
return df,df_train.groupby("allele").size().sort_values(ascending=False).reset_index()["allele"].reset_index().rename(columns={"index":"rank"})
    def clas_mhc_i_hpv(self, mhc_select, working_folder="./clas_mhc_i_hpv",pretrained_folder="./lm_mhc", train_set="MHCFlurry18"):
        '''
        Prepare test data from "Performance evaluation of MHC class-I binding prediction tools based on an experimentally validated MHC-peptide binding data set" (Cancer Immunol Res 2019;7:719-36).
        Save the dataset as HPV_data.csv in ./data.
        Use either Kim14 or MHCFlurry18 data as training set.
        mhc_select: string from ['HLAA1', 'HLAA2', 'HLAA3', 'HLAA11', 'HLAA24', 'HLAB7', 'HLAB15']
        train_set: "MHCFlurry18" or "Kim14"
        '''
        assert train_set in ["MHCFlurry18","Kim14"], "no valid train set"
        CLAS_PATH = Path(working_folder)
        LM_PATH=Path(pretrained_folder) if pretrained_folder!="" else None
        df = prepare_hpv(mhc_select, data_dir="../git_data")
        # cluster_ID encodes the split: 0 = train, 1 = val, 2 = test
        df["cluster_ID"] = 2 #mark everything as test
        if train_set=="MHCFlurry18":
            # map HPV dataset allele names to MHCFlurry naming
            flurry_hpv_map = {'HLAA1': 'HLA-A*01:01',
                              'HLAA11': 'HLA-A*11:01',
                              'HLAA2': 'HLA-A*02:01',
                              'HLAA24': 'HLA-A*24:02',
                              'HLAA3': 'HLA-A*03:01',
                              'HLAB15': 'HLA-B*15:01',
                              'HLAB7': 'HLA-B*07:02'}
            df_train = generate_mhc_flurry(ms='noMS', mhc_select=flurry_hpv_map[mhc_select], regression=True, transform_ic50="log", binder_threshold=500, filter_length=True, label_binary=False, data_dir=data_path)
        elif train_set=="Kim14":
            # map HPV dataset allele names to Kim14 naming
            kim_hpv_map = {'HLAA1': 'HLA-A-0101',
                           'HLAA11': 'HLA-A-1101',
                           'HLAA2': 'HLA-A-0201',
                           'HLAA24': 'HLA-A-2402',
                           'HLAA3': 'HLA-A-0301',
                           'HLAB15': 'HLA-B-1501',
                           'HLAB7': 'HLA-B-0702'}
            df_train = generate_mhc_kim(cv_type="gs", mhc_select=kim_hpv_map[mhc_select], regression=True, transform_ic50="log",data_dir=data_path).rename(columns={"mhc":"allele"})
            #df_train = df_train[df_train["allele"]==kim_hpv_map[mhc_select]]
            # NOTE(review): the cv-based split below assumes a 'cv' column,
            # which the Kim14 data provides — confirm whether it was intended
            # for the MHCFlurry branch as well.
            df_train["cluster_ID"]=0 #mark everything as train
            df_train.loc[df_train.cv==0,"cluster_ID"]=1#assign one cv_fold to val
        df = pd.concat([df, df_train], ignore_index=True,sort=True)
        self._preprocess_default(path=CLAS_PATH,pretrained_path=LM_PATH,df=df,df_cluster=df,sampling_method_train=-1,sampling_method_valtest=-1,regression=True,ignore_pretrained_clusters=True)
    def clas_mhc_sars_cov(self, mhc_select, train_on, MS=False, working_folder="./clas_mhc_i_sars_cov",pretrained_folder="./lm_mhc"):
        '''
        Prepare test data from https://www.nature.com/articles/s41598-020-77466-4#MOESM1 - stability measurements of SARS-CoV-2 peptides
        train_on: string, options "netmhcpan41", "netmhcpan4", "flurry"
        Use MHCFlurry18 or NetMHCpan data as training set for MHC I and NetMHCpan dataset for MHC II
        Optionally use MS data from NetMHCpan with MS set to True.
        mhc_select: string from ["1 A0101",
        "2 A0201",
        "3 A0301",
        "4 A1101",
        "5 A2402",
        "6 B4001",
        "7 C0401",
        "8 C0701",
        "9 C0702",
        "10 C0102",
        "11 DRB10401"]
        working_folder: if None, returns dataframe
        '''
        CLAS_PATH = Path(working_folder) if working_folder is not None else None
        LM_PATH=Path(pretrained_folder) if pretrained_folder!="" else None
        df = prepare_sars_cov(mhc_select, data_dir=data_path)
        # cluster_ID encodes the split: 0 = train, 1 = val, 2 = test
        df["cluster_ID"] = 2 #mark everything as test
        if MS:
            # binarize the stability measurement for the MS classification task
            df["label"] = df["label"] >= 60
        if mhc_select=="11 DRB10401":
            # MHC II allele: always use netmhc 3.2 data for training
            df_train = prepare_mhcii_netmhcpan(mhc_select="DRB1_0401", MS=MS, data_dir=data_path, netmhc_data_version="3.2")
        else:
            # mapping between sheet titles in Covid19-Intavis-Immunitrack-datasetV2.xlsx corresponding to allele names
            # to allele names in MHCFlurry18 dataset
            if train_on=="flurry":
                allele_map = {"1 A0101": 'HLA-A*01:01',
                              "2 A0201": 'HLA-A*02:01',
                              "3 A0301": 'HLA-A*03:01',
                              "4 A1101": 'HLA-A*11:01',
                              "5 A2402": 'HLA-A*24:02',
                              "6 B4001": 'HLA-B*40:01',
                              "7 C0401": 'HLA-C*04:01',
                              "8 C0701": 'HLA-C*07:01',
                              "9 C0702": 'HLA-C*07:02' ,
                              "10 C0102": 'HLA-C*01:02'}
                df_train = generate_mhc_flurry(ms='noMS', mhc_select=allele_map[mhc_select], regression=True, transform_ic50="log", binder_threshold=500, filter_length=True, label_binary=False, data_dir=data_path)
            else:
                # NetMHCpan allele naming (no '*' separator)
                allele_map = {"1 A0101": 'HLA-A01:01',
                              "2 A0201": 'HLA-A02:01',
                              "3 A0301": 'HLA-A03:01',
                              "4 A1101": 'HLA-A11:01',
                              "5 A2402": 'HLA-A24:02',
                              "6 B4001": 'HLA-B40:01',
                              "7 C0401": 'HLA-C04:01',
                              "8 C0701": 'HLA-C07:01',
                              "9 C0702": 'HLA-C07:02' ,
                              "10 C0102": 'HLA-C01:02'}
                netmhc_version = "4.0" if train_on=="netmhcpan4" else "4.1"
                df_train = prepare_mhci_netmhcpan_4(mhc_select=allele_map[mhc_select], MS=MS, data_dir=data_path, netmhc_data_version=netmhc_version)
        df = pd.concat([df, df_train], ignore_index=True,sort=True)
        if MS:
            # booleans -> 0/1 integer labels for classification
            df["label"] = df["label"].astype(int)
        if working_folder is not None:
            self._preprocess_default(path=CLAS_PATH,pretrained_path=LM_PATH,df=df,df_cluster=df,sampling_method_train=-1,sampling_method_valtest=-1,regression=False if MS else True,ignore_pretrained_clusters=True)
        else:
            return df
def clas_mhc_sars_cov_pan(self, mhc_class, working_folder="./clas_mhc_i_sars_cov_pan"):
CLAS_PATH = Path(working_folder)
train = prepare_mhci_netmhcpan_4(None, with_MHC_seq=True, data_dir=data_path, netmhc_data_version="4.0")
test = prepare_sars_cov(None, mhc_class=mhc_class, with_MHC_seq=True, data_dir=data_path)
test["cluster_ID"] = 2
df = pd.concat([train,test], axis=0, sort=True, ignore_index=True)
# concat MHC pseudo sequence and peptide sequence
df["sequence"] = df["sequence1"] + "x" + df["sequence"]
prep = Preprocess()
# provide previous tokens so that "x" is mapped to '_pad_'
prev_tok = ['_pad_', '_mask_', '_bos_', 'G', 'E', 'S', 'P', 'A', 'D', 'T', 'V', 'L', 'R', 'N', 'I', 'Q', 'K', 'C', 'F', 'H', 'M', 'Y', 'W']
self._preprocess_default(path=CLAS_PATH,
df=df,df_cluster=df,
sampling_method_train=-1,sampling_method_valtest=-1,
regression=True,ignore_pretrained_clusters=True,
tok_itos_in=prev_tok)
def clas_mhc_ii(self, mhc_select, working_folder="./clas_mhc_ii",pretrained_folder="./lm_mhc"):
'''
Prepares IEDB16_II data from https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006457: of on allele.
Uses | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gtk
import os
import uuid
import math
import subprocess
import time
import sys
import gobject
import platform
import json
import threading
import copy
import re
from Queue import Queue
try:
    # Run from the script's own directory so relative paths resolve, and make
    # the hyperspeed package importable from the repository root.
    cwd = os.getcwd()
    os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])))
    sys.path.append("../..")
    from hyperspeed import config_folder
    from hyperspeed.stack import Stack, Dependency, DEPENDENCY_TYPES
    from hyperspeed import mistika
    from hyperspeed import human
    from hyperspeed.copy import copy_with_progress
except ImportError:
    mistika = False
    # Fallback must be the config *directory*: 'fetch.cfg' is joined below.
    # (The previous fallback already ended in 'fetch.cfg', producing the
    # broken path '~/.mistika-hyperspeed/fetch.cfg/fetch.cfg'.)
    config_folder = os.path.expanduser('~/.mistika-hyperspeed')
script_settings_path = os.path.join(config_folder, 'fetch.cfg')
COLOR_DEFAULT = '#000000'
COLOR_DISABLED = '#888888'
COLOR_WARNING = '#ff8800'
COLOR_ALERT = '#cc0000'
def get_size(localOrRemote):
    """Return the size in bytes of a local path or a remote scp-style path.

    Remote paths look like '[ssh-args ][user@]host:path'; the size is taken
    from column 5 of `ls -n` run over ssh. Returns 0 when the path does not
    exist or the remote command fails.
    """
    remote = re.search(r'^(.*?)(?:([^\s@]*)@)?([^\s@]+)\:(.+)', localOrRemote)
    if remote:
        remoteArgs = remote.group(1)
        remoteUser = remote.group(2)
        remoteHost = remote.group(3)
        remotePath = remote.group(4)
        cmd = ['ssh']
        cmd += remoteArgs.split()
        if remoteUser:
            cmd += ['-l', remoteUser]
        cmd += [remoteHost]
        # quote the remote path so spaces survive the remote shell
        cmd += ['ls', '-n', '"'+remotePath+'"']
        sshProc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=open(os.devnull, 'wb'))
        # communicate() before checking the exit status: calling wait() first
        # with stdout=PIPE can deadlock if the child fills the pipe buffer.
        output = sshProc.communicate()[0]
        if sshProc.returncode == 0:
            return int(output.split()[4])
        else:
            return 0
    else:
        try:
            return os.path.getsize(localOrRemote)
        except OSError:
            return 0
class PyApp(gtk.Window):
    """Main window of the Fetch tool: manages path mappings, stacks and
    dependency transfers between local and remote Mistika hosts."""
    # NOTE(review): these are class-level attributes, shared by all instances;
    # acceptable for this single-window app but worth confirming if more than
    # one PyApp is ever created (settings is a mutable dict).
    batch_mode = False
    settings = {
        'mappings' : {},
    }
    statPoints = []
    bytesCopied = 0
    def __init__(self):
        """Build the main window, wire up the widget tree, parse command-line
        arguments, load persisted settings and start the background daemons."""
        super(PyApp, self).__init__()
        self.stacks = {}
        self.dependencies = {}
        self.threads = []
        self.queue_size = 0
        # progress bookkeeping for transfer-rate estimation
        self.last_update = {'time':0, 'copied':0}
        screen = self.get_screen()
        self.set_title("Fetch")
        # half the screen width (minus margin), nearly full height
        self.set_size_request(screen.get_width()/2-100, screen.get_height()-200)
        self.set_border_width(20)
        self.set_position(gtk.WIN_POS_CENTER)
        if 'darwin' in platform.system().lower():
            self.set_resizable(False) # Because resizing crashes the app on Mac
        # self.connect("key-press-event",self.on_key_press_event)
        vbox = gtk.VBox(False, 10)
        vbox.pack_start(self.init_mappings_view(), False, False, 5)
        vbox.pack_start(self.init_stacks_view())
        vbox.pack_start(self.init_dependencies_view())
        #menu = ['Sync project', 'Sync media']
        footer = gtk.HBox(False, 10)
        quitButton = gtk.Button('Quit')
        quitButton.set_size_request(70, 30)
        quitButton.connect("clicked", self.on_quit)
        footer.pack_end(quitButton, False, False)
        vbox.pack_end(footer, False, False, 10)
        self.add(vbox)
        self.connect("destroy", self.on_quit)
        self.show_all()
        self.parse_command_line_arguments()
        self.load_settings()
        self.init_finder_daemon()
        self.init_fetch_daemon()
        # bring the window to the front once the main loop is idle
        gobject.idle_add(self.present)
def load_settings(self):
try:
# print 'script_settings_path', script_settings_path, open(script_settings_path).read()
self.settings.update(json.loads(open(script_settings_path).read()))
treestore = self.mappings_treestore
treestore.clear();
# print self.settings['mappings']
for local, remotes in self.settings['mappings'].iteritems():
local_row = treestore.append(None, [local])
for remote in remotes:
treestore.append(local_row, [remote])
# print self.settings
except Exception as e:
print e
pass
else:
pass
finally:
pass
def save_settings(self):
open(script_settings_path, 'w').write(json.dumps(self.settings, sort_keys=True, indent=2))
    def on_quit(self, widget):
        """Handler for the Quit button and window destroy: stop the GTK main loop."""
        gtk.main_quit()
def parse_command_line_arguments(self):
# -H /Volumes/mediaraid/Projects/22189_Hurtigruta/MISTIKA_JS/MR2_0009_0021.js -L /Volumes/mediaraid/Projects/22189_Hurtigruta/MISTIKA_JS/L_MR2_0009_0021.js -S None -l 0 -n MR2_0009_0021_Raftsundet_V1-0004 -i 0 -s 0 -e 249 -p 22189_Hurtigruta -f RGB10:XFS.RGB10 -T 00:56:28:13 -a 160
if len(sys.argv) > 1:
print 'Command line arguments:', ' '.join(sys.argv[1:])
dependencies = []
for i in range(1, len(sys.argv)):
if sys.argv[i] == '-H' and len(sys.argv) > i+1:
if sys.argv[i+1] != 'None':
dependencies.append(Dependency(sys.argv[i+1], 'highres'))
if sys.argv[i] == '-L' and len(sys.argv) > i+1:
if sys.argv[i+1] != 'None':
dependencies.append(Dependency(sys.argv[i+1], 'lowres'))
if sys.argv[i] == '-S' and len(sys.argv) > i+1:
if sys.argv[i+1] != 'None':
dependencies.append(Dependency(sys.argv[i+1], 'audio'))
for dependency in dependencies:
if not dependency.path in self.dependencies:
self.dependencies[dependency.path] = dependency
gobject.idle_add(self.gui_dependency_add, dependency)
    def on_mapping_edited(self, cellrenderertext, path, new_text):
        """Cell-edit handler: write the edited text back into the tree store
        and persist the updated mappings."""
        # print cellrenderertext, path, new_text
        treestore = self.mappings_treestore
        treestore[path][0] = new_text
        self.on_mappings_changed()
def on_mappings_changed(self):
treestore = self.mappings_treestore
mappings = {}
for x in treestore:
mapping = []
for y in x.iterchildren():
# print '-', y[0]
mapping.append(y[0])
# print x[0]
mappings[x[0]] = mapping
self.settings['mappings'] = mappings
self.save_settings()
    def init_mappings_view(self):
        """Build and return the 'Mappings' expander pane.

        Contains add/remove buttons and a tree view where each top-level row
        is a local folder and each child row is a remote source for it.
        """
        expander = gtk.Expander("Mappings")
        vbox = gtk.VBox(False, 10)
        hbox = gtk.HBox(False, 10)
        button = gtk.Button('Add local folder')
        button.connect("clicked", self.add_local_folder)
        hbox.pack_start(button, False, False, 0)
        button = gtk.Button('Add source')
        button.connect("clicked", self.add_remote_folder)
        hbox.pack_start(button, False, False, 0)
        button = gtk.Button('Remove selected')
        button.connect("clicked", self.remove_selected_mappings)
        hbox.pack_start(button, False, False, 0)
        vbox.pack_start(hbox, False, False, 0)
        treestore = self.mappings_treestore = gtk.TreeStore(str) # Local, Local editable, Remote, Remote editable
        treeview = self.mappings_treeview = gtk.TreeView()
        treeview.set_rules_hint(True)
        # Single editable text column; edits are persisted via on_mapping_edited
        cell = gtk.CellRendererText()
        cell.set_property("editable", True)
        cell.connect('edited', self.on_mapping_edited, )
        column = gtk.TreeViewColumn('Mappings', cell, text=0)
        column.set_resizable(True)
        column.set_expand(True)
        treeview.append_column(column)
        treeview.set_model(treestore)
        treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
        scrolled_window = gtk.ScrolledWindow()
        scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolled_window.add(treeview)
        vbox.pack_start(scrolled_window)
        expander.add(vbox)
        return expander
    def add_local_folder(self, widget):
        """Append a new top-level local-folder row ('/'), select it, and persist."""
        treeview = self.mappings_treeview
        row_iter = self.mappings_treestore.append(None, ['/'])
        row_path = self.mappings_treestore.get_path(row_iter)
        selection = treeview.get_selection()
        selection.select_path(row_path)
        self.on_mappings_changed()
    def add_remote_folder(self, widget):
        """Append a placeholder remote source under each selected local row."""
        treeview = self.mappings_treeview
        selection = treeview.get_selection()
        (model, row_paths) = selection.get_selected_rows()
        for selected_row_path in row_paths:
            # Normalize tuple-form paths to their first element
            if type(selected_row_path) is tuple:
                selected_row_path = selected_row_path[0]
            selected_row_iter = model.get_iter(selected_row_path)
            row_iter = self.mappings_treestore.append(selected_row_iter, ['user@host:/'])
            row_path = self.mappings_treestore.get_path(row_iter)
            treeview.expand_to_path(row_path)
            # Leave only the newly added row selected so it is easy to edit
            selection.unselect_all()
            selection.select_path(row_path)
        self.on_mappings_changed()
    def remove_selected_mappings(self, widget):
        """Delete all selected rows from the mappings tree and persist."""
        treeview = self.mappings_treeview
        treestore = self.mappings_treestore
        selection = treeview.get_selection()
        (model, row_paths) = selection.get_selected_rows()
        # Remove in reverse order so earlier paths remain valid while deleting
        for selected_row_path in reversed(row_paths):
            selected_row_iter = model.get_iter(selected_row_path)
            treestore.remove(selected_row_iter)
        self.on_mappings_changed()
    def init_stacks_view(self):
        """Build and return the stacks pane.

        Holds add/remove buttons and a tree view listing loaded stack files,
        each with a status text and progress bar whose visibility is driven
        by boolean model columns.
        """
        vbox = gtk.VBox(False, 10)
        hbox = gtk.HBox(False, 10)
        hbox.pack_start(gtk.Label('Environments, groups or other structures to consolidate'), False, False)
        spacer = gtk.HBox(False)
        hbox.pack_start(spacer)
        button = gtk.Button('Add structure ...')
        button.connect("clicked", self.add_file_dialog)
        hbox.pack_start(button, False, False, 0)
        button = gtk.Button('Remove selected')
        button.connect("clicked", self.gui_on_selected_stacks, 'remove')
        hbox.pack_start(button, False, False, 0)
        vbox.pack_start(hbox, False, False, 0)
        treestore = self.stacks_treestore = gtk.TreeStore(str, float, str, bool, bool) # Name, progress float, progress text, progress visible, status visible
        treeview = self.stacks_treeview = gtk.TreeView()
        treeview.set_rules_hint(True)
        cell = gtk.CellRendererText()
        cell.set_property("editable", True)
        column = gtk.TreeViewColumn('Stack', cell, text=0)
        column.set_resizable(True)
        column.set_expand(True)
        treeview.append_column(column)
        # Status column combines a text cell and a progress-bar cell; their
        # visibility is switched via the boolean model columns (3 and 4).
        cell = gtk.CellRendererText()
        column = gtk.TreeViewColumn('Status')
        column.pack_start(cell, False)
        column.set_attributes(cell, text=2, visible=4)
        cell = gtk.CellRendererProgress()
        column.pack_start(cell, True)
        column.set_attributes(cell, value=1, text=2, visible=3)
        column.set_resizable(True)
        treeview.append_column(column)
        treeview.set_model(treestore)
        treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
        # treeview.expand_all()
        scrolled_window = gtk.ScrolledWindow()
        scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolled_window.add(treeview)
        vbox.pack_start(scrolled_window)
        return vbox
    def init_dependencies_view(self):
        """Build and return the dependencies ('Missing files') pane.

        Creates one top-level tree row per dependency type (seeded from
        DEPENDENCY_TYPES) with per-row progress display, a row of transfer
        statistics labels, and a 'Fetch selected' button.
        """
        vbox = gtk.VBox(False, 10)
        hbox = gtk.HBox(False, 10)
        hbox.pack_start(gtk.Label('Missing files'), False, False)
        spacer = gtk.HBox(False)
        hbox.pack_start(spacer)
        self.status_label = gtk.Label('No stacks loaded')
        # hbox.pack_start(self.status_label, False, False, 5)
        spinner = self.spinner_queue = gtk.Image()
        spinner.set_no_show_all(True)
        try:
            spinner.set_from_file('../../res/img/spinner01.gif')
        except:
            # Best effort: a missing spinner image is purely cosmetic
            pass
        hbox.pack_start(spinner, False, False, 5)
        # button = gtk.Button('Include selected')
        # button.connect("clicked", self.gui_on_selected_dependencies, 'unskip')
        # hbox.pack_end(button, False, False, 0)
        # button = gtk.Button('Skip selected')
        # button.connect("clicked", self.gui_on_selected_dependencies, 'skip')
        # hbox.pack_end(button, False, False, 0)
        vbox.pack_start(hbox, False, False, 0)
        treestore = self.dependencies_treestore = gtk.TreeStore(str, float, str, bool, str, str, str, str, bool) # Name, progress float, progress text, progress visible, details, human size, status, text color, status visible
        treeview = self.dependencies_treeview = gtk.TreeView()
        treeview.set_tooltip_column(4)
        treeview.set_rules_hint(True)
        treeselection = treeview.get_selection()
        treeselection.set_mode(gtk.SELECTION_MULTIPLE)
        # One top-level row per dependency type, with zeroed per-type counters
        self.dependency_types = copy.copy(DEPENDENCY_TYPES)
        for dependency_type_id, dependency_type in self.dependency_types.iteritems():
            dependency_type.meta = {
                'count' : 0,
                'size' : 0,
                'copied' : 0
            }
            row_iter = treestore.append(None, [dependency_type.description, 0.0, '', False, dependency_type.description, '', '', COLOR_DISABLED, False])
            row_path = treestore.get_path(row_iter)
            # TreeRowReference stays valid as rows are added/removed later
            dependency_type.row_reference = gtk.TreeRowReference(treestore, row_path)
        cell = gtk.CellRendererText()
        cell.set_property("editable", True)
        column = gtk.TreeViewColumn('', cell, text=0, foreground=7)
        column.set_resizable(True)
        column.set_expand(True)
        treeview.append_column(column)
        # Source column: status text plus a progress bar, toggled by model bools
        cell = gtk.CellRendererText()
        column = gtk.TreeViewColumn('Source')
        column.pack_start(cell, False)
        column.set_attributes(cell, text=6, foreground=7, visible=8)
        cell = gtk.CellRendererProgress()
        column.pack_start(cell, True)
        column.set_attributes(cell, value=1, text=2, visible=3)
        column.set_resizable(True)
        treeview.append_column(column)
        cell = gtk.CellRendererText()
        column = gtk.TreeViewColumn('Size', cell, text=5, foreground=7)
        column.set_resizable(True)
        treeview.append_column(column)
        treeview.set_model(treestore)
        treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
        # treeview.expand_all()
        scrolled_window = gtk.ScrolledWindow()
        scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolled_window.add(treeview)
        vbox.pack_start(scrolled_window)
        # Transfer-statistics row; the labels are updated elsewhere at runtime
        hbox = gtk.HBox(False, 10)
        label = gtk.Label('Transferred:');
        hbox.pack_start(label, False, False, 5)
        label = self.bytes_copied_label = gtk.Label('0B');
        hbox.pack_start(label, False, False, 5)
        label = gtk.Label('Time:');
        hbox.pack_start(label, False, False, 5)
        label = self.time_copied_label = gtk.Label('0');
        hbox.pack_start(label, False, False, 5)
        label = gtk.Label('Average rate:');
        hbox.pack_start(label, False, False, 5)
        label = self.average_rate_label = gtk.Label('0B/s');
        hbox.pack_start(label, False, False, 5)
        label = gtk.Label('Current rate:');
        hbox.pack_start(label, False, False, 5)
        label = self.rate_label = gtk.Label('0B/s');
        hbox.pack_start(label, False, False, 5)
        spacer = gtk.HBox(False)
        hbox.pack_start(spacer)
        button = gtk.Button('Fetch selected')
        button.connect("clicked", self.selected_dependencies_perform, 'fetch')
        hbox.pack_end(button, False, False, 0)
        vbox.pack_start(hbox, False, False, 0)
        return vbox
def add_file_dialog(self, widget):
if mistika:
folder = os.path.join(mistika.projects_folder, mistika.project, 'DATA')
else:
folder = '/'
dialog = gtk.FileChooserDialog(title="Add files", parent=None, action=gtk.FILE_CHOOSER_ACTION_OPEN, buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK), backend=None)
dialog.set_select_multiple(True)
#dialog.add_filter(filter)
dialog.add_shortcut_folder(mistika.env_folder)
dialog.add_shortcut_folder(folder)
dialog.set_current_folder(folder)
filter = gtk.FileFilter()
filter.set_name("Mistika structures")
filter.add_pattern("*.fx")
filter.add_pattern("*.env")
filter.add_pattern("*.grp")
filter.add_pattern("*.rnd")
filter.add_pattern("*.clp")
filter.add_pattern("*.lnk")
response = dialog.run()
if response == gtk.RESPONSE_OK:
for stack_path in dialog.get_filenames():
self.gui_stack_add(stack_path)
elif response == gtk.RESPONSE_CANCEL:
print 'Closed, no files selected'
dialog.destroy()
    def gui_stack_add(self, stack_path):
        """Register a stack file in the stacks view and scan its dependencies.

        Duplicate paths are ignored. Dependency scanning runs in a daemon
        thread so the GUI stays responsive.
        """
        if stack_path in self.stacks:
            return
        self.stacks[stack_path] = Stack(stack_path)
        stack = self.stacks[stack_path]
        row_iter = self.stacks_treestore.append(None, [stack_path, 0.0, '0%', False, False])
        row_path = self.stacks_treestore.get_path(row_iter)
        # Keep a stable reference to this stack's row for later updates
        stack.row_reference = gtk.TreeRowReference(self.stacks_treestore, row_path)
        # for dependency in stack.dependencies:
        # self.dependencies_treestore.append(None, [dependency.name])
        # print 'creating thread'
        t = threading.Thread(target=self.get_dependencies, args=[stack])
        self.threads.append(t)
        t.setDaemon(True)
        t.start()
        # print 'started thread'
        # print threading.active_count()
sources = {}
def finder_daemon(self):
q = self.finder_queue = Queue()
while True:
dependency = q.get()
for localPath in self.settings['mappings']:
if dependency.path.startswith(localPath):
for source in self.settings['mappings'][localPath]:
sourcePath = dependency.path.replace(localPath, source)
with dependency.lock:
if '%' in dependency.path:
dependency._size = 0
for frame_range in dependency.frame_ranges:
frame_range._size = 0
for i in range(frame_range.start, frame_range.end+1):
frame_range._size += get_size(sourcePath % i)
dependency._size += frame_range.size
else:
dependency._size = get_size(sourcePath)
if dependency._size > 4*1000*1000*1000 and dependency.path.endswith('.R3D'):
match = re.search(r"(\d{3})\.R3D$", dependency.path)
if match:
index = int(match.group(1))
nextPath = dependency.path.replace(match.group(1)+'.R3D', '%03d.R3D' % (index + 1))
self.add_dependency(Dependency(nextPath, dependency.type, dependency.start, dependency.end, dependency, dependency.level, dependency.x, dependency.duration))
# print 'Found', sourcePath, human.size(dependency.size)
if dependency._size:
break
if dependency._size > 0:
self.sources[dependency.path] = sourcePath;
gobject.idle_add(self.gui_row_update, self.dependencies_treestore, dependency.row_reference, | |
# <gh_stars>100-1000
#!/usr/bin/python
# Copyright (c) 2018, 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_load_balancer_backend_set
short_description: Create, update and delete a backend set of a load balancer.
description:
- Create an OCI Load Balancer Backend Set
- Update OCI Load Balancers Backend Set, if present.
- Delete OCI Load Balancers Backend Set, if present.
version_added: "2.5"
options:
load_balancer_id:
description: Identifier of the Load Balancer. Mandatory for create,delete and update.
required: true
aliases: ['id']
name:
description: Name of the Load Balancer Backend Set. A user friendly name. Does not have to be unique,
and could be changed. Mandatory for create and update.
required: false
state:
description: Create,update or delete Load Balancer Backend Set. For I(state=present), if it
                     does not exist, it gets created. If it exists, it gets updated.
required: false
default: 'present'
choices: ['present','absent']
policy:
description: The load balancer policy for the backend set. M(oci_load_balancer_policy_facts) could be
                     used to fetch policy types supported by OCI Load Balancer Service.
required: false
backends:
description: A list of configurations related to Backends that are part of a backend set.
required: false
suboptions:
ip_address:
description: IP address of the backend server.
required: true
port:
description: The communication port for the backend server
required: true
backup:
description: Specifies whether the load balancer should treat this server as a backup
unit. If true, the load balancer forwards no ingress traffic to this backend
server unless all other backend servers not marked as "backup" fail the health
check policy.
required: false
default: False
drain:
description: Specifies whether the load balancer should drain this server. Servers
marked "drain" receive no new incoming traffic.
required: false
default: False
offline:
description: Ensures whether the load balancer should treat this server as offline.
Offline servers receive no incoming traffic.
required: false
default: False
weight:
description: Describes the load balancing policy weight assigned to the server.
Backend servers with a higher weight receive a larger proportion of incoming
traffic. For example, a server weighted '3' receives 3 times the number of new
connections as a server weighted '1'.
required: false
default: 1
health_checker:
description: Describes the health check policy for a backend set.
required: false
suboptions:
interval_in_millis:
description: Describes the interval between health checks, in milliseconds.
required: false
default: 10000
port:
description: Describes the backend server port against which to run the
health check. If the port is not specified, the load balancer
uses the port information from the backends.
required: false
default: 0
protocol:
description: Describes the protocol the health check must use, either HTTP or TCP.
required: true
choices: ['HTTP', 'TCP']
response_body_regex:
description: Describes a regular expression for parsing the response body from the
backend server.
required: false
default: '.*'
retries:
description: Describes the number of retries to attempt before a backend
server is considered unhealthy.
required: false
default: 3
return_code:
description: Describes the status code a healthy backend server should return.
required: false
default: 200
timeout_in_millis:
description: Describes the maximum time, in milliseconds, to wait for a reply to
a health check. A health check is successful only if a reply returns
within this timeout period.
required: false
default: 3000
url_path:
description: Describes the path against which to run the health check.
required: true
session_persistence_configuration:
description: The configuration details for implementing session persistence. Session
persistence enables the Load Balancing Service to direct any number of
requests that originate from a single logical client to a single backend
web server.
required: false
suboptions:
cookie_name:
description: Describes the name of the cookie used to detect a session initiated by the
backend server. Use '*' to specify that any cookie set by the backend causes
the session to persist.
required: true
disable_fallback:
                description: Describes whether the load balancer is prevented from directing traffic from a
persistent session client to a different backend server if the original server
is unavailable.
required: false
default: False
ssl_configuration:
description: The load balancer's SSL handling configuration details.
required: false
suboptions:
certificate_name:
description: Describes a friendly name for the certificate bundle. It must be unique
and it cannot be changed. Valid certificate bundle names include only alphanumeric
characters, dashes, and underscores.Certificate bundle names cannot contain spaces.
required: true
verify_depth:
description: Describes the maximum depth for peer certificate chain verification.
required: false
verify_peer_certificate:
                description: Describes whether the load balancer listener should verify peer certificates.
required: false
purge_backends:
description: Purge any backends in the Backend Set named I(name) that is not specified in I(backends).
If I(purge_backends=no), provided backends would be appended to existing backends.
I(purge_backends) and I(delete_backends) are mutually exclusive.
required: false
default: 'yes'
type: bool
delete_backends:
description: Delete any backends in the Backend Set named I(name) that is specified in I(backends).
If I(delete_backends=yes), backends provided by I(backends) would be deleted from existing
backends, if they are part of existing backends. If they are not part of existing backends,
they will be ignored. I(delete_backends) and I(purge_backends) are mutually exclusive.
required: false
default: 'no'
type: bool
author:
- "<NAME>(@debayan_gupta)"
extends_documentation_fragment: [ oracle, oracle_wait_options ]
"""
EXAMPLES = """
# Note: These examples do not set authentication details.
# Create a backend set named "ansible_backend_set" in a load balancer
- name: Create Load Balancer Backend Set
oci_load_balancer_backend_set:
name: "ansible_backend_set"
load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
backends:
- ip_address: "10.159.34.21"
port: 8080
health_checker:
interval_in_millis: 30000
port: 8080
protocol: "HTTP"
response_body_regex: "^(500|40[1348])$"
retries: 3
timeout_in_millis: 6000
return_code: 200
url_path: "/healthcheck"
policy: "LEAST_CONNECTIONS"
session_persistence_configuration:
cookie_name: "ansible_backend_set_cookie"
disable_fallback: True
ssl_configuration:
certificate_name: "certs1"
verify_depth: 3
verify_peer_certificate: True
state: 'present'
# Update Load Balancer Backend Set
- name: Update Load Balancer Backend Set
oci_load_balancer_backend_set:
load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
name: "ansible_backend_set"
backends:
- ip_address: "10.159.34.25"
port: 8282
purge_backends: 'no'
state: 'present'
# Update Load Balancer Backend Set by deleting backends
- name: Update Load Balancer Backend Set by deleting backends
oci_load_balancer_backend_set:
load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
name: "ansible_backend_set"
backends:
- ip_address: "10.159.34.25"
port: 8282
delete_backends: 'yes'
state: 'present'
# Delete Load Balancer Backend Set
- name: Delete Load Balancer Backend Set
oci_load_balancer_backend_set:
load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
name: "ansible_backend_set"
state: 'absent'
"""
RETURN = """
backend_set:
description: Attributes of the created/updated Load Balancer Backend Set.
For delete, deleted Load Balancer Backend Set description will
be returned.
returned: success
type: complex
contains:
backends:
description: A list of configurations related to Backends that are part of the backend set
returned: always
type: list
sample: [
{
"backup": false,
"drain": false,
"ip_address": "10.159.34.21",
"name": "10.159.34.21:8080",
"offline": false,
"port": 8080,
"weight": 1
},
{
"backup": false,
"drain": false,
"ip_address": "10.159.34.21",
"name": "10.159.34.21:8282",
"offline": false,
"port": 8282,
"weight": 1
}
]
name:
description: Name assigned to the Load Balancer Backend Set during creation
returned: always
type: string
sample: ansible_backend_set
health_checker:
description: Health check policy for a backend set.
returned: always
type: dict
sample: {
"interval_in_millis": 30000,
"port": 8080,
"protocol": "HTTP",
"response_body_regex": "^(500|40[1348])$",
"retries": 3,
"return_code": 200,
"timeout_in_millis": 6000,
"url_path": "/healthcheck"
}
policy:
description: The load balancer policy for the backend set.
returned: always
type: string
sample: LEAST_CONNECTIONS
session_persistence_configuration:
description: The configuration details for implementing session persistence
returned: always
type: dict
sample: {
"cookie_name": "first_backend_set_cookie",
"disable_fallback": true
}
ssl_configuration:
description: The load balancer's SSL handling configuration details.
returned: always
type: dict
sample: {
"certificate_name": "certs1",
"verify_depth": 1,
"verify_peer_certificate": true
}
sample: {"backends": [
{
"backup": false,
"drain": false,
"ip_address": "10.159.34.21",
"name": "10.159.34.21:8080",
"offline": false,
"port": 8080,
"weight": 1
},
{
"backup": false,
"drain": false,
"ip_address": "10.159.34.21",
"name": "10.159.34.21:8282",
"offline": false,
"port": 8282,
"weight": 1
}
],
"health_checker": {
"interval_in_millis": 30000,
"port": 8080,
"protocol": "HTTP",
"response_body_regex": "^(500|40[1348])$",
"retries": 3,
"return_code": 500,
"timeout_in_millis": 6000,
"url_path": "/healthcheck"
},
"name": "backend_set_1",
"policy": "IP_HASH",
"session_persistence_configuration": {
"cookie_name": "first_backend_set_cookie_updated",
"disable_fallback": true
},
"ssl_configuration": {
"certificate_name": "certs1",
"verify_depth": 1,
"verify_peer_certificate": true
}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils, oci_lb_utils
try:
from oci.load_balancer.load_balancer_client import LoadBalancerClient
from oci.exceptions import | |
"""
specter.util.util
=================
Utility functions and classes for specter
<NAME>
Fall 2012
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import math
import numpy as np
import scipy.signal
from scipy.special import legendre
from scipy.sparse import spdiags
from scipy.signal import convolve, convolve2d
from specter.util import pixspline
from time import time
_t0 = 0.0
def _timeit():
    """Return seconds elapsed since the previous call (module-level stopwatch)."""
    global _t0
    now = time()
    elapsed = now - _t0
    _t0 = now
    return elapsed
#- 2D Linear interpolator
class LinearInterp2D(object):
    """
    Bilinear interpolation on a regular 2D grid.

    The gridded values may themselves be multi-dimensional; interpolation
    acts over the leading two (x, y) axes of ``data``.
    """
    def __init__(self, x, y, data):
        """
        x : array of x coordinates
        y : array of y coordinates
        data[ix, iy, ...] : array of data to interpolate (3 or more
            dimensions allowed); the first two axes correspond to x and y
        """
        self.x = np.array(x)
        self.y = np.array(y)
        self.data = np.array(data)

    def __call__(self, x, y):
        """
        Evaluate the interpolated data at (x, y); x and/or y may be arrays.
        """
        #- TODO: compare speed to solution at
        #- http://stackoverflow.com/questions/12729228/simple-efficient-bilinear-interpolation-of-images-in-numpy-and-python
        # Upper grid indices bracketing the requested points. Clipping to
        # [1, len-1] keeps index i-1 valid and extrapolates linearly beyond
        # the grid boundary.
        ix_hi = np.searchsorted(self.x, x).clip(1, len(self.x)-1)
        iy_hi = np.searchsorted(self.y, y).clip(1, len(self.y)-1)
        # Fractional position of each point within its bracketing cell
        fx = (x - self.x[ix_hi-1]) / (self.x[ix_hi] - self.x[ix_hi-1])
        fy = (y - self.y[iy_hi-1]) / (self.y[iy_hi] - self.y[iy_hi-1])
        # Interpolate along x at the two bracketing y rows, then along y.
        # NOTE: these two lines dominate the evaluation time.
        row_lo = self.data[ix_hi-1, iy_hi-1]*(1-fx) + self.data[ix_hi, iy_hi-1]*fx
        row_hi = self.data[ix_hi-1, iy_hi]*(1-fx) + self.data[ix_hi, iy_hi]*fx
        return row_lo*(1-fy) + row_hi*fy
def rebin_image(image, n):
    """
    Downsample a 2D array by summing over non-overlapping n x n tiles.

    Both image dimensions must be exact multiples of n.
    """
    ny, nx = image.shape
    assert ny % n == 0
    assert nx % n == 0
    # Reshape so each tile occupies axes 1 and 3, then sum those axes
    tiled = image.reshape(ny//n, n, nx//n, n)
    return tiled.sum(axis=3).sum(axis=1)
#- Utility functions for sinc shifting pixelated PSFs
def _sincfunc(x, dx, dampfac=3.25):
    """Gaussian-damped sinc kernel evaluated at x+dx; helper for sincshift()."""
    if dx == 0.0:
        # No shift: the kernel degenerates to a unit spike at the center
        kernel = np.zeros(len(x))
        kernel[len(x)//2] = 1.0
        return kernel
    phase = (x + dx) * np.pi  #- cache the shifted array for faster evals
    return np.exp(-(phase/(dampfac*np.pi))**2) * np.sin(phase) / phase
#- Implementation note: the typical PSF image is 15x15.
#- fftconvolve is not faster than convolve for images this small
def sincshift(image, dx, dy, sincrad=10, dampfac=3.25):
    """
    Return `image` shifted by (dx, dy) using 1D sinc interpolation applied
    independently along each axis.

    Treating the axes independently is fast but can introduce edge effects;
    see sincshift2d() for the full 2D treatment.
    """
    offsets = np.arange(-sincrad, sincrad+1.0)
    shape = image.shape
    # Skip an axis entirely when its shift is negligible
    if abs(dx) > 1e-6:
        kernel_x = _sincfunc(offsets, -dx, dampfac=dampfac)
        image = convolve(image.ravel(), kernel_x, mode='same').reshape(shape)
    if abs(dy) > 1e-6:
        kernel_y = _sincfunc(offsets, -dy, dampfac=dampfac)
        # Transpose so convolution runs along the y axis, then undo it
        image = convolve(image.T.ravel(), kernel_y, mode='same')
        image = image.reshape(shape[-1::-1]).T
    return image
def sincshift2d(image, dx, dy, sincrad=10, dampfac=3.25):
    """
    Return `image` shifted by (dx, dy) using a full 2D sinc-interpolation
    kernel (outer product of the y and x 1D kernels).
    """
    offsets = np.arange(-sincrad, sincrad+1.0)
    kernel = np.outer(_sincfunc(offsets, -dy, dampfac=dampfac),
                      _sincfunc(offsets, -dx, dampfac=dampfac))
    return convolve2d(image, kernel, mode='same')
from scipy.special import erf
def gaussint(x, mean=0.0, sigma=1.0):
    """
    Return the Gaussian(mean, sigma) CDF at x, i.e. the integral of the
    normalized Gaussian from -inf to x.
    """
    z = (x - mean) / (sigma * math.sqrt(2))
    return 0.5 * (erf(z) + 1.0)
def gausspix(x, mean=0.0, sigma=1.0):
    """
    Return Gaussian(mean, sigma) integrated over unit-width pixels centered
    at each element of x[].
    """
    # Pixel boundaries: left edge of every pixel plus the final right edge
    bounds = np.concatenate((x - 0.5, x[-1:] + 0.5))
    cdf = gaussint(bounds, mean=mean, sigma=sigma)
    # Each pixel's integral is the CDF difference across its boundaries
    return np.diff(cdf)
def weighted_solve(A, b, w):
    """
    Solve `A x = b` in the weighted least-squares sense with weights `w` on `b`.

    Returns (x, iCov) where iCov = A^T W A is the inverse covariance of x.
    """
    assert len(b) == len(w)
    n = len(b)
    W = spdiags(w, [0,], n, n)
    y = A.T.dot(W.dot(b))
    iCov = A.T.dot(W.dot(A))
    # rcond=None requests the machine-precision singular-value cutoff and
    # silences the FutureWarning that np.linalg.lstsq emits when rcond is
    # left unspecified.
    x = np.linalg.lstsq(iCov, y, rcond=None)[0]
    return x, iCov
def trapz(edges, xp, yp):
    """
    Perform trapezoidal integration between edges using sampled function
    yp vs. xp. Returns array of length len(edges)-1.

    Input xp array must be sorted in ascending order.

    See also numpy.trapezoid / scipy.integrate.trapezoid, which integrate
    a single sampled array.
    """
    if np.any(np.diff(xp) < 0.0):
        raise ValueError("Input x must be sorted in increasing order")
    if len(xp) != len(yp):
        raise ValueError("xp and yp must have same length")
    # Function values at the requested edges, via linear interpolation
    yedge = np.interp(edges, xp, yp)
    iedge = np.searchsorted(xp, edges)
    result = np.zeros(len(edges)-1)
    for i in range(len(edges)-1):
        ilo, ihi = iedge[i], iedge[i+1]
        # Integrate over the two bracketing edges plus any interior samples
        xx = np.concatenate( (edges[i:i+1], xp[ilo:ihi], edges[i+1:i+2]) )
        yy = np.concatenate( (yedge[i:i+1], yp[ilo:ihi], yedge[i+1:i+2]) )
        # Explicit trapezoid sum: np.trapz was removed in NumPy 2.0; this
        # expression reproduces its formula exactly.
        result[i] = np.sum(np.diff(xx) * (yy[1:] + yy[:-1]) / 2.0)
    return result
def resample(x, xp, yp, xedges=False, xpedges=False):
    """
    IN PROGRESS. Resample a spectrum to a new binning using PixelSpline.

    1 <= x.ndim <= xp.ndim <= yp.ndim <= 2

    x : output bin centers (or boundaries if xedges=True)
    xp : input sample centers (or boundaries if xpedges=True)
    yp : input sample values
    """
    assert 1 <= x.ndim
    assert x.ndim <= xp.ndim
    assert xp.ndim <= yp.ndim
    assert yp.ndim <= 2
    # Convert centers to bin boundaries unless boundaries were given directly
    input_edges = xp if xpedges else pixspline.cen2bound(xp)
    ys = pixspline.PixelSpline(input_edges, yp)
    edges = x if xedges else pixspline.cen2bound(x)
    return ys.resample(edges)
#- Faster versions than np.outer, which has to do type checking and raveling
try:
    #- ~3x faster if numba is installed
    # Honor NUMBA_DISABLE_JIT by forcing the pure-numpy fallback below
    if 'NUMBA_DISABLE_JIT' in os.environ:
        raise ImportError
    import numba
    @numba.jit
    def outer(x, y, out):
        # Explicit loops compile to fast native code under numba
        for i in range(len(x)):
            for j in range(len(y)):
                out[i,j] = x[i] * y[j]
        return out
except ImportError:
    #- 1.5x faster otherwise
    def outer(x, y, out):
        # Broadcasting-based outer product written into preallocated `out`
        return np.multiply(x[:, None], y[None, :], out)
# Much faster than numpy.polynomial.legendre.legval, but doesn't work with scalars
import numba
@numba.jit(nopython=True,cache=False)
def legval_numba(x, c):
    """Evaluate the Legendre series with coefficients c at array points x.

    Uses the Clenshaw-style downward recurrence; requires array-valued x
    (scalars not supported) and len(c) >= 2 for the c[-2] seed below.
    """
    nd=len(c)
    ndd=nd
    xlen = x.size
    # Seed the recurrence with the two highest-order coefficients
    c0=c[-2]*np.ones(xlen)
    c1=c[-1]*np.ones(xlen)
    for i in range(3, ndd + 1):
        tmp = c0
        nd = nd - 1
        nd_inv = 1/nd
        c0 = c[-i] - (c1*(nd - 1))*nd_inv
        c1 = tmp + (c1*x*(2*nd - 1))*nd_inv
    return c0 + c1*x
@numba.jit(nopython=True, cache=False)
def custom_hermitenorm(n, u):
    """
    Custom implementation of scipy.special.hermitenorm to enable jit-compiling
    with Numba (which as of 10/2018 does not support scipy). This functionality
    is equivalent to:
        fn = scipy.special.hermitenorm(n)
        return fn(u)
    with the exception that scalar values of u are not supported.
    Inputs:
        n: the degree of the hermite polynomial
        u: (requires array) points at which the polynomial will be evaluated.
    Outputs:
        res: the value of the hermite polynomial at array points(u)
    """
    #below is (mostly) cut and paste from scipy orthogonal_eval.pxd
    #some modifications have been made to operate on an array
    #rather than a single value (as in the original version)
    res=np.zeros(len(u))
    # Degenerate degrees: zero, constant, or identity polynomials
    if n < 0:
        return (0.0)*np.ones(len(u))
    elif n == 0:
        return (1.0)*np.ones(len(u))
    elif n == 1:
        return u
    else:
        y3 = 0.0
        y2 = 1.0
        for i,x in enumerate(u):
            # Run the recurrence down from k = n for each point separately
            for k in range(n, 1, -1):
                y1 = x*y2-k*y3
                y3 = y2
                y2 = y1
            res[i]=x*y2-y3
            #have to reset before the next iteration
            y3 = 0.0
            y2 = 1.0
        return res
@numba.jit(nopython=True, cache=False)
def custom_erf(y):
"""Custom implementation of :func:`scipy.special.erf` to enable jit-compiling
    with Numba (which as of 10/2018 does not support scipy). This functionality is equivalent to::
scipy.special.erf(y)
with the exception that scalar values of y are not supported.
Parameters
----------
y : array-like
Points at which the error function will be evaluated.
Returns
-------
array-like
The value of the error function at points in array `y`.
Notes
-----
This function has been translated from the original fortran function
to Python. The original scipy erf function can be found at:
https://github.com/scipy/scipy/blob/8dba340293fe20e62e173bdf2c10ae208286692f/scipy/special/cdflib/erf.f
Note that this new function introduces a small amount of machine-precision numerical error
as compared to the original scipy function.
"""
#have to define a ton of constants
c=0.564189583547756E0
###
a1=0.771058495001320E-04
a2=-0.133733772997339E-02
a3=0.323076579225834E-01
a4=0.479137145607681E-01
a5=0.128379167095513E+00
###
b1=0.301048631703895E-02
b2=0.538971687740286E-01
b3=0.375795757275549E+00
###
p1=-1.36864857382717E-07
p2=5.64195517478974E-01
p3=7.21175825088309E+00
p4=4.31622272220567E+01
p5=1.52989285046940E+02
p6=3.39320816734344E+02
p7=4.51918953711873E+02
p8=3.00459261020162E+02
###
q1=1.00000000000000E+00
q2=1.27827273196294E+01
q3=7.70001529352295E+01
q4=2.77585444743988E+02
q5=6.38980264465631E+02
q6=9.31354094850610E+02
q7=7.90950925327898E+02
q8=3.00459260956983E+02
###
r1=2.10144126479064E+00
r2=2.62370141675169E+01
r3=2.13688200555087E+01
r4=4.65807828718470E+00
r5=2.82094791773523E-01
###
s1=9.41537750555460E+01
s2=1.87114811799590E+02
s3=9.90191814623914E+01
s4=1.80124575948747E+01
###
#end of constants
#the orig version is meant for a single point
#need to modify to work on an array
erf = np.zeros(len(y))
for i,x in enumerate(y):
ax=abs(x)
#change gotos into something sensible
if ax <= 0.5E0:
t=x*x
top = ((((a1*t+a2)*t+a3)*t+a4)*t+a5) + 1.0E0
bot = ((b1*t+b2)*t+b3)*t + 1.0E0
erf[i] = x * (top/bot)
elif 0.5E0 < ax <= 4.0E0:
top = ((((((p1*ax+p2)*ax+p3)*ax+p4)*ax+p5)*ax+p6)*ax + p7)*ax + p8
bot = ((((((q1*ax+q2)*ax+q3)*ax+q4)*ax+q5)*ax+q6)*ax + q7)*ax + | |
n_iter=2000,
generate_prior_predictive=False,
random_state=None):
"""Sample from prior for a fixed model."""
rng = check_random_state(random_state)
y, X = patsy.dmatrices(formula_like, data=data)
y, X = _check_design_matrices(y, X)
outcome_names = y.design_info.column_names
coef_names = [rdu.get_default_coefficient_name(n)
for n in X.design_info.column_names]
n_coefs = len(coef_names)
beta, tau_sq, lp = _sample_parameters_conjugate_priors(
n_coefs, a_tau=a_tau, b_tau=b_tau, nu_sq=nu_sq,
size=n_iter, random_state=rng)
chains = collections.OrderedDict({'tau_sq': tau_sq})
for j, t in enumerate(coef_names):
chains[t] = beta[:, j]
chains['lp__'] = lp
outcome_chains = None
if generate_prior_predictive:
sampled_outcomes, _ = _sample_outcomes(
X, beta, tau_sq, random_state=rng)
outcome_chains = collections.OrderedDict(
{n: sampled_outcomes[..., i]
for i, n in enumerate(outcome_names)})
args = {'random_state': random_state, 'n_iter': n_iter}
results = {'chains': chains,
'args': args,
'acceptance': 1.0,
'accept_stat': np.ones((n_iter,), dtype=float),
'mean_lp__': np.mean(chains['lp__'])}
prior_predictive = None
if generate_prior_predictive:
prior_predictive = {
'chains': outcome_chains,
'args': args,
'acceptance': 1.0,
'accept_stat': np.ones((n_iter,), dtype=float)
}
return results, prior_predictive
def _sample_posterior_fixed_model(formula_like, data=None,
                                  a_tau=1.0, b_tau=1.0, nu_sq=1.0,
                                  n_iter=2000, thin=1,
                                  generate_posterior_predictive=False,
                                  random_state=None):
    """Sample from posterior for a fixed model.

    Draws ``n_iter`` posterior samples of the regression coefficients,
    the noise precision ``tau_sq`` and the log-posterior for the single
    model described by ``formula_like``, storing every ``thin``-th draw.
    Returns a ``(results, posterior_predictive)`` pair, where
    ``posterior_predictive`` is None unless requested.
    """
    rng = check_random_state(random_state)

    outcomes, design = patsy.dmatrices(formula_like, data=data)
    outcomes, design = _check_design_matrices(outcomes, design)

    outcome_names = outcomes.design_info.column_names
    coef_names = [rdu.get_default_coefficient_name(col)
                  for col in design.design_info.column_names]

    beta, tau_sq, lp = _sample_parameters_posterior(
        outcomes, design, a_tau=a_tau, b_tau=b_tau, nu_sq=nu_sq,
        size=n_iter, random_state=rng)

    # Store thinned traces, keyed by parameter name.
    chains = collections.OrderedDict()
    chains['tau_sq'] = tau_sq[::thin]
    for coef_index, coef_name in enumerate(coef_names):
        chains[coef_name] = beta[::thin, coef_index]
    chains['lp__'] = lp[::thin]

    args = {'random_state': random_state, 'n_iter': n_iter,
            'thin': thin}
    # Direct (conjugate) sampling always "accepts" every draw.
    # NOTE(review): accept_stat keeps the unthinned length n_iter even
    # when thin > 1, while the chains are thinned -- confirm downstream
    # consumers expect this.
    results = {'chains': chains,
               'args': args,
               'acceptance': 1.0,
               'accept_stat': np.ones((n_iter,), dtype=float),
               'mean_lp__': np.mean(chains['lp__'])}

    posterior_predictive = None
    if generate_posterior_predictive:
        sampled_outcomes, _ = _sample_outcomes(
            design, beta, tau_sq, random_state=rng)
        outcome_chains = collections.OrderedDict(
            (name, sampled_outcomes[::thin, ..., column])
            for column, name in enumerate(outcome_names))
        posterior_predictive = {'chains': outcome_chains,
                                'args': args,
                                'acceptance': 1.0,
                                'accept_stat': np.ones((n_iter,),
                                                       dtype=float)}
    return results, posterior_predictive
def _get_structure_samples_posterior_predictive(fit, formula_like, data=None,
                                                a_tau=1.0, b_tau=1.0,
                                                nu_sq=1.0,
                                                force_intercept=True,
                                                random_state=None):
    """Get posterior predictive samples corresponding to structure sample.

    For every structure (indicator vector ``k``) stored in ``fit``, draws
    one posterior-predictive sample of the outcomes from the matching
    fixed model and assembles the draws into per-chain sample dicts with
    the same chain/draw layout as ``fit``.

    The ``fit`` dict must provide the keys read below: 'samples',
    'max_nonzero', 'n_save', 'thin', 'n_iter', 'warmup', 'warmup2' and
    'permutation'.
    """
    rng = check_random_state(random_state)
    y, X = patsy.dmatrices(formula_like, data=data)
    y, X = _check_design_matrices(y, X)
    lhs_terms = y.design_info.terms
    outcome_names = y.design_info.column_names
    optional_terms, optional_term_names = _get_optional_terms(
        X.design_info.terms, term_names=X.design_info.term_names,
        force_intercept=force_intercept)
    n_outcomes = len(outcome_names)
    max_nonzero = fit['max_nonzero']
    allow_exchanges = fit['samples'][0]['args']['allow_exchanges']
    n_chains = len(fit['samples'])
    n_save = fit['n_save']
    thin = fit['thin']
    # One independent seed per chain so each chain's predictive draws can
    # be reproduced separately.
    random_seeds = rng.choice(1000000 * n_chains,
                              size=n_chains, replace=False)
    samples = []
    for i in range(n_chains):
        chain_rng = check_random_state(random_seeds[i])
        chain_k = fit['samples'][i]['chains']['k']
        sampled_outcomes = None
        for j in range(n_save[i]):
            # Rebuild the model formula implied by the j-th sampled
            # indicator vector and draw from its posterior predictive.
            model_desc = _get_model_description(
                chain_k[j], lhs_terms=lhs_terms,
                optional_terms=optional_terms,
                force_intercept=force_intercept)
            _, model_sample = _sample_posterior_fixed_model(
                model_desc, data=data, a_tau=a_tau, b_tau=b_tau, nu_sq=nu_sq,
                n_iter=1,
                generate_posterior_predictive=True,
                random_state=chain_rng)
            if sampled_outcomes is None:
                # Allocate storage lazily, once the number of rows per
                # predictive draw is known.
                n_samples = model_sample['chains'][outcome_names[0]].shape[0]
                sampled_outcomes = np.zeros((n_save[i], n_samples, n_outcomes))
            for m, n in enumerate(outcome_names):
                sampled_outcomes[j, ..., m] = model_sample['chains'][n]
        chains = collections.OrderedDict(
            {n: sampled_outcomes[..., m]
             for m, n in enumerate(outcome_names)})
        args = {'random_state': random_seeds[i], 'n_iter': fit['n_iter'],
                'thin': thin,
                'allow_exchanges': allow_exchanges,
                'max_nonzero': max_nonzero}
        samples.append({'chains': chains,
                        'args': args,
                        'acceptance': fit['samples'][i]['acceptance'],
                        'accept_stat': fit['samples'][i]['accept_stat']})
    # Mirror the bookkeeping fields of the original fit so the result can
    # be consumed by the same conversion helpers.
    posterior_predictive = {
        'samples': samples,
        'n_chains': n_chains,
        'n_iter': fit['n_iter'],
        'warmup': fit['warmup'],
        'thin': fit['thin'],
        'n_save': fit['n_save'],
        'warmup2': fit['warmup2'],
        'max_nonzero': fit['max_nonzero'],
        'permutation': fit['permutation'],
        'random_seeds': random_seeds
    }
    return posterior_predictive
def _sample_prior_full(formula_like, data=None,
                       a_tau=1.0, b_tau=1.0, nu_sq=1.0,
                       max_terms=None, n_iter=2000,
                       force_intercept=True,
                       generate_prior_predictive=False,
                       random_state=None):
    """Sample from prior over all models.

    Draws ``n_iter`` model structures (indicator vectors ``k``) from the
    uniform structure prior, optionally drawing one prior-predictive
    outcome sample per structure from the corresponding fixed model.
    Returns a ``(results, prior_predictive)`` pair; ``prior_predictive``
    is None unless ``generate_prior_predictive`` is True.
    """
    rng = check_random_state(random_state)
    lhs_terms, outcome_names, optional_terms, optional_term_names = \
        _get_outcome_and_optional_terms(
            formula_like, data=data, force_intercept=force_intercept)
    n_outcomes = len(outcome_names)
    n_terms = len(optional_terms)
    max_terms = check_max_nonzero_indicators(
        max_terms, n_indicators=n_terms)
    k = np.zeros((n_iter, n_terms), dtype=int)
    # One 0/1 trace per optional term, keyed by its indicator name.
    named_indicators = {
        rdu.get_default_indicator_name(t): np.zeros((n_iter,), dtype=int)
        for t in optional_term_names}
    lp = np.empty((n_iter,))
    sampled_outcomes = None
    for i in range(n_iter):
        k[i], lp[i] = _sample_uniform_structures_prior(
            n_terms, max_terms=max_terms, random_state=rng)
        # Mirror the sampled indicator vector into the named traces.
        for m in range(n_terms):
            if k[i, m] == 1:
                ind = rdu.get_default_indicator_name(
                    optional_term_names[m])
                named_indicators[ind][i] = 1
        if generate_prior_predictive:
            model_desc = _get_model_description(
                k[i], lhs_terms=lhs_terms, optional_terms=optional_terms,
                force_intercept=force_intercept)
            _, model_sample = _sample_prior_fixed_model(
                model_desc, data=data, a_tau=a_tau, b_tau=b_tau, nu_sq=nu_sq,
                n_iter=1,
                generate_prior_predictive=generate_prior_predictive,
                random_state=rng)
            if sampled_outcomes is None:
                # Allocate storage lazily, once the per-draw sample size
                # is known.
                n_samples = model_sample['chains'][outcome_names[0]].shape[0]
                sampled_outcomes = np.zeros((n_iter, n_samples, n_outcomes))
            for j, n in enumerate(outcome_names):
                sampled_outcomes[i, ..., j] = model_sample['chains'][n]
    chains = collections.OrderedDict({'k': k})
    for ind in named_indicators:
        chains[ind] = named_indicators[ind]
    chains['lp__'] = lp
    args = {'random_state': random_state, 'n_iter': n_iter,
            'max_nonzero': max_terms}
    # Direct sampling from the prior always "accepts" every draw.
    results = {'chains': chains,
               'args': args,
               'acceptance': 1.0,
               'accept_stat': np.ones((n_iter,), dtype=float),
               'mean_lp__': np.mean(chains['lp__'])}
    prior_predictive = None
    if generate_prior_predictive:
        outcome_chains = collections.OrderedDict(
            {n: sampled_outcomes[..., i]
             for i, n in enumerate(outcome_names)})
        prior_predictive = {
            'chains': outcome_chains,
            'args': args,
            'acceptance': 1.0,
            'accept_stat': np.ones((n_iter,), dtype=float)
        }
    return results, prior_predictive
def _sample_posterior_full(formula_like, data=None,
                           a_tau=1.0, b_tau=1.0, nu_sq=1.0,
                           n_chains=4, n_iter=1000, warmup=None, thin=1,
                           verbose=False,
                           n_jobs=-1, max_terms=None,
                           restart_file=None, init='random',
                           allow_exchanges=True,
                           generate_posterior_predictive=False,
                           force_intercept=True,
                           random_state=None):
    """Sample from model posterior using MC3 algorithm.

    Runs ``n_chains`` coupled chains over model structures via
    ``sample_stepwise_mc3``, optionally restarting from a saved fit, and
    returns the converted inference-data object (optionally including
    posterior-predictive samples).
    """
    rng = check_random_state(random_state)
    y, X = patsy.dmatrices(formula_like, data=data)
    y, X = _check_design_matrices(y, X)
    outcome_names = y.design_info.column_names
    optional_terms, optional_term_names = _get_optional_terms(
        X.design_info.terms, term_names=X.design_info.term_names,
        force_intercept=force_intercept)
    constant_data_names = None
    if data is not None:
        # Everything that is not an outcome is treated as constant data.
        constant_data_names = [n for n in data if n not in outcome_names]
    n_terms = len(optional_terms)
    n_chains = rdu.check_number_of_chains(n_chains)
    if restart_file is None:
        initial_k = initialize_stepwise_mc3(
            n_terms, n_chains=n_chains, max_nonzero=max_terms, method=init,
            random_state=rng)
    else:
        # Resume each chain from the last saved draw of a previous fit;
        # the saved chain count takes precedence over the request.
        restart_fit = az.from_netcdf(restart_file)
        if n_chains != restart_fit.posterior.sizes['chain']:
            warnings.warn(
                'Number of saved chains does not match number '
                'of requested chains '
                '(got n_chains=%d but saved n_chains=%d)' %
                (n_chains, restart_fit.posterior.sizes['chain']),
                UserWarning)
        n_chains = restart_fit.posterior.sizes['chain']
        initial_k = restart_fit.posterior['k'].isel(draw=-1).data.copy()
    def logp(k, data):
        # Unnormalized log-posterior probability of a model structure.
        return bayes_regression_log_model_posterior(
            k, data=data, max_terms=max_terms,
            a_tau=a_tau, b_tau=b_tau, nu_sq=nu_sq,
            formula_like=formula_like,
            force_intercept=force_intercept)
    fit = sample_stepwise_mc3(
        initial_k, logp, data=data,
        n_chains=n_chains, n_iter=n_iter, thin=thin,
        warmup=warmup, verbose=verbose, n_jobs=n_jobs,
        max_nonzero=max_terms, allow_exchanges=allow_exchanges,
        random_state=rng)
    # Generate named indicator variables for convenience.
    fit = _add_named_indicator_variables_to_fit(
        fit, term_names=optional_term_names)
    coords = {'term': optional_term_names}
    dims = {'k': ['term']}
    posterior_predictive = None
    if generate_posterior_predictive:
        posterior_predictive = _get_structure_samples_posterior_predictive(
            fit, formula_like, data=data,
            a_tau=a_tau, b_tau=b_tau, nu_sq=nu_sq,
            force_intercept=force_intercept, random_state=rng)
    return convert_samples_dict_to_inference_data(
        posterior=fit, posterior_predictive=posterior_predictive,
        observed_data=data, constant_data=data, save_warmup=True,
        observed_data_names=outcome_names,
        constant_data_names=constant_data_names,
        coords=coords, dims=dims)
def _unique_models(sample_ds, indicator_var='k'):
"""Get list of unique models."""
n_total = sample_ds.sizes['chain'] * sample_ds.sizes['draw']
indicator_shape = sample_ds[indicator_var].shape
if len(indicator_shape) > 2:
n_indicators = indicator_shape[-1]
flat_indicators = np.reshape(
sample_ds[indicator_var].data, (n_total, n_indicators))
else:
flat_indicators = np.reshape(
sample_ds[indicator_var].data, (n_total, 1))
return np.unique(flat_indicators, axis=0)
def _count_possible_models(sample_ds, max_nonzero=None, indicator_var='k'):
"""Count the number of possible models."""
indicator_shape = sample_ds[indicator_var].shape
if len(indicator_shape) > 2:
n_indicators = indicator_shape[-1]
else:
n_indicators = 1
if max_nonzero is None:
max_nonzero = n_indicators
return int(sum([sp.comb(n_indicators, i)
for i in range(max_nonzero + 1)]))
def _get_model_lookup(sample_ds, indicator_var='k'):
    """Map each distinct indicator configuration to an integer label.

    Labels are assigned in the (sorted) order produced by
    ``_unique_models``; keys are tuples so they are hashable.
    """
    unique_k = _unique_models(sample_ds, indicator_var=indicator_var)
    return {tuple(row): label for label, row in enumerate(unique_k)}
def _get_model_indicators(sample_ds, only_sampled_models=True,
                          max_nonzero=None, indicator_var='k'):
    """Label every draw with an integer identifying its model.

    Returns ``(z, model_indicators)`` where ``z`` has shape
    ``(n_chains, n_draws)`` and holds the model label of each draw, and
    ``model_indicators`` lists all labels (optionally padded with labels
    for models that were never visited).
    """
    lookup = _get_model_lookup(sample_ds, indicator_var=indicator_var)
    n_chains = sample_ds.sizes['chain']
    n_draws = sample_ds.sizes['draw']
    z = np.empty((n_chains, n_draws), dtype=np.uint64)
    for chain_idx in range(n_chains):
        chain_values = sample_ds[indicator_var].isel(chain=chain_idx).data
        for draw_idx in range(n_draws):
            z[chain_idx, draw_idx] = lookup[tuple(chain_values[draw_idx])]
    model_indicators = list(lookup.values())
    if not only_sampled_models:
        # Pad with labels for the models that were never visited so the
        # label set covers the whole model space.
        n_possible_models = _count_possible_models(
            sample_ds, max_nonzero=max_nonzero, indicator_var=indicator_var)
        model_indicators.extend(range(len(model_indicators),
                                      n_possible_models))
    return z, model_indicators
def structure_sample_convergence_rate(sample_ds, max_nonzero=None,
                                      indicator_var='k', sparse=True,
                                      combine_chains=False):
    """Estimate convergence rate for structure chains.

    Labels each draw with an integer model index and delegates the rate
    estimate to ``estimate_convergence_rate``.  Only models actually
    visited by the sampler are considered.
    """
    # NOTE(review): model_indicators is unused below; only the per-draw
    # labels are needed by estimate_convergence_rate.
    z, model_indicators = _get_model_indicators(
        sample_ds, max_nonzero=max_nonzero,
        only_sampled_models=True,
        indicator_var=indicator_var)
    return estimate_convergence_rate(z, sparse=sparse,
                                     combine_chains=combine_chains)
def structure_sample_chi2_convergence_diagnostics(fit, max_nonzero=None,
                                                  indicator_var='k',
                                                  batch=True, **kwargs):
    """Calculate chi squared convergence diagnostics.

    Labels each draw with an integer model index and applies either the
    batch or the plain chi-squared convergence check to the resulting
    chains.
    """
    samples = fit.posterior
    if batch and hasattr(fit, 'warmup_posterior'):
        # Batch diagnostics operate on the full history, warmup included.
        samples = xr.concat([fit.warmup_posterior, fit.posterior],
                            dim='draw')
    z, _ = _get_model_indicators(samples, max_nonzero=max_nonzero,
                                 only_sampled_models=True,
                                 indicator_var=indicator_var)
    diagnostic = (rjmcmc_batch_chisq_convergence if batch
                  else rjmcmc_chisq_convergence)
    return diagnostic(z, **kwargs)
def structure_sample_marginal_chi2(fit, max_nonzero=None, batch=True,
                                   indicator_var='k', **kwargs):
    """Calculate convergence diagnostics for model averaged indicators.

    Runs a chi-squared convergence check separately on the 0/1 trace of
    each term's indicator variable and returns a dict keyed by term name.
    """
    # NOTE(review): max_nonzero is accepted but never used in this
    # function -- possibly kept for signature symmetry; confirm.
    if batch and hasattr(fit, 'warmup_posterior'):
        # Batch diagnostics operate on the full history, warmup included.
        samples = xr.concat(
            [fit.warmup_posterior, fit.posterior], dim='draw')
    else:
        samples = fit.posterior
    term_names = samples.term.data
    results = {}
    for t in term_names:
        z = samples[indicator_var].sel(term=t).astype(int).data
        if batch:
            results[t] = rjmcmc_batch_chisq_convergence(z, **kwargs)
        else:
            results[t] = rjmcmc_chisq_convergence(z, **kwargs)
    return results
def structure_sample_ks_convergence_diagnostics(fit, max_nonzero=None,
                                                indicator_var='k',
                                                batch=True, **kwargs):
    """Calculate Kolmogorov-Smirnov convergence diagnostics.

    Labels each draw with an integer model index and applies either the
    batch or the plain KS-based convergence check to the resulting
    chains.
    """
    if batch and hasattr(fit, 'warmup_posterior'):
        # Batch diagnostics operate on the full history, warmup included.
        samples = xr.concat(
            [fit.warmup_posterior, fit.posterior], dim='draw')
    else:
        samples = fit.posterior
    z, _ = _get_model_indicators(
        samples, max_nonzero=max_nonzero,
        only_sampled_models=True,
        indicator_var=indicator_var)
    if batch:
        return rjmcmc_batch_kstest_convergence(
            z, **kwargs)
    return rjmcmc_kstest_convergence(
        z, **kwargs)
def structure_sample_marginal_ks(fit, max_nonzero=None, batch=True,
                                 indicator_var='k', **kwargs):
    """Calculate KS convergence diagnostics for model averaged indicators.

    Runs a Kolmogorov-Smirnov convergence check separately on the 0/1
    trace of each term's indicator variable and returns a dict keyed by
    term name.
    """
    # NOTE(review): max_nonzero is accepted but never used in this
    # function -- possibly kept for signature symmetry; confirm.
    if batch and hasattr(fit, 'warmup_posterior'):
        # Batch diagnostics operate on the full history, warmup included.
        samples = xr.concat(
            [fit.warmup_posterior, fit.posterior], dim='draw')
    else:
        samples = fit.posterior
    term_names = samples.term.data
    results = {}
    for t in term_names:
        z = samples[indicator_var].sel(term=t).astype(int).data
        if batch:
            results[t] = rjmcmc_batch_kstest_convergence(z, **kwargs)
        else:
            results[t] = rjmcmc_kstest_convergence(z, **kwargs)
    return results
def structure_sample_diagnostics(sample_ds, max_nonzero=None,
                                 n_samples=100, only_sampled_models=True,
                                 epsilon=None, sparse=True,
                                 min_epsilon=1e-6, tolerance=1e-4,
                                 fit_kwargs=None, indicator_var='k',
                                 random_state=None):
    """Estimate the stationary distribution over sampled model structures.

    Labels each draw with an integer model index and forwards the labels,
    together with the tolerance and sampling options, to
    ``estimate_stationary_distribution``.
    """
    z, model_indicators = _get_model_indicators(
        sample_ds, max_nonzero=max_nonzero,
        only_sampled_models=only_sampled_models,
        indicator_var=indicator_var)
    return estimate_stationary_distribution(
        z, model_indicators=model_indicators, sparse=sparse,
        epsilon=epsilon, n_samples=n_samples,
        min_epsilon=min_epsilon, tolerance=tolerance,
        fit_kwargs=fit_kwargs, random_state=random_state)
def _get_model_indicator_arrays(sample_ds, indicator_var='k'):
"""Get values of indicator variables in each model."""
indicator_shape = sample_ds[indicator_var].shape
if len(indicator_shape) > 2:
n_indicators = indicator_shape[-1]
else:
n_indicators = 1
term_names = | |
# Repository: berryman121/faxplus-python
# coding: utf-8
"""
FAX.PLUS REST API
This is the fax.plus API v1 developed for third party developers and organizations. In order to have a better coding experience with this API, let's quickly go through some points:<br /><br /> - This API assumes **/accounts** as an entry point with the base url of **https://restapi.fax.plus/v1**. <br /><br /> - This API treats all date and times sent to it in requests as **UTC**. Also, all dates and times returned in responses are in **UTC**<br /><br /> - Once you have an access_token, you can easily send a request to the resource server with the base url of **https://restapi.fax.plus/v1** to access your permitted resources. As an example to get the user's profile info you would send a request to **https://restapi.fax.plus/v1/accounts/self** when **Authorization** header is set to \"Bearer YOUR_ACCESS_TOKEN\" and custom header of **x-fax-clientid** is set to YOUR_CLIENT_ID # noqa: E501
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from faxplus.models.account_settings_send_fax_retry import AccountSettingsSendFaxRetry # noqa: F401,E501
from faxplus.models.outbox_file_changes import OutboxFileChanges # noqa: F401,E501
from faxplus.models.outbox_initiated_from import OutboxInitiatedFrom # noqa: F401,E501
from faxplus.models.outbox_status_changes import OutboxStatusChanges # noqa: F401,E501
from faxplus.models.payload_outbox_comment import PayloadOutboxComment # noqa: F401,E501
class Outbox(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'files': 'list[str]',
'src': 'str',
'retry': 'AccountSettingsSendFaxRetry',
'should_enhance': 'bool',
'uid': 'str',
'designated_src': 'str',
'ip': 'str',
'page_count': 'int',
'comment': 'PayloadOutboxComment',
'id': 'str',
'file_changes': 'list[OutboxFileChanges]',
'to': 'list[str]',
'status': 'str',
'status_changes': 'list[OutboxStatusChanges]',
'contact_name': 'str',
'send_time': 'str',
'initiated_from': 'OutboxInitiatedFrom',
'submit_time': 'str',
'last_updated_status_time': 'str',
'options': 'object',
'extra_info': 'object'
}
attribute_map = {
'files': 'files',
'src': 'src',
'retry': 'retry',
'should_enhance': 'should_enhance',
'uid': 'uid',
'designated_src': 'designated_src',
'ip': 'ip',
'page_count': 'page_count',
'comment': 'comment',
'id': 'id',
'file_changes': 'file_changes',
'to': 'to',
'status': 'status',
'status_changes': 'status_changes',
'contact_name': 'contact_name',
'send_time': 'send_time',
'initiated_from': 'initiated_from',
'submit_time': 'submit_time',
'last_updated_status_time': 'last_updated_status_time',
'options': 'options',
'extra_info': 'extra_info'
}
def __init__(self, files=None, src=None, retry=None, should_enhance=None, uid=None, designated_src=None, ip=None, page_count=None, comment=None, id=None, file_changes=None, to=None, status=None, status_changes=None, contact_name=None, send_time=None, initiated_from=None, submit_time=None, last_updated_status_time=None, options=None, extra_info=None): # noqa: E501
"""Outbox - a model defined in Swagger""" # noqa: E501
self._files = None
self._src = None
self._retry = None
self._should_enhance = None
self._uid = None
self._designated_src = None
self._ip = None
self._page_count = None
self._comment = None
self._id = None
self._file_changes = None
self._to = None
self._status = None
self._status_changes = None
self._contact_name = None
self._send_time = None
self._initiated_from = None
self._submit_time = None
self._last_updated_status_time = None
self._options = None
self._extra_info = None
self.discriminator = None
if files is not None:
self.files = files
if src is not None:
self.src = src
if retry is not None:
self.retry = retry
if should_enhance is not None:
self.should_enhance = should_enhance
if uid is not None:
self.uid = uid
if designated_src is not None:
self.designated_src = designated_src
if ip is not None:
self.ip = ip
if page_count is not None:
self.page_count = page_count
if comment is not None:
self.comment = comment
if id is not None:
self.id = id
if file_changes is not None:
self.file_changes = file_changes
if to is not None:
self.to = to
if status is not None:
self.status = status
if status_changes is not None:
self.status_changes = status_changes
if contact_name is not None:
self.contact_name = contact_name
if send_time is not None:
self.send_time = send_time
if initiated_from is not None:
self.initiated_from = initiated_from
if submit_time is not None:
self.submit_time = submit_time
if last_updated_status_time is not None:
self.last_updated_status_time = last_updated_status_time
if options is not None:
self.options = options
if extra_info is not None:
self.extra_info = extra_info
@property
def files(self):
"""Gets the files of this Outbox. # noqa: E501
:return: The files of this Outbox. # noqa: E501
:rtype: list[str]
"""
return self._files
@files.setter
def files(self, files):
"""Sets the files of this Outbox.
:param files: The files of this Outbox. # noqa: E501
:type: list[str]
"""
self._files = files
@property
def src(self):
"""Gets the src of this Outbox. # noqa: E501
:return: The src of this Outbox. # noqa: E501
:rtype: str
"""
return self._src
@src.setter
def src(self, src):
"""Sets the src of this Outbox.
:param src: The src of this Outbox. # noqa: E501
:type: str
"""
self._src = src
@property
def retry(self):
"""Gets the retry of this Outbox. # noqa: E501
:return: The retry of this Outbox. # noqa: E501
:rtype: AccountSettingsSendFaxRetry
"""
return self._retry
@retry.setter
def retry(self, retry):
"""Sets the retry of this Outbox.
:param retry: The retry of this Outbox. # noqa: E501
:type: AccountSettingsSendFaxRetry
"""
self._retry = retry
@property
def should_enhance(self):
"""Gets the should_enhance of this Outbox. # noqa: E501
:return: The should_enhance of this Outbox. # noqa: E501
:rtype: bool
"""
return self._should_enhance
@should_enhance.setter
def should_enhance(self, should_enhance):
"""Sets the should_enhance of this Outbox.
:param should_enhance: The should_enhance of this Outbox. # noqa: E501
:type: bool
"""
self._should_enhance = should_enhance
@property
def uid(self):
"""Gets the uid of this Outbox. # noqa: E501
:return: The uid of this Outbox. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this Outbox.
:param uid: The uid of this Outbox. # noqa: E501
:type: str
"""
self._uid = uid
@property
def designated_src(self):
"""Gets the designated_src of this Outbox. # noqa: E501
:return: The designated_src of this Outbox. # noqa: E501
:rtype: str
"""
return self._designated_src
@designated_src.setter
def designated_src(self, designated_src):
"""Sets the designated_src of this Outbox.
:param designated_src: The designated_src of this Outbox. # noqa: E501
:type: str
"""
self._designated_src = designated_src
@property
def ip(self):
"""Gets the ip of this Outbox. # noqa: E501
:return: The ip of this Outbox. # noqa: E501
:rtype: str
"""
return self._ip
@ip.setter
def ip(self, ip):
"""Sets the ip of this Outbox.
:param ip: The ip of this Outbox. # noqa: E501
:type: str
"""
self._ip = ip
@property
def page_count(self):
"""Gets the page_count of this Outbox. # noqa: E501
:return: The page_count of this Outbox. # noqa: E501
:rtype: int
"""
return self._page_count
@page_count.setter
def page_count(self, page_count):
"""Sets the page_count of this Outbox.
:param page_count: The page_count of this Outbox. # noqa: E501
:type: int
"""
self._page_count = page_count
@property
def comment(self):
"""Gets the comment of this Outbox. # noqa: E501
:return: The comment of this Outbox. # noqa: E501
:rtype: PayloadOutboxComment
"""
return self._comment
@comment.setter
def comment(self, comment):
"""Sets the comment of this Outbox.
:param comment: The comment of this Outbox. # noqa: E501
:type: PayloadOutboxComment
"""
self._comment = comment
@property
def id(self):
"""Gets the id of this Outbox. # noqa: E501
:return: The id of this Outbox. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Outbox.
:param id: The id of this Outbox. # noqa: E501
:type: str
"""
self._id = id
@property
def file_changes(self):
"""Gets the file_changes of this Outbox. # noqa: E501
:return: The file_changes of this Outbox. # noqa: E501
:rtype: list[OutboxFileChanges]
"""
return self._file_changes
@file_changes.setter
def file_changes(self, file_changes):
"""Sets the file_changes of this Outbox.
:param file_changes: The file_changes of this Outbox. # noqa: E501
:type: list[OutboxFileChanges]
"""
self._file_changes = file_changes
@property
def to(self):
"""Gets the to of this Outbox. # noqa: E501
:return: The to of this Outbox. # noqa: E501
:rtype: list[str]
"""
return self._to
@to.setter
def to(self, to):
"""Sets the to of this Outbox.
:param to: The to of this Outbox. # noqa: E501
:type: list[str]
"""
self._to = to
@property
def status(self):
"""Gets the status of this Outbox. # noqa: E501
:return: The status of this Outbox. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Outbox.
:param status: The status of this Outbox. # noqa: E501
:type: str
"""
self._status = status
| |
"""dRest core API connection library."""
import re
from . import interface, resource, request, serialization, meta, exc
from . import response
class API(meta.MetaMixin):
    """
    The API class acts as a high level 'wrapper' around multiple lower level
    handlers. Most of the meta arguments are optionally passed to one or
    more handlers upon instantiation. All handler classes must be passed
    *un-instantiated*.

    Arguments:

        baseurl
            Translated to self.baseurl (for convenience).

    Optional Arguments and Meta:

        debug
            Boolean. Toggle debug console output. Default: False.

        baseurl
            The base url to the API endpoint.

        request_handler
            The Request Handler class that performs the actual HTTP (or other)
            requests. Default: drest.request.RequestHandler.

        resource_handler
            The Resource Handler class that is used when api.add_resource is
            called. Default: drest.resource.ResourceHandler.

        response_handler
            An un-instantiated Response Handler class used to return
            responses to the caller. Default: drest.response.ResponseHandler.

        serialization_handler
            An un-instantiated Serialization Handler class used to
            serialize/deserialize data.
            Default: drest.serialization.JsonSerializationHandler.

        ignore_ssl_validation
            Boolean. Whether or not to ignore ssl validation errors.
            Default: False

        serialize
            Boolean. Whether or not to serialize data before sending
            requests. Default: False.

        deserialize
            Boolean. Whether or not to deserialize data before returning
            the Response object. Default: True.

        trailing_slash
            Boolean. Whether or not to append a trailing slash to the
            request url. Default: True.

        extra_headers
            A dictionary of key value pairs that are added to the HTTP headers
            of *every* request. Passed to request_handler.add_header().

        extra_params
            A dictionary of key value pairs that are added to the POST, or
            'payload' data sent with *every* request. Passed to
            request_handler.add_param().

        extra_url_params
            A dictionary of key value pairs that are added to the GET/URL
            parameters of *every* request. Passed to
            request_handler.add_extra_url_param().

        timeout
            The amount of seconds where a request should timeout. Default: 30

    Usage

    .. code-block:: python

        import drest

        # Create a generic client api object
        api = drest.API('http://localhost:8000/api/v1/')

        # Or something more customized:
        api = drest.API(
            baseurl='http://localhost:8000/api/v1/',
            trailing_slash=False,
            ignore_ssl_validation=True,
            )

        # Or even more so:
        class MyAPI(drest.API):
            class Meta:
                baseurl = 'http://localhost:8000/api/v1/'
                extra_headers = dict(MyKey='Some Value For Key')
                extra_params = dict(some_param='some_value')
                request_handler = MyCustomRequestHandler
        api = MyAPI()

        # By default, the API support HTTP Basic Auth with username/password.
        api.auth('john.doe', 'password')

        # Make calls openly
        response = api.make_request('GET', '/users/1/')

        # Or attach a resource
        api.add_resource('users')

        # Get available resources
        api.resources

        # Get all objects of a resource
        response = api.users.get()

        # Get a single resource with primary key '1'
        response = api.users.get(1)

        # Update a resource with primary key '1'
        response = api.users.get(1)
        updated_data = response.data.copy()
        updated_data['first_name'] = 'John'
        updated_data['last_name'] = 'Doe'

        response = api.users.put(updated_data['id'], updated_data)

        # Create a resource
        user_data = dict(
            username='john.doe',
            password='secret',
            first_name='John',
            last_name='Doe',
            )
        response = api.users.post(user_data)

        # Delete a resource with primary key '1'
        response = api.users.delete(1)

    """

    class Meta:
        # Default handlers and per-request extras; subclasses override
        # these via their own Meta class.
        baseurl = None
        request_handler = request.RequestHandler
        resource_handler = resource.RESTResourceHandler
        extra_headers = {}
        extra_params = {}
        extra_url_params = {}

    def __init__(self, baseurl=None, **kw):
        if baseurl:
            kw['baseurl'] = baseurl
        super(API, self).__init__(**kw)
        self.baseurl = self._meta.baseurl.strip('/')
        self._resources = []
        self._setup_request_handler(**kw)

    def _setup_request_handler(self, **kw):
        """Instantiate the request handler and mirror meta options onto it."""
        request.validate(self._meta.request_handler)
        self.request = self._meta.request_handler(**kw)

        # just makes things easier to be able to wrap meta under the api
        # and pass it to the request handler.
        # (The loop variable was previously named 'meta', which shadowed
        # the imported drest 'meta' module inside this method.)
        for attr_name in dir(self._meta):
            if attr_name.startswith('_'):
                continue
            if hasattr(self.request._meta, attr_name):
                setattr(self.request._meta, attr_name,
                        getattr(self._meta, attr_name))

        for key in self._meta.extra_headers:
            self.request.add_header(key, self._meta.extra_headers[key])
        for key in self._meta.extra_params:
            self.request.add_param(key, self._meta.extra_params[key])
        for key in self._meta.extra_url_params:
            self.request.add_url_param(key, self._meta.extra_url_params[key])

    def auth(self, user, password, **kw):
        """
        This authentication mechanism implements HTTP Basic Authentication.

        Required Arguments:

            user
                The API username.

            password
                The API password.

        """
        self.request.set_auth_credentials(user, password)

    def make_request(self, method, path, params=None, headers=None):
        """
        Make a request against the API.

        Required Arguments:

            method
                The HTTP method of the request ('GET', 'POST', ...).

            path
                The path of the request, relative to the base url.

        Optional Arguments:

            params
                Payload parameters of the request.

            headers
                Additional HTTP headers of the request.

        """
        # Use None sentinels rather than mutable default arguments: a
        # shared default dict would leak params/headers between calls if
        # the request handler mutated them.
        params = {} if params is None else params
        headers = {} if headers is None else headers
        url = "%s/%s/" % (self.baseurl.strip('/'), path.strip('/'))
        return self.request.make_request(method, url, params, headers)

    @property
    def resources(self):
        """List of resource names registered via add_resource()."""
        return self._resources

    def add_resource(self, name, resource_handler=None, path=None):
        """
        Add a resource handler to the api object.

        Required Arguments:

            name
                The name of the resource. This is generally the basic name
                of the resource on the API. For example '/api/v0/users/'
                would likely be called 'users' and will be accessible as
                'api.users' from which additional calls can be made. For
                example 'api.users.get()'.

        Optional Arguments:

            resource_handler
                The resource handler class to use. Defaults to
                self._meta.resource_handler.

            path
                The path to the resource on the API (after the base url).
                Defaults to '/<name>/'.

        Nested Resources:

        It is possible to attach resources in a 'nested' fashion. For example
        passing a name of 'my.nested.users' would be accessible as
        api.my.nested.users.get().

        Usage:

        .. code-block:: python

            api.add_resource('users')
            response = api.users.get()

            # Or for nested resources

            api.add_resource('my.nested.users', path='/users/')
            response = api.my.nested.users.get()

        """
        safe_list = ['.', '_']
        for char in name:
            if char in safe_list:
                continue
            if not char.isalnum():
                raise exc.dRestResourceError(
                    "resource name must be alpha-numeric."
                    )
        if not path:
            path = '%s' % name
        else:
            path = path.strip('/')

        if not resource_handler:
            resource_handler = self._meta.resource_handler

        resource.validate(resource_handler)
        handler = resource_handler(self, name, path)
        if hasattr(self, name):
            raise exc.dRestResourceError(
                "The object '%s' already exist on '%s'" % (name, self))

        # break up if nested
        parts = name.split('.')
        if len(parts) == 1:
            setattr(self, name, handler)
        else:
            first = parts.pop(0)
            last = parts.pop()

            # Reuse existing containers so that registering e.g. 'my.a'
            # followed by 'my.b' does not clobber the handler for 'my.a'
            # (previously every call re-created the intermediate
            # NestedResource objects, discarding earlier attachments).
            if not hasattr(self, first):
                setattr(self, first, resource.NestedResource())
            current_obj = getattr(self, first)

            # everything in between
            for part in parts:
                if not hasattr(current_obj, part):
                    setattr(current_obj, part, resource.NestedResource())
                current_obj = getattr(current_obj, part)

            # add the actual resource to the chain of nested objects
            setattr(current_obj, last, handler)

        self._resources.append(name)
class TastyPieAPI(API):
"""
This class implements an API client, specifically tailored for
interfacing with `TastyPie <http://django-tastypie.readthedocs.org/en/latest>`_.
Optional / Meta Arguments:
auth_mech
The auth mechanism to use. One of ['basic', 'api_key'].
Default: 'api_key'.
auto_detect_resources
Boolean. Whether or not to auto detect, and add resource objects
to the api. Default: True.
Authentication Mechanisms
Currently the only supported authentication mechanism are:
* ApiKeyAuthentication
* BasicAuthentication
Usage
Please note that the following example use ficticious resource data.
What is returned, and sent to the API is unique to the API itself. Please
do not copy and paste any of the following directly without modifying the
request parameters per your use case.
Create the client object, and authenticate with a user/api_key pair by
default:
.. code-block:: python
import drest
api = drest.api.TastyPieAPI('http://localhost:8000/api/v0/')
api.auth('john.doe', '<PASSWORD>')
OR authenticate against HTTP Basic Auth:
.. code-block:: python
import drest
api = drest.api.TastyPieAPI('http://localhost:8000/api/v0/',
auth_mech='basic')
api.auth('john.doe', '<PASSWORD>')
As drest auto-detects TastyPie resources, you can view those at:
.. code-block:: python
api.resources
And access their schema:
.. code-block:: python
api.users.schema
As well as make the usual calls such as:
.. code-block:: python
api.users.get()
api.users.get(<pk>)
api.users.put(<pk>, data_dict)
api.users.post(data_dict)
api.users.delete(<pk>)
What about filtering? (these depend on how the `API is configured <http://django-tastypie.readthedocs.org/en/latest/resources.html#basic-filtering>`_):
.. code-block:: python
api.users.get(params=dict(username='admin'))
api.users.get(params=dict(username__icontains='admin'))
...
See :mod:`drest.api.API` for more standard usage examples.
"""
class Meta:
    # Handler classes specialized for TastyPie request/resource semantics.
    request_handler = request.TastyPieRequestHandler
    resource_handler = resource.TastyPieResourceHandler
    # Query the API root on construction and attach discovered resources.
    auto_detect_resources = True
    # Default auth mechanism; must be one of auth_mechanizms below.
    auth_mech = 'api_key'
    # NOTE(review): attribute name is misspelled ("mechanizms") but it is
    # referenced by auth() and is public surface - renaming would break callers.
    auth_mechanizms = ['api_key', 'basic']
def __init__(self, *args, **kw):
    """Initialize the API client and, when configured, auto-detect resources."""
    super(TastyPieAPI, self).__init__(*args, **kw)
    if self._meta.auto_detect_resources:
        self.find_resources()
def auth(self, *args, **kw):
    """
    Authenticate the request, determined by Meta.auth_mech. Positional and
    keyword arguments are passed straight through to the mechanism function.

    :raises exc.dRestAPIError: when Meta.auth_mech is not a known mechanism
    """
    mech = self._meta.auth_mech
    if mech not in self.auth_mechanizms:
        raise exc.dRestAPIError("Unknown TastyPie auth mechanism.")
    handler = getattr(self, '_auth_via_%s' % mech)
    handler(*args, **kw)
def _auth_via_basic(self, user, password, **kw):
    """
    Thin wrapper around drest.api.API.auth() (HTTP Basic auth).

    :param user: username
    :param password: password
    """
    # extra keyword arguments are accepted for signature parity but ignored
    return super(TastyPieAPI, self).auth(user, password)
def _auth_via_api_key(self, user, api_key, **kw):
"""
This authentication mechanism adds an Authorization header for
user/api_key per the
`TastyPie Documentation <http://django-tastypie.readthedocs.org/en/latest/authentication_authorization.html>`_.
Required Arguments:
| |
"""
PhysicalNode class for including real systems in the emulated network.
"""
import logging
import threading
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Tuple
from core.emulator.data import InterfaceData
from core.emulator.distributed import DistributedServer
from core.emulator.enumerations import NodeTypes, TransportType
from core.errors import CoreCommandError, CoreError
from core.executables import MOUNT, TEST, UMOUNT
from core.nodes.base import CoreNetworkBase, CoreNodeBase
from core.nodes.interface import DEFAULT_MTU, CoreInterface
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from core.emulator.session import Session
class PhysicalNode(CoreNodeBase):
    """
    A real machine incorporated into the emulated network.

    Must be bound to a remote (distributed) server; commands execute on that
    host via ``host_cmd``, and interfaces are adopted GreTap tunnel taps
    rather than locally created devices.
    """

    def __init__(
        self,
        session: "Session",
        _id: int = None,
        name: str = None,
        directory: Path = None,
        server: DistributedServer = None,
    ) -> None:
        """
        Create a PhysicalNode instance.

        :param session: core session instance
        :param _id: node id
        :param name: node name
        :param directory: directory for node files on the remote host
        :param server: distributed server the node runs on; required
        :raises CoreError: when no remote server is assigned
        """
        super().__init__(session, _id, name, server)
        if not self.server:
            raise CoreError("physical nodes must be assigned to a remote server")
        self.directory: Optional[Path] = directory
        self.lock: threading.RLock = threading.RLock()
        # (source, target) bind mounts created by mount(); unwound in shutdown()
        self._mounts: List[Tuple[Path, Path]] = []

    def startup(self) -> None:
        """Create the node directory and mark the node up."""
        with self.lock:
            self.makenodedir()
            self.up = True

    def shutdown(self) -> None:
        """
        Shut the node down: unmount bind mounts (newest first), shut down
        all interfaces, and remove the node directory.
        """
        if not self.up:
            return
        with self.lock:
            while self._mounts:
                # pop newest-first so nested mounts unwind correctly
                _, target_path = self._mounts.pop(-1)
                self.umount(target_path)
            for iface in self.get_ifaces():
                iface.shutdown()
            self.rmnodedir()

    def path_exists(self, path: str) -> bool:
        """
        Determines if a file or directory path exists.
        :param path: path to file or directory
        :return: True if path exists, False otherwise
        """
        try:
            self.host_cmd(f"{TEST} -e {path}")
            return True
        except CoreCommandError:
            return False

    def termcmdstring(self, sh: str = "/bin/sh") -> str:
        """
        Create a terminal command string.
        :param sh: shell to execute command in
        :return: str
        """
        return sh

    def set_mac(self, iface_id: int, mac: str) -> None:
        """
        Set mac address for an interface.
        :param iface_id: index of interface to set hardware address for
        :param mac: mac address to set
        :return: nothing
        :raises CoreCommandError: when a non-zero exit status occurs
        """
        iface = self.get_iface(iface_id)
        iface.set_mac(mac)
        if self.up:
            # push the change to the real device on the host
            self.net_client.device_mac(iface.name, str(iface.mac))

    def add_ip(self, iface_id: int, ip: str) -> None:
        """
        Add an ip address to an interface in the format "10.0.0.1/24".
        :param iface_id: id of interface to add address to
        :param ip: address to add to interface
        :return: nothing
        :raises CoreError: when ip address provided is invalid
        :raises CoreCommandError: when a non-zero exit status occurs
        """
        iface = self.get_iface(iface_id)
        iface.add_ip(ip)
        if self.up:
            self.net_client.create_address(iface.name, ip)

    def remove_ip(self, iface_id: int, ip: str) -> None:
        """
        Remove an ip address from an interface in the format "10.0.0.1/24".
        :param iface_id: id of interface to delete address from
        :param ip: ip address to remove from interface
        :return: nothing
        :raises CoreError: when ip address provided is invalid
        :raises CoreCommandError: when a non-zero exit status occurs
        """
        iface = self.get_iface(iface_id)
        iface.remove_ip(ip)
        if self.up:
            self.net_client.delete_address(iface.name, ip)

    def adopt_iface(
        self, iface: CoreInterface, iface_id: int, mac: str, ips: List[str]
    ) -> None:
        """
        When a link message is received linking this node to another part of
        the emulation, no new interface is created; instead, adopt the
        GreTap interface as the node interface.

        :param iface: GreTap interface to adopt
        :param iface_id: id to register the interface under
        :param mac: mac address to assign, if truthy
        :param ips: ip addresses to assign
        """
        iface.name = f"gt{iface_id}"
        iface.node = self
        self.add_iface(iface, iface_id)
        # use a more reasonable name, e.g. "gt0" instead of "gt.56286.150"
        if self.up:
            self.net_client.device_down(iface.localname)
            self.net_client.device_name(iface.localname, iface.name)
        iface.localname = iface.name
        if mac:
            self.set_mac(iface_id, mac)
        for ip in ips:
            self.add_ip(iface_id, ip)
        if self.up:
            self.net_client.device_up(iface.localname)

    def next_iface_id(self) -> int:
        """Return the next unused interface id, advancing the counter."""
        with self.lock:
            while self.iface_id in self.ifaces:
                self.iface_id += 1
            iface_id = self.iface_id
            self.iface_id += 1
            return iface_id

    def new_iface(
        self, net: CoreNetworkBase, iface_data: InterfaceData
    ) -> CoreInterface:
        """
        Create a node interface by building a GRE tunnel to the given network
        and adopting the remote tap as this node's interface.

        :param net: network to link with
        :param iface_data: data for the new interface
        :return: the adopted interface
        """
        logger.info("creating interface")
        ips = iface_data.get_ips()
        iface_id = iface_data.id
        if iface_id is None:
            iface_id = self.next_iface_id()
        name = iface_data.name
        if name is None:
            name = f"gt{iface_id}"
        # NOTE(review): `name` computed above appears unused - adopt_iface()
        # renames the tap to f"gt{iface_id}" itself; confirm before removing
        _, remote_tap = self.session.distributed.create_gre_tunnel(
            net, self.server, iface_data.mtu, self.up
        )
        self.adopt_iface(remote_tap, iface_id, iface_data.mac, ips)
        return remote_tap

    def privatedir(self, dir_path: Path) -> None:
        """
        Create a private directory for the node and bind mount it into place.

        :param dir_path: fully qualified directory path
        :raises CoreError: when the path is not fully qualified
        """
        if not str(dir_path).startswith("/"):
            raise CoreError(f"private directory path not fully qualified: {dir_path}")
        host_path = self.host_path(dir_path, is_dir=True)
        self.host_cmd(f"mkdir -p {host_path}")
        self.mount(host_path, dir_path)

    def mount(self, src_path: Path, target_path: Path) -> None:
        """
        Bind mount src_path at target_path and record it for cleanup.

        :param src_path: source path on the host
        :param target_path: mount point
        :raises CoreCommandError: when a non-zero exit status occurs
        """
        logger.debug("node(%s) mounting: %s at %s", self.name, src_path, target_path)
        self.cmd(f"mkdir -p {target_path}")
        self.host_cmd(f"{MOUNT} --bind {src_path} {target_path}", cwd=self.directory)
        self._mounts.append((src_path, target_path))

    def umount(self, target_path: Path) -> None:
        """
        Lazily unmount target_path; failures are logged, not raised.

        :param target_path: mount point to unmount
        """
        logger.info("unmounting '%s'", target_path)
        try:
            self.host_cmd(f"{UMOUNT} -l {target_path}", cwd=self.directory)
        except CoreCommandError:
            logger.exception("unmounting failed for %s", target_path)

    def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str:
        """
        Run a command for this node (executed on the remote host).

        :param args: command to run
        :param wait: whether to wait for completion
        :param shell: accepted for interface parity
        :return: command output
        """
        # NOTE(review): `shell` is accepted but not forwarded to host_cmd -
        # confirm whether that is intentional
        return self.host_cmd(args, wait=wait)

    def create_dir(self, dir_path: Path) -> None:
        """Unsupported for physical nodes; always raises CoreError."""
        raise CoreError("physical node does not support creating directories")

    def create_file(self, file_path: Path, contents: str, mode: int = 0o644) -> None:
        """Unsupported for physical nodes; always raises CoreError."""
        raise CoreError("physical node does not support creating files")

    def copy_file(self, src_path: Path, dst_path: Path, mode: int = None) -> None:
        """Unsupported for physical nodes; always raises CoreError."""
        raise CoreError("physical node does not support copying files")
class Rj45Node(CoreNodeBase):
"""
RJ45Node is a physical interface on the host linked to the emulated
network.
"""
apitype: NodeTypes = NodeTypes.RJ45
type: str = "rj45"
def __init__(
    self,
    session: "Session",
    _id: int = None,
    name: str = None,
    mtu: int = DEFAULT_MTU,
    server: DistributedServer = None,
) -> None:
    """
    Create an RJ45Node instance.
    :param session: core session instance
    :param _id: node id
    :param name: node name
    :param mtu: rj45 mtu
    :param server: remote server node
        will run on, default is None for localhost
    """
    super().__init__(session, _id, name, server)
    # the single raw host interface this node represents
    self.iface: CoreInterface = CoreInterface(
        session, name, name, mtu, server, self
    )
    self.iface.transport_type = TransportType.RAW
    self.lock: threading.RLock = threading.RLock()
    # set when the interface is linked into a network (at most one link)
    self.iface_id: Optional[int] = None
    # original host state captured by savestate() and restored on shutdown
    self.old_up: bool = False
    self.old_addrs: List[Tuple[str, Optional[str]]] = []
def startup(self) -> None:
    """
    Set the interface in the up state.
    :return: nothing
    :raises CoreCommandError: when there is a command exception
    """
    # capture current host state first so shutdown() can restore it;
    # the interface will also be marked up during net.attach()
    self.savestate()
    self.net_client.device_up(self.iface.localname)
    self.up = True
def shutdown(self) -> None:
    """
    Bring the interface down. Remove any addresses and queuing
    disciplines, then restore the saved host state.
    :return: nothing
    """
    if not self.up:
        return
    localname = self.iface.localname
    self.net_client.device_down(localname)
    self.net_client.device_flush(localname)
    try:
        self.net_client.delete_tc(localname)
    except CoreCommandError:
        # best-effort: there may be no tc discipline to remove
        pass
    self.up = False
    self.restorestate()
def path_exists(self, path: str) -> bool:
    """
    Check whether a file or directory exists on the host.

    :param path: path to file or directory
    :return: True if path exists, False otherwise
    """
    try:
        self.host_cmd(f"{TEST} -e {path}")
    except CoreCommandError:
        return False
    return True
def new_iface(
    self, net: CoreNetworkBase, iface_data: InterfaceData
) -> CoreInterface:
    """
    This is called when linking with another node. Since this node
    represents an interface, we do not create another object here,
    but attach ourselves to the given network.
    :param net: new network instance
    :param iface_data: interface data for new interface
    :return: the node's single interface
    :raises CoreError: when an interface has already been created, one max
    """
    with self.lock:
        iface_id = iface_data.id
        if iface_id is None:
            iface_id = 0
        if self.iface.net is not None:
            raise CoreError(
                f"RJ45({self.name}) nodes support at most 1 network interface"
            )
        # register the single interface under the requested id
        self.ifaces[iface_id] = self.iface
        self.iface_id = iface_id
        self.iface.attachnet(net)
        for ip in iface_data.get_ips():
            self.add_ip(ip)
        return self.iface
def delete_iface(self, iface_id: int) -> None:
    """
    Delete a network interface.
    :param iface_id: interface index to delete
    :return: nothing
    :raises CoreError: when the id is unknown or no network is attached
    """
    # validates the id, raising CoreError when it does not match
    self.get_iface(iface_id)
    self.ifaces.pop(iface_id)
    # NOTE(review): the interface is popped before this check, so raising
    # here leaves the node with no registered iface - confirm intended
    if self.iface.net is None:
        raise CoreError(
            f"RJ45({self.name}) is not currently connected to a network"
        )
    self.iface.detachnet()
    self.iface.net = None
    self.shutdown()
def get_iface(self, iface_id: int) -> CoreInterface:
    """
    Return the node's single interface after validating the id.

    :param iface_id: interface id to look up
    :raises CoreError: when the id does not match the registered interface
    """
    if iface_id == self.iface_id and iface_id in self.ifaces:
        return self.iface
    raise CoreError(f"node({self.name}) interface({iface_id}) does not exist")
def get_iface_id(self, iface: CoreInterface) -> Optional[int]:
    """
    Retrieve the network interface index.

    :param iface: network interface to retrieve index for
    :return: interface index, None when not yet linked
    :raises CoreError: when the interface is not this node's interface
    """
    if iface is self.iface:
        return self.iface_id
    raise CoreError(f"node({self.name}) does not have interface({iface.name})")
def add_ip(self, ip: str) -> None:
    """
    Add an ip address to an interface in the format "10.0.0.1/24".
    :param ip: address to add to interface
    :return: nothing
    :raises CoreError: when ip address provided is invalid
    :raises CoreCommandError: when a non-zero exit status occurs
    """
    self.iface.add_ip(ip)
    if self.up:
        # apply to the real host device as well
        self.net_client.create_address(self.name, ip)
def remove_ip(self, ip: str) -> None:
    """
    Remove an ip address from an interface in the format "10.0.0.1/24".
    :param ip: ip address to remove from interface
    :return: nothing
    :raises CoreError: when ip address provided is invalid
    :raises CoreCommandError: when a non-zero exit status occurs
    """
    self.iface.remove_ip(ip)
    if self.up:
        # remove from the real host device as well
        self.net_client.delete_address(self.name, ip)
def savestate(self) -> None:
"""
Save the addresses and other interface state before using the
interface for emulation purposes. TODO: save/restore the PROMISC flag
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
self.old_up = False
self.old_addrs: List[Tuple[str, Optional[str]]] = []
localname = self.iface.localname
output = self.net_client.address_show(localname)
for line in | |
<reponame>adrianantonypillai/taurus
"""
Copyright 2018 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ast
import json
import math
import re
import string
from collections import OrderedDict
import astunparse
from bzt import TaurusConfigError, TaurusInternalException
from bzt.engine import Scenario
from bzt.requests_model import HTTPRequest, HierarchicRequestParser, TransactionBlock, SetVariables
from bzt.six import parse, string_types, iteritems, text_type, etree
from bzt.utils import PythonGenerator, dehumanize_time, ensure_is_dict
from .jmeter_functions import Base64DecodeFunction, UrlEncodeFunction, UuidFunction
from .jmeter_functions import TimeFunction, RandomFunction, RandomStringFunction, Base64EncodeFunction
def normalize_class_name(text):
    """Turn a label into a CamelCase class-name fragment.

    The label is split on dashes/underscores; each piece is stripped of
    characters other than letters, digits and underscore, capitalized, and
    the pieces are concatenated.
    """
    keep = string.digits + string.ascii_letters + '_'
    pieces = re.split(r'[\-_]', text)
    return ''.join(
        ''.join(ch for ch in piece if ch in keep).capitalize()
        for piece in pieces
    )
def capitalize_class_name(text, allowed_chars):
    """Drop characters not in allowed_chars, then capitalize the remainder."""
    kept = [ch for ch in text if ch in allowed_chars]
    return ''.join(kept).capitalize()
def filter_string(text, allowed_chars):
    """Return text with every character not in allowed_chars removed."""
    kept = [character for character in text if character in allowed_chars]
    return ''.join(kept)
def normalize_method_name(text):
    """Turn a label into a snake_case method-name fragment.

    Characters other than letters, digits, dash and space are dropped;
    remaining dashes and spaces become underscores.
    """
    keep = string.digits + string.ascii_letters + '- '
    cleaned = ''.join(ch for ch in text if ch in keep)
    return cleaned.replace(' ', '_').replace('-', '_')
def create_class_name(label):
    """Derive a test class name from a label ('TestAPI' for autogenerated ones)."""
    if label.startswith('autogenerated'):
        return 'TestAPI'
    return 'Test%s' % normalize_class_name(label)
def create_method_name(label):
    """Derive a test method name from a label ('test_requests' for autogenerated ones)."""
    if label.startswith('autogenerated'):
        return 'test_requests'
    return normalize_method_name(label)
class JMeterExprCompiler(object):
    """Compiles JMeter ``${...}`` expressions into Python AST nodes for Apiritif."""

    def __init__(self, parent_log):
        """
        :param parent_log: logger to derive this compiler's logger from
        """
        self.log = parent_log.getChild(self.__class__.__name__)

    @staticmethod
    def gen_var_accessor(varname, ctx=None):
        """Build the AST for ``self.vars[varname]`` (Load context by default)."""
        if ctx is None:
            ctx = ast.Load()
        return ast.Subscript(
            value=ast.Name(id="self.vars", ctx=ast.Load()),
            slice=ast.Index(value=ast.Str(s=varname)),
            ctx=ctx
        )

    def gen_expr(self, value):
        """
        Convert a scenario value (bool, number, string with optional ``${...}``
        interpolation, None, dict, list, tuple, or a ready AST node) into an
        AST expression. Unknown types are returned unchanged.
        """
        if isinstance(value, bool):
            return ast.Name(id="True" if value else "False", ctx=ast.Load())
        elif isinstance(value, (int, float)):
            return ast.Num(n=value)
        elif isinstance(value, string_types):
            # if it has interpolation - break it into either a `"".format(args)`
            # form or a bare expression; otherwise it's a string literal
            parts = re.split(r'(\$\{.*?\})', value)
            format_args = []
            for item in parts:
                if item:
                    if item.startswith("${") and item.endswith("}"):
                        # replace the placeholder with a format slot and
                        # compile the inner expression
                        value = value.replace(item, "{}")
                        compiled = self.translate_jmeter_expr(item[2:-1])
                        format_args.append(compiled)
            if format_args:
                if len(format_args) == 1 and value == "{}":
                    # whole string is one placeholder - use the expression directly
                    result = format_args[0]
                else:
                    # NOTE(review): a literal "{}" or "{"/"}" in the source string
                    # would confuse str.format here - confirm inputs never contain it
                    result = ast.Call(
                        func=ast.Attribute(
                            value=ast.Str(s=value),
                            attr='format',
                            ctx=ast.Load(),
                        ),
                        args=format_args,
                        keywords=[],
                        starargs=None,
                        kwargs=None
                    )
            else:
                result = ast.Str(s=value)
            return result
        elif isinstance(value, type(None)):
            return ast.Name(id="None", ctx=ast.Load())
        elif isinstance(value, dict):
            # sort items so the generated code is deterministic
            items = sorted(list(iteritems(value)))
            return ast.Dict(keys=[self.gen_expr(k) for k, _ in items],
                            values=[self.gen_expr(v) for _, v in items])
        elif isinstance(value, list):
            return ast.List(elts=[self.gen_expr(val) for val in value], ctx=ast.Load())
        elif isinstance(value, tuple):
            return ast.Tuple(elts=[self.gen_expr(val) for val in value], ctx=ast.Load())
        elif isinstance(value, ast.AST):
            # already compiled - pass through unchanged
            return value
        else:
            return value

    def translate_jmeter_expr(self, expr):
        """
        Translates JMeter expression into Apiritif-based Python expression.
        :type expr: str
        :return: AST node for the translated expression
        """
        self.log.debug("Attempting to translate JMeter expression %r", expr)
        # supported JMeter function names -> compiler classes
        functions = {
            '__time': TimeFunction,
            '__Random': RandomFunction,
            '__RandomString': RandomStringFunction,
            '__base64Encode': Base64EncodeFunction,
            '__base64Decode': Base64DecodeFunction,
            '__urlencode': UrlEncodeFunction,
            '__UUID': UuidFunction,
        }
        regexp = r"(\w+)\((.*?)\)"
        args_re = r'(?<!\\),'
        match = re.match(regexp, expr)
        if match is None:  # doesn't look like JMeter func, translate as a var
            return self.gen_var_accessor(expr)
        varname, arguments = match.groups()
        if arguments is None:  # plain variable
            result = self.gen_var_accessor(varname)
        else:  # function call
            if not arguments:
                args = []
            else:
                # parse arguments: split by ',' but not '\,'
                args = [arg.strip() for arg in re.split(args_re, arguments)]
            if varname not in functions:  # unknown function
                return ast.Name(id=varname, ctx=ast.Load())
            self.log.debug("Translating function %s with arguments %s", varname, arguments)
            func = functions[varname](self)
            result = func.to_python(args)
            if result is None:
                result = ast.Name(id=varname, ctx=ast.Load())
        self.log.debug("Compile: %r -> %r", expr, result)
        return result
class SeleniumScriptBuilder(PythonGenerator):
"""
:type window_size: tuple[int,int]
"""
IMPORTS_SELENIUM = """import unittest
import os
import re
from time import sleep, time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import apiritif
"""
IMPORTS_APPIUM = """import unittest
import os
import re
from time import sleep, time
from appium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import apiritif
"""
TAGS = ("byName", "byID", "byCSS", "byXPath", "byLinkText")
def __init__(self, scenario, parent_logger, wdlog, utils_file,
             ignore_unknown_actions=False, generate_markers=None, capabilities=None, label='', wd_addr=None):
    """
    Build a Selenium/Appium-based Apiritif script from a scenario.

    :param scenario: Taurus scenario to generate the script from
    :param parent_logger: logger to derive this builder's logger from
    :param wdlog: webdriver log destination
    :param utils_file: path to utility code appended to the generated script
    :param ignore_unknown_actions: skip unknown actions instead of failing
    :param generate_markers: emit flow-marker JS calls (None = decide later)
    :param capabilities: webdriver capabilities dict
    :param label: suite label used in flow markers
    :param wd_addr: remote webdriver address, if any
    """
    super(SeleniumScriptBuilder, self).__init__(scenario, parent_logger)
    self.label = label
    self.remote_address = wd_addr
    self.capabilities = capabilities or {}
    self.window_size = None
    self.wdlog = wdlog
    # switched to True by _check_platform() for mobile browser/platform combos
    self.appium = False
    self.utils_file = utils_file
    self.ignore_unknown_actions = ignore_unknown_actions
    self.generate_markers = generate_markers
def gen_asserts(self, config, indent=None):
    """Generate page-source assertion statements for the config, if any."""
    statements = []
    if "assert" in config:
        # assertions run against the current page source
        statements.append(self.gen_statement("body = self.driver.page_source", indent=indent))
        for assert_config in config.get("assert"):
            statements.extend(self.gen_assertion(assert_config, indent=indent))
    return statements
def gen_think_time(self, think_time, indent=None):
    """
    Generate a sleep(...) statement for the configured think-time.

    :param think_time: think-time spec (e.g. "5s") or None
    :param indent: indentation level for generated statements
    :return: list of generated statements (empty when no delay applies)
    """
    test_method = []
    if think_time is not None:
        delay = dehumanize_time(think_time)
        if delay > 0:
            # reuse the already-parsed delay instead of calling
            # dehumanize_time() a second time with the same argument
            test_method.append(self.gen_statement("sleep(%s)" % delay, indent=indent))
            test_method.append(self.gen_new_line())
    return test_method
def gen_request(self, req, indent=None):
    """
    Generate the driver.get(...) statement for a request, resolving the url
    against the scenario's default-address when it is relative.

    :param req: request to generate a navigation statement for
    :param indent: indentation level for generated statements
    :return: list of generated statements (empty when the request has no url)
    """
    default_address = self.scenario.get("default-address")
    transaction_contents = []
    if req.url is not None:
        parsed_url = parse.urlparse(req.url)
        if default_address and not parsed_url.netloc:
            # relative url - prefix it with the scenario default address
            url = default_address + req.url
        else:
            url = req.url
        transaction_contents.append(
            self.gen_statement("self.driver.get(self.template(%r))" % url, indent=indent))
        transaction_contents.append(self.gen_new_line())
    return transaction_contents
def build_source_code(self):
    """
    Generate the full test module: coding header, imports, a TestRequests
    class with setUp/tearDown and one test method, plus appended utilities.
    """
    self.log.debug("Generating Test Case test methods")
    test_class = self.gen_class_definition("TestRequests", ["unittest.TestCase"])
    test_class.append(self.gen_setup_method())
    test_class.append(self.gen_teardown_method())
    requests = self.scenario.get_requests(require_url=False)
    test_method = self.gen_test_method('test_requests')
    self.gen_setup(test_method)
    for i, req in enumerate(requests, 1):
        self._fill_test_method(req, test_method)
        if i != len(requests):
            # blank line between consecutive requests
            test_method.append(self.gen_new_line())
    test_class.append(test_method)
    self.root.append(self.gen_statement("# coding=utf-8", indent=0))
    self.root.append(self.add_imports())
    self.root.append(test_class)
    self.root.append(self.add_utilities())
def _fill_test_method(self, req, test_method):
    """
    Append the statements for one request (a logged transaction) to the test
    method, wrapped in try/except flow markers when self.generate_markers.

    :param req: request to generate code for
    :param test_method: statement list being built
    :raises TaurusConfigError: when the request has neither url nor label
    """
    if req.label:
        label = req.label
    elif req.url:
        label = req.url
    else:
        raise TaurusConfigError("You must specify at least 'url' or 'label' for each requests item")
    if self.generate_markers:
        # markers need a try/except/else frame, so the body indents one extra step
        test_method.append(self.gen_statement("try:", indent=self.INDENT_STEP * 2))
        indent = 3
        marker = "self.driver.execute_script('/* FLOW_MARKER test-case-start */', " \
                 "{'testCaseName': %r, 'testSuiteName': %r})" % (label, self.label)
        test_method.append(self.gen_statement(marker, indent=self.INDENT_STEP * indent))
        test_method.append(self.gen_new_line())
    else:
        indent = 2
    test_method.append(self.gen_statement('with apiritif.transaction_logged(self.template(%r)):' % label,
                                          indent=self.INDENT_STEP * indent))
    transaction_contents = []
    transaction_contents.extend(self.gen_request(req, indent=self.INDENT_STEP * (indent + 1)))
    # NOTE(review): the impl-wait goes straight to test_method while the
    # navigation goes to transaction_contents, so it is emitted before the
    # request - confirm that ordering is intended
    if req.url is not None and req.timeout is not None:
        test_method.append(self.gen_impl_wait(req.timeout, indent=self.INDENT_STEP * (indent + 1)))
    action_append = False
    for action_config in req.config.get("actions", []):
        action = self.gen_action(action_config, indent=self.INDENT_STEP * (indent + 1))
        if action:
            transaction_contents.extend(action)
            action_append = True
    if action_append:
        transaction_contents.append(self.gen_new_line())
    transaction_contents.extend(self.gen_asserts(req.config, indent=self.INDENT_STEP * (indent + 1)))
    if transaction_contents:
        test_method.extend(transaction_contents)
    else:
        # the `with` block cannot be empty
        test_method.append(self.gen_statement('pass', indent=self.INDENT_STEP * (indent + 1)))
    test_method.append(self.gen_new_line())
    test_method.extend(self.gen_think_time(req.get_think_time(), indent=self.INDENT_STEP * indent))
    if self.generate_markers:
        # close the try frame: emit a stop marker for fail/broken/success paths
        marker = "self.driver.execute_script('/* FLOW_MARKER test-case-stop */', " \
                 "{'status': %s, 'message': %s})"
        test_method.append(self.gen_statement("except AssertionError as exc:", indent=self.INDENT_STEP * 2))
        test_method.append(self.gen_statement(marker % (repr('failed'), 'str(exc)'), indent=self.INDENT_STEP * 3))
        test_method.append(self.gen_statement("raise", indent=self.INDENT_STEP * 3))
        test_method.append(self.gen_statement("except BaseException as exc:", indent=self.INDENT_STEP * 2))
        test_method.append(self.gen_statement(marker % (repr('broken'), 'str(exc)'), indent=self.INDENT_STEP * 3))
        test_method.append(self.gen_statement("raise", indent=self.INDENT_STEP * 3))
        test_method.append(self.gen_statement("else:", indent=self.INDENT_STEP * 2))
        test_method.append(self.gen_statement(marker % (repr('success'), repr('')), indent=self.INDENT_STEP * 3))
def add_imports(self):
    """Return the imports element, filled with the selenium or appium header."""
    imports = super(SeleniumScriptBuilder, self).add_imports()
    imports.text = self.IMPORTS_APPIUM if self.appium else self.IMPORTS_SELENIUM
    return imports
def add_utilities(self):
    """Wrap the contents of the utilities file in a <utilities> element."""
    utils = etree.Element("utilities")
    with open(self.utils_file) as fds:
        utils.text = "\n" + fds.read()
    return utils
def gen_global_vars(self):
    """
    Generate statements initializing self.vars/self.template from the
    scenario's "variables" section.

    :return: list of generated statements
    """
    # NOTE(review): assumes the scenario yields a dict here; if "variables"
    # can be absent/None, .keys() below would fail - confirm upstream defaults
    variables = self.scenario.get("variables")
    stmts = [
        "self.vars = {}",
        "self.template = Template(self.vars)"
    ]
    # sorted for deterministic generated output
    for key in sorted(variables.keys()):
        stmts.append("self.vars['%s'] = %r" % (key, variables[key]))
    stmts.append("")
    return [self.gen_statement(stmt) for stmt in stmts]
def _add_url_request(self, default_address, req, test_method):
    """
    Append an impl-wait (when the request has a timeout) and a
    driver.get(...) call, resolving a relative url against default_address.

    :param default_address: base address for relative urls, or None
    :param req: request whose url is navigated to
    :param test_method: statement list being built
    """
    parsed_url = parse.urlparse(req.url)
    if default_address is not None and not parsed_url.netloc:
        url = default_address + req.url
    else:
        url = req.url
    if req.timeout is not None:
        test_method.append(self.gen_impl_wait(req.timeout))
    test_method.append(self.gen_statement("self.driver.get(self.template(%r))" % url))
def gen_setup(self, test_method):
    """Prepend the scenario-level implicit wait (default 30s) to the method."""
    scenario_timeout = dehumanize_time(self.scenario.get("timeout", "30s"))
    test_method.append(self.gen_impl_wait(scenario_timeout))
    test_method.append(self.gen_new_line())
def _check_platform(self):
    """
    Resolve which browser to use from capabilities/scenario settings and
    configure remote/appium mode accordingly.

    :return: browser name to use ("remote" forces a remote webdriver)
    :raises TaurusConfigError: on an unsupported browser name
    """
    mobile_browsers = ["chrome", "safari"]
    mobile_platforms = ["android", "ios"]
    browser = self.capabilities.get("browserName", "")
    # a scenario-level "browser" setting overrides capabilities
    browser = self.scenario.get("browser", browser)
    browser = browser.lower()  # todo: whether we should take browser as is? (without lower case)
    browser_platform = None
    if browser:
        # a browser may be given as "<name>-<platform>", e.g. "chrome-android"
        browser_split = browser.split("-")
        browser = browser_split[0]
        browsers = ["firefox", "chrome", "ie", "opera"] + mobile_browsers
        if browser not in browsers:
            raise TaurusConfigError("Unsupported browser name: %s" % browser)
        if len(browser_split) > 1:
            browser_platform = browser_split[1]
    if self.remote_address:
        # a remote webdriver address always wins over a concrete browser
        if browser and browser != "remote":
            msg = "Forcing browser to Remote, because of remote WebDriver address, use '%s' as browserName"
            self.log.warning(msg % browser)
            self.capabilities["browserName"] = browser
        browser = "remote"
        if self.generate_markers is None:  # if not set by user - set to true
            self.generate_markers = True
    elif browser in mobile_browsers and browser_platform in mobile_platforms:
        # mobile combo - route through a local appium server
        self.appium = True
        self.remote_address = "http://localhost:4723/wd/hub"
        self.capabilities["platformName"] = browser_platform
        self.capabilities["browserName"] = browser
        browser = "remote"  # Force to use remote web driver
    elif not browser:
        browser = "firefox"
    return browser
def gen_setup_method(self):
self.log.debug("Generating setUp test method")
browser = self._check_platform()
headless = self.scenario.get("headless", False)
if headless:
self.log.info("Headless mode works only with Selenium |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.