code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def _tree_grow(self, y_true, X, cost_mat, level=0):
    """ Private recursive function to grow the decision tree.
    Parameters
    ----------
    y_true : array indicator matrix
        Ground truth (correct) labels.
    X : array-like of shape = [n_samples, n_features]
        The input samples.
    cost_mat : array-like of shape = [n_samples, 4]
        Cost matrix of the classification problem
        Where the columns represents the costs of: false positives, false negatives,
        true positives and true negatives, for each example.
    level : int, optional (default=0)
        Depth of the current node; incremented by one on each recursive call.
    Returns
    -------
    Tree : Object
        Container of the decision tree
        NOTE: it is not the same structure as the sklearn.tree.tree object
    """
    #TODO: Find error, add min_samples_split
    # Base case: a 1-D X means a single sample reached this node, so emit a
    # leaf predicting that sample's own label. y_prob=0.5 marks the
    # probability as uninformative here — presumably intentional, confirm.
    if len(X.shape) == 1:
        tree = dict(y_pred=y_true, y_prob=0.5, level=level, split=-1, n_samples=1, gain=0)
        return tree
    # Calculate the best split of the current node
    split, gain, Xl_pred, y_pred, y_prob = self._best_split(y_true, X, cost_mat)
    n_samples, n_features = X.shape
    # Construct the tree object as a dictionary; split=-1 marks the node as a
    # leaf until a split is committed further below.
    #TODO: Convert tree to be equal to sklearn.tree.tree object
    tree = dict(y_pred=y_pred, y_prob=y_prob, level=level, split=-1, n_samples=n_samples, gain=gain)
    # Check the stopping criteria: insufficient gain, maximum depth reached,
    # or too few samples to split — in each case return the node as a leaf.
    if gain < self.min_gain:
        return tree
    if self.max_depth is not None:
        if level >= self.max_depth:
            return tree
    if n_samples <= self.min_samples_split:
        return tree
    # split is a (feature_index, threshold) pair; partition samples by it.
    j, l = split
    filter_Xl = (X[:, j] <= l)
    filter_Xr = ~filter_Xl
    n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0]
    n_samples_Xr = np.nonzero(filter_Xr)[0].shape[0]
    # Reject splits that would leave either child with too few samples.
    if min(n_samples_Xl, n_samples_Xr) <= self.min_samples_leaf:
        return tree
    # No stopping criterion was met: commit the split, take a global node id
    # from the shared counter (id assigned before recursing so parents get
    # lower ids than children), then grow both subtrees one level deeper.
    tree['split'] = split
    tree['node'] = self.tree_.n_nodes
    self.tree_.n_nodes += 1
    tree['sl'] = self._tree_grow(y_true[filter_Xl], X[filter_Xl], cost_mat[filter_Xl], level + 1)
    tree['sr'] = self._tree_grow(y_true[filter_Xr], X[filter_Xr], cost_mat[filter_Xr], level + 1)
    return tree
|
def function[_tree_grow, parameter[self, y_true, X, cost_mat, level]]:
constant[ Private recursive function to grow the decision tree.
Parameters
----------
y_true : array indicator matrix
Ground truth (correct) labels.
X : array-like of shape = [n_samples, n_features]
The input samples.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
Returns
-------
Tree : Object
Container of the decision tree
NOTE: it is not the same structure as the sklearn.tree.tree object
]
if compare[call[name[len], parameter[name[X].shape]] equal[==] constant[1]] begin[:]
variable[tree] assign[=] call[name[dict], parameter[]]
return[name[tree]]
<ast.Tuple object at 0x7da1b122a620> assign[=] call[name[self]._best_split, parameter[name[y_true], name[X], name[cost_mat]]]
<ast.Tuple object at 0x7da1b122ab60> assign[=] name[X].shape
variable[tree] assign[=] call[name[dict], parameter[]]
if compare[name[gain] less[<] name[self].min_gain] begin[:]
return[name[tree]]
if compare[name[self].max_depth is_not constant[None]] begin[:]
if compare[name[level] greater_or_equal[>=] name[self].max_depth] begin[:]
return[name[tree]]
if compare[name[n_samples] less_or_equal[<=] name[self].min_samples_split] begin[:]
return[name[tree]]
<ast.Tuple object at 0x7da1b122a9b0> assign[=] name[split]
variable[filter_Xl] assign[=] compare[call[name[X]][tuple[[<ast.Slice object at 0x7da1b1229690>, <ast.Name object at 0x7da1b1229750>]]] less_or_equal[<=] name[l]]
variable[filter_Xr] assign[=] <ast.UnaryOp object at 0x7da1b1228d30>
variable[n_samples_Xl] assign[=] call[call[call[name[np].nonzero, parameter[name[filter_Xl]]]][constant[0]].shape][constant[0]]
variable[n_samples_Xr] assign[=] call[call[call[name[np].nonzero, parameter[name[filter_Xr]]]][constant[0]].shape][constant[0]]
if compare[call[name[min], parameter[name[n_samples_Xl], name[n_samples_Xr]]] less_or_equal[<=] name[self].min_samples_leaf] begin[:]
return[name[tree]]
call[name[tree]][constant[split]] assign[=] name[split]
call[name[tree]][constant[node]] assign[=] name[self].tree_.n_nodes
<ast.AugAssign object at 0x7da1b1228280>
call[name[tree]][constant[sl]] assign[=] call[name[self]._tree_grow, parameter[call[name[y_true]][name[filter_Xl]], call[name[X]][name[filter_Xl]], call[name[cost_mat]][name[filter_Xl]], binary_operation[name[level] + constant[1]]]]
call[name[tree]][constant[sr]] assign[=] call[name[self]._tree_grow, parameter[call[name[y_true]][name[filter_Xr]], call[name[X]][name[filter_Xr]], call[name[cost_mat]][name[filter_Xr]], binary_operation[name[level] + constant[1]]]]
return[name[tree]]
|
keyword[def] identifier[_tree_grow] ( identifier[self] , identifier[y_true] , identifier[X] , identifier[cost_mat] , identifier[level] = literal[int] ):
literal[string]
keyword[if] identifier[len] ( identifier[X] . identifier[shape] )== literal[int] :
identifier[tree] = identifier[dict] ( identifier[y_pred] = identifier[y_true] , identifier[y_prob] = literal[int] , identifier[level] = identifier[level] , identifier[split] =- literal[int] , identifier[n_samples] = literal[int] , identifier[gain] = literal[int] )
keyword[return] identifier[tree]
identifier[split] , identifier[gain] , identifier[Xl_pred] , identifier[y_pred] , identifier[y_prob] = identifier[self] . identifier[_best_split] ( identifier[y_true] , identifier[X] , identifier[cost_mat] )
identifier[n_samples] , identifier[n_features] = identifier[X] . identifier[shape]
identifier[tree] = identifier[dict] ( identifier[y_pred] = identifier[y_pred] , identifier[y_prob] = identifier[y_prob] , identifier[level] = identifier[level] , identifier[split] =- literal[int] , identifier[n_samples] = identifier[n_samples] , identifier[gain] = identifier[gain] )
keyword[if] identifier[gain] < identifier[self] . identifier[min_gain] :
keyword[return] identifier[tree]
keyword[if] identifier[self] . identifier[max_depth] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[level] >= identifier[self] . identifier[max_depth] :
keyword[return] identifier[tree]
keyword[if] identifier[n_samples] <= identifier[self] . identifier[min_samples_split] :
keyword[return] identifier[tree]
identifier[j] , identifier[l] = identifier[split]
identifier[filter_Xl] =( identifier[X] [:, identifier[j] ]<= identifier[l] )
identifier[filter_Xr] =~ identifier[filter_Xl]
identifier[n_samples_Xl] = identifier[np] . identifier[nonzero] ( identifier[filter_Xl] )[ literal[int] ]. identifier[shape] [ literal[int] ]
identifier[n_samples_Xr] = identifier[np] . identifier[nonzero] ( identifier[filter_Xr] )[ literal[int] ]. identifier[shape] [ literal[int] ]
keyword[if] identifier[min] ( identifier[n_samples_Xl] , identifier[n_samples_Xr] )<= identifier[self] . identifier[min_samples_leaf] :
keyword[return] identifier[tree]
identifier[tree] [ literal[string] ]= identifier[split]
identifier[tree] [ literal[string] ]= identifier[self] . identifier[tree_] . identifier[n_nodes]
identifier[self] . identifier[tree_] . identifier[n_nodes] += literal[int]
identifier[tree] [ literal[string] ]= identifier[self] . identifier[_tree_grow] ( identifier[y_true] [ identifier[filter_Xl] ], identifier[X] [ identifier[filter_Xl] ], identifier[cost_mat] [ identifier[filter_Xl] ], identifier[level] + literal[int] )
identifier[tree] [ literal[string] ]= identifier[self] . identifier[_tree_grow] ( identifier[y_true] [ identifier[filter_Xr] ], identifier[X] [ identifier[filter_Xr] ], identifier[cost_mat] [ identifier[filter_Xr] ], identifier[level] + literal[int] )
keyword[return] identifier[tree]
|
def _tree_grow(self, y_true, X, cost_mat, level=0):
""" Private recursive function to grow the decision tree.
Parameters
----------
y_true : array indicator matrix
Ground truth (correct) labels.
X : array-like of shape = [n_samples, n_features]
The input samples.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
Returns
-------
Tree : Object
Container of the decision tree
NOTE: it is not the same structure as the sklearn.tree.tree object
"""
#TODO: Find error, add min_samples_split
if len(X.shape) == 1:
tree = dict(y_pred=y_true, y_prob=0.5, level=level, split=-1, n_samples=1, gain=0)
return tree # depends on [control=['if'], data=[]]
# Calculate the best split of the current node
(split, gain, Xl_pred, y_pred, y_prob) = self._best_split(y_true, X, cost_mat)
(n_samples, n_features) = X.shape
# Construct the tree object as a dictionary
#TODO: Convert tree to be equal to sklearn.tree.tree object
tree = dict(y_pred=y_pred, y_prob=y_prob, level=level, split=-1, n_samples=n_samples, gain=gain)
# Check the stopping criteria
if gain < self.min_gain:
return tree # depends on [control=['if'], data=[]]
if self.max_depth is not None:
if level >= self.max_depth:
return tree # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if n_samples <= self.min_samples_split:
return tree # depends on [control=['if'], data=[]]
(j, l) = split
filter_Xl = X[:, j] <= l
filter_Xr = ~filter_Xl
n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0]
n_samples_Xr = np.nonzero(filter_Xr)[0].shape[0]
if min(n_samples_Xl, n_samples_Xr) <= self.min_samples_leaf:
return tree # depends on [control=['if'], data=[]]
# No stooping criteria is met
tree['split'] = split
tree['node'] = self.tree_.n_nodes
self.tree_.n_nodes += 1
tree['sl'] = self._tree_grow(y_true[filter_Xl], X[filter_Xl], cost_mat[filter_Xl], level + 1)
tree['sr'] = self._tree_grow(y_true[filter_Xr], X[filter_Xr], cost_mat[filter_Xr], level + 1)
return tree
|
def dist_calc_matrix(surf, cortex, labels, exceptions=('Unknown', 'Medial_wall'), verbose=True):
    """
    Calculate exact geodesic distance along cortical surface from set of source nodes.
    "labels" specifies the freesurfer label file to use. All values will be used other than those
    specified in "exceptions" (default: 'Unknown' and 'Medial_wall').

    Parameters are forwarded to surf_keep_cortex / translate_src unchanged.
    Note: the default for ``exceptions`` is now a tuple (immutable default);
    membership tests behave identically to the previous list default.

    returns:
      dist_mat: symmetrical nxn matrix of minimum distance between pairs of labels
      rois: label names in order of n
    """
    cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex)
    # remove exceptions from label list:
    label_list = sd.load.get_freesurfer_label(labels, verbose=False)
    rs = np.where([a not in exceptions for a in label_list])[0]
    rois = [label_list[r] for r in rs]
    if verbose:
        print("# of regions: " + str(len(rois)))
    # Calculate distance from each region to all nodes. The translated source
    # nodes are cached so the second pass below does not have to re-read the
    # label file and redo the translation for every ROI (previously duplicated).
    translated_nodes = []
    dist_roi = []
    for roi in rois:
        source_nodes = sd.load.load_freesurfer_label(labels, roi)
        translated_source_nodes = translate_src(source_nodes, cortex)
        translated_nodes.append(translated_source_nodes)
        dist_roi.append(gdist.compute_gdist(cortex_vertices, cortex_triangles,
                                            source_indices=translated_source_nodes))
        if verbose:
            print(roi)
    dist_roi = np.array(dist_roi)
    # Calculate min distance per region, reusing the cached node translations:
    dist_mat = []
    for translated_source_nodes in translated_nodes:
        dist_mat.append(np.min(dist_roi[:, translated_source_nodes], axis=1))
    dist_mat = np.array(dist_mat)
    return dist_mat, rois
|
def function[dist_calc_matrix, parameter[surf, cortex, labels, exceptions, verbose]]:
constant[
Calculate exact geodesic distance along cortical surface from set of source nodes.
"labels" specifies the freesurfer label file to use. All values will be used other than those
specified in "exceptions" (default: 'Unknown' and 'Medial_Wall').
returns:
dist_mat: symmetrical nxn matrix of minimum distance between pairs of labels
rois: label names in order of n
]
<ast.Tuple object at 0x7da18dc07f10> assign[=] call[name[surf_keep_cortex], parameter[name[surf], name[cortex]]]
variable[label_list] assign[=] call[name[sd].load.get_freesurfer_label, parameter[name[labels]]]
variable[rs] assign[=] call[call[name[np].where, parameter[<ast.ListComp object at 0x7da18dc07700>]]][constant[0]]
variable[rois] assign[=] <ast.ListComp object at 0x7da18dc043d0>
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[# of regions: ] + call[name[str], parameter[call[name[len], parameter[name[rois]]]]]]]]
variable[dist_roi] assign[=] list[[]]
for taget[name[roi]] in starred[name[rois]] begin[:]
variable[source_nodes] assign[=] call[name[sd].load.load_freesurfer_label, parameter[name[labels], name[roi]]]
variable[translated_source_nodes] assign[=] call[name[translate_src], parameter[name[source_nodes], name[cortex]]]
call[name[dist_roi].append, parameter[call[name[gdist].compute_gdist, parameter[name[cortex_vertices], name[cortex_triangles]]]]]
if name[verbose] begin[:]
call[name[print], parameter[name[roi]]]
variable[dist_roi] assign[=] call[name[np].array, parameter[name[dist_roi]]]
variable[dist_mat] assign[=] list[[]]
for taget[name[roi]] in starred[name[rois]] begin[:]
variable[source_nodes] assign[=] call[name[sd].load.load_freesurfer_label, parameter[name[labels], name[roi]]]
variable[translated_source_nodes] assign[=] call[name[translate_src], parameter[name[source_nodes], name[cortex]]]
call[name[dist_mat].append, parameter[call[name[np].min, parameter[call[name[dist_roi]][tuple[[<ast.Slice object at 0x7da18f00c400>, <ast.Name object at 0x7da18f00cc40>]]]]]]]
variable[dist_mat] assign[=] call[name[np].array, parameter[name[dist_mat]]]
return[tuple[[<ast.Name object at 0x7da18f00c940>, <ast.Name object at 0x7da18f00fe20>]]]
|
keyword[def] identifier[dist_calc_matrix] ( identifier[surf] , identifier[cortex] , identifier[labels] , identifier[exceptions] =[ literal[string] , literal[string] ], identifier[verbose] = keyword[True] ):
literal[string]
identifier[cortex_vertices] , identifier[cortex_triangles] = identifier[surf_keep_cortex] ( identifier[surf] , identifier[cortex] )
identifier[label_list] = identifier[sd] . identifier[load] . identifier[get_freesurfer_label] ( identifier[labels] , identifier[verbose] = keyword[False] )
identifier[rs] = identifier[np] . identifier[where] ([ identifier[a] keyword[not] keyword[in] identifier[exceptions] keyword[for] identifier[a] keyword[in] identifier[label_list] ])[ literal[int] ]
identifier[rois] =[ identifier[label_list] [ identifier[r] ] keyword[for] identifier[r] keyword[in] identifier[rs] ]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] + identifier[str] ( identifier[len] ( identifier[rois] )))
identifier[dist_roi] =[]
keyword[for] identifier[roi] keyword[in] identifier[rois] :
identifier[source_nodes] = identifier[sd] . identifier[load] . identifier[load_freesurfer_label] ( identifier[labels] , identifier[roi] )
identifier[translated_source_nodes] = identifier[translate_src] ( identifier[source_nodes] , identifier[cortex] )
identifier[dist_roi] . identifier[append] ( identifier[gdist] . identifier[compute_gdist] ( identifier[cortex_vertices] , identifier[cortex_triangles] ,
identifier[source_indices] = identifier[translated_source_nodes] ))
keyword[if] identifier[verbose] :
identifier[print] ( identifier[roi] )
identifier[dist_roi] = identifier[np] . identifier[array] ( identifier[dist_roi] )
identifier[dist_mat] =[]
keyword[for] identifier[roi] keyword[in] identifier[rois] :
identifier[source_nodes] = identifier[sd] . identifier[load] . identifier[load_freesurfer_label] ( identifier[labels] , identifier[roi] )
identifier[translated_source_nodes] = identifier[translate_src] ( identifier[source_nodes] , identifier[cortex] )
identifier[dist_mat] . identifier[append] ( identifier[np] . identifier[min] ( identifier[dist_roi] [:, identifier[translated_source_nodes] ], identifier[axis] = literal[int] ))
identifier[dist_mat] = identifier[np] . identifier[array] ( identifier[dist_mat] )
keyword[return] identifier[dist_mat] , identifier[rois]
|
def dist_calc_matrix(surf, cortex, labels, exceptions=['Unknown', 'Medial_wall'], verbose=True):
"""
Calculate exact geodesic distance along cortical surface from set of source nodes.
"labels" specifies the freesurfer label file to use. All values will be used other than those
specified in "exceptions" (default: 'Unknown' and 'Medial_Wall').
returns:
dist_mat: symmetrical nxn matrix of minimum distance between pairs of labels
rois: label names in order of n
"""
(cortex_vertices, cortex_triangles) = surf_keep_cortex(surf, cortex)
# remove exceptions from label list:
label_list = sd.load.get_freesurfer_label(labels, verbose=False)
rs = np.where([a not in exceptions for a in label_list])[0]
rois = [label_list[r] for r in rs]
if verbose:
print('# of regions: ' + str(len(rois))) # depends on [control=['if'], data=[]]
# calculate distance from each region to all nodes:
dist_roi = []
for roi in rois:
source_nodes = sd.load.load_freesurfer_label(labels, roi)
translated_source_nodes = translate_src(source_nodes, cortex)
dist_roi.append(gdist.compute_gdist(cortex_vertices, cortex_triangles, source_indices=translated_source_nodes))
if verbose:
print(roi) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['roi']]
dist_roi = np.array(dist_roi)
# Calculate min distance per region:
dist_mat = []
for roi in rois:
source_nodes = sd.load.load_freesurfer_label(labels, roi)
translated_source_nodes = translate_src(source_nodes, cortex)
dist_mat.append(np.min(dist_roi[:, translated_source_nodes], axis=1)) # depends on [control=['for'], data=['roi']]
dist_mat = np.array(dist_mat)
return (dist_mat, rois)
|
def angle(x1, y1, x2, y2):
    """Return the signed angle in degrees between vectors (x1, y1) and (x2, y2).

    The result is positive when (x2, y2) lies counter-clockwise of (x1, y1)
    and negative when it lies clockwise, in the range (-180.0, 180.0].

    Implemented with atan2 of the cross and dot products, which matches the
    previous acos-based formulation exactly but needs no clamping and does not
    divide by the vector magnitudes — so zero-length vectors no longer raise
    ZeroDivisionError (they yield 0.0).
    """
    from math import atan2, degrees  # local import: file-level imports not visible in this chunk
    cross = x1 * y2 - y1 * x2  # z-component of the 2-D cross product (sign of rotation)
    dot = x1 * x2 + y1 * y2
    return degrees(atan2(cross, dot))
|
def function[angle, parameter[x1, y1, x2, y2]]:
constant[ The angle in degrees between two vectors.
]
variable[sign] assign[=] constant[1.0]
variable[usign] assign[=] binary_operation[binary_operation[name[x1] * name[y2]] - binary_operation[name[y1] * name[x2]]]
if compare[name[usign] less[<] constant[0]] begin[:]
variable[sign] assign[=] <ast.UnaryOp object at 0x7da18dc04820>
variable[num] assign[=] binary_operation[binary_operation[name[x1] * name[x2]] + binary_operation[name[y1] * name[y2]]]
variable[den] assign[=] binary_operation[call[name[hypot], parameter[name[x1], name[y1]]] * call[name[hypot], parameter[name[x2], name[y2]]]]
variable[ratio] assign[=] call[name[min], parameter[call[name[max], parameter[binary_operation[name[num] / name[den]], <ast.UnaryOp object at 0x7da1b23446d0>]], constant[1.0]]]
return[binary_operation[name[sign] * call[name[degrees], parameter[call[name[acos], parameter[name[ratio]]]]]]]
|
keyword[def] identifier[angle] ( identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] ):
literal[string]
identifier[sign] = literal[int]
identifier[usign] =( identifier[x1] * identifier[y2] - identifier[y1] * identifier[x2] )
keyword[if] identifier[usign] < literal[int] :
identifier[sign] =- literal[int]
identifier[num] = identifier[x1] * identifier[x2] + identifier[y1] * identifier[y2]
identifier[den] = identifier[hypot] ( identifier[x1] , identifier[y1] )* identifier[hypot] ( identifier[x2] , identifier[y2] )
identifier[ratio] = identifier[min] ( identifier[max] ( identifier[num] / identifier[den] ,- literal[int] ), literal[int] )
keyword[return] identifier[sign] * identifier[degrees] ( identifier[acos] ( identifier[ratio] ))
|
def angle(x1, y1, x2, y2):
""" The angle in degrees between two vectors.
"""
sign = 1.0
usign = x1 * y2 - y1 * x2
if usign < 0:
sign = -1.0 # depends on [control=['if'], data=[]]
num = x1 * x2 + y1 * y2
den = hypot(x1, y1) * hypot(x2, y2)
ratio = min(max(num / den, -1.0), 1.0)
return sign * degrees(acos(ratio))
|
def _compute_jars_to_resolve_and_pin(raw_jars, artifact_set, manager):
    """
    This method provides settled lists of jar dependencies and coordinates
    based on conflict management.
    :param raw_jars: a collection of `JarDependencies`
    :param artifact_set: PinnedJarArtifactSet
    :param manager: JarDependencyManagement
    :return: (list of settled `JarDependency`, set of pinned `M2Coordinate`)
    """
    if artifact_set is None:
        artifact_set = PinnedJarArtifactSet()
    # Pinned coordinates that no direct dependency has claimed yet.
    unclaimed_pins = {M2Coordinate.create(coord) for coord in artifact_set}
    settled = list(raw_jars)
    for index, jar_dep in enumerate(settled):
        direct = M2Coordinate.create(jar_dep)
        if direct not in artifact_set:
            continue
        # This direct dependency is covered by a pin; mark the pin as claimed.
        pinned = artifact_set[direct]
        unclaimed_pins.remove(pinned)
        if direct.rev != pinned.rev:
            # Overrides apply only transitively, so a direct dependency whose
            # version differs from the pin must be rewritten here — which also
            # gives us explicit control over how the conflict is settled.
            winner = manager.resolve_version_conflict(pinned, direct, force=jar_dep.force)
            # Once a version is settled, we force it anyway.
            settled[index] = jar_dep.copy(rev=winner.rev, force=True)
    return settled, unclaimed_pins
|
def function[_compute_jars_to_resolve_and_pin, parameter[raw_jars, artifact_set, manager]]:
constant[
This method provides settled lists of jar dependencies and coordinates
based on conflict management.
:param raw_jars: a collection of `JarDependencies`
:param artifact_set: PinnedJarArtifactSet
:param manager: JarDependencyManagement
:return: (list of settled `JarDependency`, set of pinned `M2Coordinate`)
]
if compare[name[artifact_set] is constant[None]] begin[:]
variable[artifact_set] assign[=] call[name[PinnedJarArtifactSet], parameter[]]
variable[untouched_pinned_artifact] assign[=] <ast.SetComp object at 0x7da1b2290c10>
variable[jar_list] assign[=] call[name[list], parameter[name[raw_jars]]]
for taget[tuple[[<ast.Name object at 0x7da1b2290190>, <ast.Name object at 0x7da1b22908b0>]]] in starred[call[name[enumerate], parameter[name[jar_list]]]] begin[:]
variable[direct_coord] assign[=] call[name[M2Coordinate].create, parameter[name[dep]]]
if compare[name[direct_coord] in name[artifact_set]] begin[:]
variable[managed_coord] assign[=] call[name[artifact_set]][name[direct_coord]]
call[name[untouched_pinned_artifact].remove, parameter[name[managed_coord]]]
if compare[name[direct_coord].rev not_equal[!=] name[managed_coord].rev] begin[:]
variable[coord] assign[=] call[name[manager].resolve_version_conflict, parameter[name[managed_coord], name[direct_coord]]]
call[name[jar_list]][name[i]] assign[=] call[name[dep].copy, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b1d361d0>, <ast.Name object at 0x7da1b1d351e0>]]]
|
keyword[def] identifier[_compute_jars_to_resolve_and_pin] ( identifier[raw_jars] , identifier[artifact_set] , identifier[manager] ):
literal[string]
keyword[if] identifier[artifact_set] keyword[is] keyword[None] :
identifier[artifact_set] = identifier[PinnedJarArtifactSet] ()
identifier[untouched_pinned_artifact] ={ identifier[M2Coordinate] . identifier[create] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[artifact_set] }
identifier[jar_list] = identifier[list] ( identifier[raw_jars] )
keyword[for] identifier[i] , identifier[dep] keyword[in] identifier[enumerate] ( identifier[jar_list] ):
identifier[direct_coord] = identifier[M2Coordinate] . identifier[create] ( identifier[dep] )
keyword[if] identifier[direct_coord] keyword[in] identifier[artifact_set] :
identifier[managed_coord] = identifier[artifact_set] [ identifier[direct_coord] ]
identifier[untouched_pinned_artifact] . identifier[remove] ( identifier[managed_coord] )
keyword[if] identifier[direct_coord] . identifier[rev] != identifier[managed_coord] . identifier[rev] :
identifier[coord] = identifier[manager] . identifier[resolve_version_conflict] ( identifier[managed_coord] , identifier[direct_coord] , identifier[force] = identifier[dep] . identifier[force] )
identifier[jar_list] [ identifier[i] ]= identifier[dep] . identifier[copy] ( identifier[rev] = identifier[coord] . identifier[rev] , identifier[force] = keyword[True] )
keyword[return] identifier[jar_list] , identifier[untouched_pinned_artifact]
|
def _compute_jars_to_resolve_and_pin(raw_jars, artifact_set, manager):
"""
This method provides settled lists of jar dependencies and coordinates
based on conflict management.
:param raw_jars: a collection of `JarDependencies`
:param artifact_set: PinnedJarArtifactSet
:param manager: JarDependencyManagement
:return: (list of settled `JarDependency`, set of pinned `M2Coordinate`)
"""
if artifact_set is None:
artifact_set = PinnedJarArtifactSet() # depends on [control=['if'], data=['artifact_set']]
untouched_pinned_artifact = {M2Coordinate.create(x) for x in artifact_set}
jar_list = list(raw_jars)
for (i, dep) in enumerate(jar_list):
direct_coord = M2Coordinate.create(dep)
# Portion to manage pinned jars in case of conflict
if direct_coord in artifact_set:
managed_coord = artifact_set[direct_coord]
untouched_pinned_artifact.remove(managed_coord)
if direct_coord.rev != managed_coord.rev:
# It may be necessary to actually change the version number of the jar we want to resolve
# here, because overrides do not apply directly (they are exclusively transitive). This is
# actually a good thing, because it gives us more control over what happens.
coord = manager.resolve_version_conflict(managed_coord, direct_coord, force=dep.force)
# Once a version is settled, we force it anyway
jar_list[i] = dep.copy(rev=coord.rev, force=True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['direct_coord', 'artifact_set']] # depends on [control=['for'], data=[]]
return (jar_list, untouched_pinned_artifact)
|
async def skiplast(source, n):
    """Forward an asynchronous sequence, skipping the last ``n`` elements.
    If ``n`` is negative, no elements are skipped.
    Note: it is required to reach the ``n+1`` th element of the source
    before the first element is generated.
    """
    # A bounded deque holds the n most recent items; once it is full, its
    # oldest entry is safe to emit because n newer items now stand behind it.
    pending = collections.deque(maxlen=max(n, 0))
    async with streamcontext(source) as streamer:
        async for element in streamer:
            if n <= 0:
                # Nothing to hold back: pass every element straight through.
                yield element
            else:
                if len(pending) == n:
                    yield pending[0]
                # Appending to the full deque evicts the element just yielded.
                pending.append(element)
|
<ast.AsyncFunctionDef object at 0x7da18dc98f40>
|
keyword[async] keyword[def] identifier[skiplast] ( identifier[source] , identifier[n] ):
literal[string]
identifier[queue] = identifier[collections] . identifier[deque] ( identifier[maxlen] = identifier[n] keyword[if] identifier[n] > literal[int] keyword[else] literal[int] )
keyword[async] keyword[with] identifier[streamcontext] ( identifier[source] ) keyword[as] identifier[streamer] :
keyword[async] keyword[for] identifier[item] keyword[in] identifier[streamer] :
keyword[if] identifier[n] <= literal[int] :
keyword[yield] identifier[item]
keyword[continue]
keyword[if] identifier[len] ( identifier[queue] )== identifier[n] :
keyword[yield] identifier[queue] [ literal[int] ]
identifier[queue] . identifier[append] ( identifier[item] )
|
async def skiplast(source, n):
"""Forward an asynchronous sequence, skipping the last ``n`` elements.
If ``n`` is negative, no elements are skipped.
Note: it is required to reach the ``n+1`` th element of the source
before the first element is generated.
"""
queue = collections.deque(maxlen=n if n > 0 else 0)
async with streamcontext(source) as streamer:
async for item in streamer:
if n <= 0:
yield item
continue # depends on [control=['if'], data=[]]
if len(queue) == n:
yield queue[0] # depends on [control=['if'], data=[]]
queue.append(item)
|
def save(df, path, write_frequency=5000):
    """
    Args:
        df (DataFlow): the DataFlow to serialize.
        path (str): output path. Either a directory or an lmdb file.
        write_frequency (int): the frequency to write back data to disk.
    """
    assert isinstance(df, DataFlow), type(df)
    isdir = os.path.isdir(path)
    # Refuse to overwrite an existing database, whether `path` is a directory
    # (LMDB stores `data.mdb` inside it) or a single file.
    if isdir:
        assert not os.path.isfile(os.path.join(path, 'data.mdb')), "LMDB file exists!"
    else:
        assert not os.path.isfile(path), "LMDB file {} exists!".format(path)
    # map_size is an upper bound (2 TiB of address space, not allocation);
    # map_async defers flushing to disk, hence the explicit sync() at the end.
    db = lmdb.open(path, subdir=isdir,
                   map_size=1099511627776 * 2, readonly=False,
                   meminit=False, map_async=True)    # need sync() at the end
    size = _reset_df_and_get_size(df)
    with get_tqdm(total=size) as pbar:
        # idx stays -1 if df is empty, making the key list below empty too.
        idx = -1
        # LMDB transaction is not exception-safe!
        # although it has a context manager interface
        txn = db.begin(write=True)
        for idx, dp in enumerate(df):
            # Keys are zero-padded 8-digit indices so lexicographic key order
            # matches insertion order.
            txn.put(u'{:08}'.format(idx).encode('ascii'), dumps(dp))
            pbar.update()
            # Commit periodically so the pending transaction stays bounded.
            if (idx + 1) % write_frequency == 0:
                txn.commit()
                txn = db.begin(write=True)
        txn.commit()  # flush the final partial batch
        # Store the full key list under a reserved key for fast reloading.
        keys = [u'{:08}'.format(k).encode('ascii') for k in range(idx + 1)]
        with db.begin(write=True) as txn:
            txn.put(b'__keys__', dumps(keys))
        logger.info("Flushing database ...")
        db.sync()
    db.close()
|
def function[save, parameter[df, path, write_frequency]]:
constant[
Args:
df (DataFlow): the DataFlow to serialize.
path (str): output path. Either a directory or an lmdb file.
write_frequency (int): the frequency to write back data to disk.
]
assert[call[name[isinstance], parameter[name[df], name[DataFlow]]]]
variable[isdir] assign[=] call[name[os].path.isdir, parameter[name[path]]]
if name[isdir] begin[:]
assert[<ast.UnaryOp object at 0x7da20e74b070>]
variable[db] assign[=] call[name[lmdb].open, parameter[name[path]]]
variable[size] assign[=] call[name[_reset_df_and_get_size], parameter[name[df]]]
with call[name[get_tqdm], parameter[]] begin[:]
variable[idx] assign[=] <ast.UnaryOp object at 0x7da20e9b2f80>
variable[txn] assign[=] call[name[db].begin, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20e9b01c0>, <ast.Name object at 0x7da20e9b0ac0>]]] in starred[call[name[enumerate], parameter[name[df]]]] begin[:]
call[name[txn].put, parameter[call[call[constant[{:08}].format, parameter[name[idx]]].encode, parameter[constant[ascii]]], call[name[dumps], parameter[name[dp]]]]]
call[name[pbar].update, parameter[]]
if compare[binary_operation[binary_operation[name[idx] + constant[1]] <ast.Mod object at 0x7da2590d6920> name[write_frequency]] equal[==] constant[0]] begin[:]
call[name[txn].commit, parameter[]]
variable[txn] assign[=] call[name[db].begin, parameter[]]
call[name[txn].commit, parameter[]]
variable[keys] assign[=] <ast.ListComp object at 0x7da20e9b16f0>
with call[name[db].begin, parameter[]] begin[:]
call[name[txn].put, parameter[constant[b'__keys__'], call[name[dumps], parameter[name[keys]]]]]
call[name[logger].info, parameter[constant[Flushing database ...]]]
call[name[db].sync, parameter[]]
call[name[db].close, parameter[]]
|
keyword[def] identifier[save] ( identifier[df] , identifier[path] , identifier[write_frequency] = literal[int] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[df] , identifier[DataFlow] ), identifier[type] ( identifier[df] )
identifier[isdir] = identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] )
keyword[if] identifier[isdir] :
keyword[assert] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] )), literal[string]
keyword[else] :
keyword[assert] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ), literal[string] . identifier[format] ( identifier[path] )
identifier[db] = identifier[lmdb] . identifier[open] ( identifier[path] , identifier[subdir] = identifier[isdir] ,
identifier[map_size] = literal[int] * literal[int] , identifier[readonly] = keyword[False] ,
identifier[meminit] = keyword[False] , identifier[map_async] = keyword[True] )
identifier[size] = identifier[_reset_df_and_get_size] ( identifier[df] )
keyword[with] identifier[get_tqdm] ( identifier[total] = identifier[size] ) keyword[as] identifier[pbar] :
identifier[idx] =- literal[int]
identifier[txn] = identifier[db] . identifier[begin] ( identifier[write] = keyword[True] )
keyword[for] identifier[idx] , identifier[dp] keyword[in] identifier[enumerate] ( identifier[df] ):
identifier[txn] . identifier[put] ( literal[string] . identifier[format] ( identifier[idx] ). identifier[encode] ( literal[string] ), identifier[dumps] ( identifier[dp] ))
identifier[pbar] . identifier[update] ()
keyword[if] ( identifier[idx] + literal[int] )% identifier[write_frequency] == literal[int] :
identifier[txn] . identifier[commit] ()
identifier[txn] = identifier[db] . identifier[begin] ( identifier[write] = keyword[True] )
identifier[txn] . identifier[commit] ()
identifier[keys] =[ literal[string] . identifier[format] ( identifier[k] ). identifier[encode] ( literal[string] ) keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[idx] + literal[int] )]
keyword[with] identifier[db] . identifier[begin] ( identifier[write] = keyword[True] ) keyword[as] identifier[txn] :
identifier[txn] . identifier[put] ( literal[string] , identifier[dumps] ( identifier[keys] ))
identifier[logger] . identifier[info] ( literal[string] )
identifier[db] . identifier[sync] ()
identifier[db] . identifier[close] ()
|
def save(df, path, write_frequency=5000):
    """
    Args:
        df (DataFlow): the DataFlow to serialize.
        path (str): output path. Either a directory or an lmdb file.
        write_frequency (int): the frequency to write back data to disk.
    """
    assert isinstance(df, DataFlow), type(df)
    isdir = os.path.isdir(path)
    if isdir:
        assert not os.path.isfile(os.path.join(path, 'data.mdb')), 'LMDB file exists!'
    else:
        assert not os.path.isfile(path), 'LMDB file {} exists!'.format(path)
    # map_async writes are flushed explicitly by db.sync() at the end.
    db = lmdb.open(path, subdir=isdir,
                   map_size=1099511627776 * 2, readonly=False,
                   meminit=False, map_async=True)
    total = _reset_df_and_get_size(df)

    def encode_key(i):
        # zero-padded 8-digit ascii key, e.g. b'00000042'
        return u'{:08}'.format(i).encode('ascii')

    with get_tqdm(total=total) as pbar:
        last = -1  # index of the last datapoint written; stays -1 if df is empty
        # LMDB transactions are not exception-safe, so they are managed by
        # hand here rather than through their context-manager interface.
        txn = db.begin(write=True)
        for last, dp in enumerate(df):
            txn.put(encode_key(last), dumps(dp))
            pbar.update()
            if (last + 1) % write_frequency == 0:
                # commit periodically to bound the transaction size
                txn.commit()
                txn = db.begin(write=True)
        txn.commit()
        # record the ordered key list under a reserved key for later lookup
        keys = [encode_key(k) for k in range(last + 1)]
        with db.begin(write=True) as txn:
            txn.put(b'__keys__', dumps(keys))
        logger.info('Flushing database ...')
        db.sync()
    db.close()
|
def clean(self, value, model_instance):
    """
    Convert the value's type and run validation. Validation errors
    from to_python and validate are propagated. The correct value is
    returned if no error is raised.
    """
    # Keep the constant's *name* (a string), not the constant object itself.
    name = self.to_python(value).name
    self.validate(name, model_instance)
    self.run_validators(name)
    return name
|
def function[clean, parameter[self, value, model_instance]]:
constant[
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
]
variable[value] assign[=] call[name[self].to_python, parameter[name[value]]].name
call[name[self].validate, parameter[name[value], name[model_instance]]]
call[name[self].run_validators, parameter[name[value]]]
return[name[value]]
|
keyword[def] identifier[clean] ( identifier[self] , identifier[value] , identifier[model_instance] ):
literal[string]
identifier[value] = identifier[self] . identifier[to_python] ( identifier[value] ). identifier[name]
identifier[self] . identifier[validate] ( identifier[value] , identifier[model_instance] )
identifier[self] . identifier[run_validators] ( identifier[value] )
keyword[return] identifier[value]
|
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
#: return constant's name instead of constant itself
value = self.to_python(value).name
self.validate(value, model_instance)
self.run_validators(value)
return value
|
def surface_velocity(msg):
    """Decode surface velocity from a surface position message.

    Args:
        msg (string): 28 bytes hexadecimal message string

    Returns:
        (int, float, int, string): speed (kt), ground track (degree),
            rate of climb/descend (ft/min, always 0 for surface messages),
            and speed type ('GS' for ground speed, 'AS' for airspeed)

    Raises:
        RuntimeError: if the message type code is not in 5..8
            (surface position messages).
    """
    tc = common.typecode(msg)  # compute once instead of calling twice
    if tc < 5 or tc > 8:
        # the check accepts TC 5..8 inclusive; say so in the error
        raise RuntimeError("%s: Not a surface message, expecting 5<=TC<=8" % msg)

    mb = common.hex2bin(msg)[32:]

    # ground track (only valid when the status bit is set)
    trk_status = int(mb[12])
    if trk_status == 1:
        trk = common.bin2int(mb[13:20]) * 360.0 / 128.0
        trk = round(trk, 1)
    else:
        trk = None

    # ground movement / speed: 7-bit encoded value
    mov = common.bin2int(mb[5:12])

    if mov == 0 or mov > 124:
        spd = None          # not available / reserved
    elif mov == 1:
        spd = 0             # aircraft stopped
    elif mov == 124:
        spd = 175           # >= 175 kt saturates the encoding
    else:
        # piecewise-linear interpolation between the encoding breakpoints
        movs = [2, 9, 13, 39, 94, 109, 124]
        kts = [0.125, 1, 2, 15, 70, 100, 175]
        i = next(m[0] for m in enumerate(movs) if m[1] > mov)
        step = (kts[i] - kts[i-1]) * 1.0 / (movs[i] - movs[i-1])
        spd = kts[i-1] + (mov - movs[i-1]) * step
        spd = round(spd, 2)

    return spd, trk, 0, 'GS'
|
def function[surface_velocity, parameter[msg]]:
constant[Decode surface velocity from from a surface position message
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
(int, float, int, string): speed (kt), ground track (degree),
rate of climb/descend (ft/min), and speed type
('GS' for ground speed, 'AS' for airspeed)
]
if <ast.BoolOp object at 0x7da1b170fc10> begin[:]
<ast.Raise object at 0x7da1b170d510>
variable[mb] assign[=] call[call[name[common].hex2bin, parameter[name[msg]]]][<ast.Slice object at 0x7da1b170f8b0>]
variable[trk_status] assign[=] call[name[int], parameter[call[name[mb]][constant[12]]]]
if compare[name[trk_status] equal[==] constant[1]] begin[:]
variable[trk] assign[=] binary_operation[binary_operation[call[name[common].bin2int, parameter[call[name[mb]][<ast.Slice object at 0x7da18dc06e00>]]] * constant[360.0]] / constant[128.0]]
variable[trk] assign[=] call[name[round], parameter[name[trk], constant[1]]]
variable[mov] assign[=] call[name[common].bin2int, parameter[call[name[mb]][<ast.Slice object at 0x7da18dc06ec0>]]]
if <ast.BoolOp object at 0x7da18dc077c0> begin[:]
variable[spd] assign[=] constant[None]
return[tuple[[<ast.Name object at 0x7da18dc04af0>, <ast.Name object at 0x7da18dc05d20>, <ast.Constant object at 0x7da18dc05030>, <ast.Constant object at 0x7da18dc05a80>]]]
|
keyword[def] identifier[surface_velocity] ( identifier[msg] ):
literal[string]
keyword[if] identifier[common] . identifier[typecode] ( identifier[msg] )< literal[int] keyword[or] identifier[common] . identifier[typecode] ( identifier[msg] )> literal[int] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[msg] )
identifier[mb] = identifier[common] . identifier[hex2bin] ( identifier[msg] )[ literal[int] :]
identifier[trk_status] = identifier[int] ( identifier[mb] [ literal[int] ])
keyword[if] identifier[trk_status] == literal[int] :
identifier[trk] = identifier[common] . identifier[bin2int] ( identifier[mb] [ literal[int] : literal[int] ])* literal[int] / literal[int]
identifier[trk] = identifier[round] ( identifier[trk] , literal[int] )
keyword[else] :
identifier[trk] = keyword[None]
identifier[mov] = identifier[common] . identifier[bin2int] ( identifier[mb] [ literal[int] : literal[int] ])
keyword[if] identifier[mov] == literal[int] keyword[or] identifier[mov] > literal[int] :
identifier[spd] = keyword[None]
keyword[elif] identifier[mov] == literal[int] :
identifier[spd] = literal[int]
keyword[elif] identifier[mov] == literal[int] :
identifier[spd] = literal[int]
keyword[else] :
identifier[movs] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[kts] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[i] = identifier[next] ( identifier[m] [ literal[int] ] keyword[for] identifier[m] keyword[in] identifier[enumerate] ( identifier[movs] ) keyword[if] identifier[m] [ literal[int] ]> identifier[mov] )
identifier[step] =( identifier[kts] [ identifier[i] ]- identifier[kts] [ identifier[i] - literal[int] ])* literal[int] /( identifier[movs] [ identifier[i] ]- identifier[movs] [ identifier[i] - literal[int] ])
identifier[spd] = identifier[kts] [ identifier[i] - literal[int] ]+( identifier[mov] - identifier[movs] [ identifier[i] - literal[int] ])* identifier[step]
identifier[spd] = identifier[round] ( identifier[spd] , literal[int] )
keyword[return] identifier[spd] , identifier[trk] , literal[int] , literal[string]
|
def surface_velocity(msg):
"""Decode surface velocity from from a surface position message
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
(int, float, int, string): speed (kt), ground track (degree),
rate of climb/descend (ft/min), and speed type
('GS' for ground speed, 'AS' for airspeed)
"""
if common.typecode(msg) < 5 or common.typecode(msg) > 8:
raise RuntimeError('%s: Not a surface message, expecting 5<TC<8' % msg) # depends on [control=['if'], data=[]]
mb = common.hex2bin(msg)[32:]
# ground track
trk_status = int(mb[12])
if trk_status == 1:
trk = common.bin2int(mb[13:20]) * 360.0 / 128.0
trk = round(trk, 1) # depends on [control=['if'], data=[]]
else:
trk = None
# ground movment / speed
mov = common.bin2int(mb[5:12])
if mov == 0 or mov > 124:
spd = None # depends on [control=['if'], data=[]]
elif mov == 1:
spd = 0 # depends on [control=['if'], data=[]]
elif mov == 124:
spd = 175 # depends on [control=['if'], data=[]]
else:
movs = [2, 9, 13, 39, 94, 109, 124]
kts = [0.125, 1, 2, 15, 70, 100, 175]
i = next((m[0] for m in enumerate(movs) if m[1] > mov))
step = (kts[i] - kts[i - 1]) * 1.0 / (movs[i] - movs[i - 1])
spd = kts[i - 1] + (mov - movs[i - 1]) * step
spd = round(spd, 2)
return (spd, trk, 0, 'GS')
|
def iterator(n: Union[Node, CompatNodeIterator, dict],
             order: TreeOrder = TreeOrder.PRE_ORDER) -> CompatNodeIterator:
    """
    This function has the same signature as the pre-v3 iterator()
    call returning a compatibility CompatNodeIterator.
    """
    # Re-iterate an existing compatibility iterator with the given order.
    if isinstance(n, CompatNodeIterator):
        return CompatNodeIterator(n._nodeit.iterate(order), only_nodes=True)

    # Wrap a high-level Node via its internal representation.
    if isinstance(n, Node):
        it = native_iterator(n.internal_node, order)
        return CompatNodeIterator(NodeIterator(it), only_nodes=True)

    # A raw dict is iterated against the current uast context.
    if isinstance(n, dict):
        it = native_iterator(n, order)
        return CompatNodeIterator(NodeIterator(it, uast()), only_nodes=True)

    raise WrongTypeException(
        "iterator on non node or iterator type (%s)" % str(type(n))
    )
|
def function[iterator, parameter[n, order]]:
constant[
This function has the same signature as the pre-v3 iterator()
call returning a compatibility CompatNodeIterator.
]
if call[name[isinstance], parameter[name[n], name[CompatNodeIterator]]] begin[:]
return[call[name[CompatNodeIterator], parameter[call[name[n]._nodeit.iterate, parameter[name[order]]]]]]
|
keyword[def] identifier[iterator] ( identifier[n] : identifier[Union] [ identifier[Node] , identifier[CompatNodeIterator] , identifier[dict] ],
identifier[order] : identifier[TreeOrder] = identifier[TreeOrder] . identifier[PRE_ORDER] )-> identifier[CompatNodeIterator] :
literal[string]
keyword[if] identifier[isinstance] ( identifier[n] , identifier[CompatNodeIterator] ):
keyword[return] identifier[CompatNodeIterator] ( identifier[n] . identifier[_nodeit] . identifier[iterate] ( identifier[order] ), identifier[only_nodes] = keyword[True] )
keyword[elif] identifier[isinstance] ( identifier[n] , identifier[Node] ):
identifier[nat_it] = identifier[native_iterator] ( identifier[n] . identifier[internal_node] , identifier[order] )
keyword[return] identifier[CompatNodeIterator] ( identifier[NodeIterator] ( identifier[nat_it] ), identifier[only_nodes] = keyword[True] )
keyword[elif] identifier[isinstance] ( identifier[n] , identifier[dict] ):
identifier[nat_it] = identifier[native_iterator] ( identifier[n] , identifier[order] )
keyword[return] identifier[CompatNodeIterator] ( identifier[NodeIterator] ( identifier[nat_it] , identifier[uast] ()), identifier[only_nodes] = keyword[True] )
keyword[else] :
keyword[raise] identifier[WrongTypeException] (
literal[string] % identifier[str] ( identifier[type] ( identifier[n] ))
)
|
def iterator(n: Union[Node, CompatNodeIterator, dict], order: TreeOrder=TreeOrder.PRE_ORDER) -> CompatNodeIterator:
"""
This function has the same signature as the pre-v3 iterator()
call returning a compatibility CompatNodeIterator.
"""
if isinstance(n, CompatNodeIterator):
return CompatNodeIterator(n._nodeit.iterate(order), only_nodes=True) # depends on [control=['if'], data=[]]
elif isinstance(n, Node):
nat_it = native_iterator(n.internal_node, order)
return CompatNodeIterator(NodeIterator(nat_it), only_nodes=True) # depends on [control=['if'], data=[]]
elif isinstance(n, dict):
nat_it = native_iterator(n, order)
return CompatNodeIterator(NodeIterator(nat_it, uast()), only_nodes=True) # depends on [control=['if'], data=[]]
else:
raise WrongTypeException('iterator on non node or iterator type (%s)' % str(type(n)))
|
def shelter_find(self, **kwargs):
    """
    shelter.find wrapper. Returns a generator of shelter record dicts
    matching your search criteria.

    :rtype: generator
    :returns: A generator of shelter record dicts.
    :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have
        reached the maximum number of records your credentials allow you
        to receive.
    """
    def shelter_find_parser(root, has_records):
        """
        Parser used with ``_do_autopaginating_api_call`` for
        auto-pagination.

        :param lxml.etree._Element root: The root Element in the response.
        :param dict has_records: Loop-state dict, mutated in place (dicts
            are passed by reference, which is how the state gets back out).
        """
        for shelter in root.find("shelters"):
            has_records["has_records"] = True
            # one record dict per shelter element: tag -> text
            yield {field.tag: field.text for field in shelter}

    return self._do_autopaginating_api_call(
        "shelter.find", kwargs, shelter_find_parser
    )
|
def function[shelter_find, parameter[self]]:
constant[
shelter.find wrapper. Returns a generator of shelter record dicts
matching your search criteria.
:rtype: generator
:returns: A generator of shelter record dicts.
:raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have
reached the maximum number of records your credentials allow you
to receive.
]
def function[shelter_find_parser, parameter[root, has_records]]:
constant[
The parser that is used with the ``_do_autopaginating_api_call``
method for auto-pagination.
:param lxml.etree._Element root: The root Element in the response.
:param dict has_records: A dict that we track the loop state in.
dicts are passed by references, which is how this works.
]
for taget[name[shelter]] in starred[call[name[root].find, parameter[constant[shelters]]]] begin[:]
call[name[has_records]][constant[has_records]] assign[=] constant[True]
variable[record] assign[=] dictionary[[], []]
for taget[name[field]] in starred[name[shelter]] begin[:]
call[name[record]][name[field].tag] assign[=] name[field].text
<ast.Yield object at 0x7da1b0b47610>
return[call[name[self]._do_autopaginating_api_call, parameter[constant[shelter.find], name[kwargs], name[shelter_find_parser]]]]
|
keyword[def] identifier[shelter_find] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[shelter_find_parser] ( identifier[root] , identifier[has_records] ):
literal[string]
keyword[for] identifier[shelter] keyword[in] identifier[root] . identifier[find] ( literal[string] ):
identifier[has_records] [ literal[string] ]= keyword[True]
identifier[record] ={}
keyword[for] identifier[field] keyword[in] identifier[shelter] :
identifier[record] [ identifier[field] . identifier[tag] ]= identifier[field] . identifier[text]
keyword[yield] identifier[record]
keyword[return] identifier[self] . identifier[_do_autopaginating_api_call] (
literal[string] , identifier[kwargs] , identifier[shelter_find_parser]
)
|
def shelter_find(self, **kwargs):
"""
shelter.find wrapper. Returns a generator of shelter record dicts
matching your search criteria.
:rtype: generator
:returns: A generator of shelter record dicts.
:raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have
reached the maximum number of records your credentials allow you
to receive.
"""
def shelter_find_parser(root, has_records):
"""
The parser that is used with the ``_do_autopaginating_api_call``
method for auto-pagination.
:param lxml.etree._Element root: The root Element in the response.
:param dict has_records: A dict that we track the loop state in.
dicts are passed by references, which is how this works.
"""
for shelter in root.find('shelters'):
has_records['has_records'] = True
record = {}
for field in shelter:
record[field.tag] = field.text # depends on [control=['for'], data=['field']]
yield record # depends on [control=['for'], data=['shelter']]
return self._do_autopaginating_api_call('shelter.find', kwargs, shelter_find_parser)
|
def parse_value(cls, itype, value):
    """Parse the input value.

    Returns a tuple of validated integer components (or a float for the
    numeric input types), or ``None`` when the value does not match or
    fails validation.
    """
    if itype == "date":
        m = RE_DATE.match(value)
        if not m:
            return None
        year = int(m.group('year'), 10)
        month = int(m.group('month'), 10)
        day = int(m.group('day'), 10)
        ok = (cls.validate_year(year) and cls.validate_month(month) and
              cls.validate_day(year, month, day))
        return (year, month, day) if ok else None

    if itype == "month":
        m = RE_MONTH.match(value)
        if not m:
            return None
        year = int(m.group('year'), 10)
        month = int(m.group('month'), 10)
        ok = cls.validate_year(year) and cls.validate_month(month)
        return (year, month) if ok else None

    if itype == "week":
        m = RE_WEEK.match(value)
        if not m:
            return None
        year = int(m.group('year'), 10)
        week = int(m.group('week'), 10)
        ok = cls.validate_year(year) and cls.validate_week(year, week)
        return (year, week) if ok else None

    if itype == "time":
        m = RE_TIME.match(value)
        if not m:
            return None
        hour = int(m.group('hour'), 10)
        minutes = int(m.group('minutes'), 10)
        ok = cls.validate_hour(hour) and cls.validate_minutes(minutes)
        return (hour, minutes) if ok else None

    if itype == "datetime-local":
        m = RE_DATETIME.match(value)
        if not m:
            return None
        year = int(m.group('year'), 10)
        month = int(m.group('month'), 10)
        day = int(m.group('day'), 10)
        hour = int(m.group('hour'), 10)
        minutes = int(m.group('minutes'), 10)
        ok = (cls.validate_year(year) and cls.validate_month(month) and
              cls.validate_day(year, month, day) and
              cls.validate_hour(hour) and cls.validate_minutes(minutes))
        return (year, month, day, hour, minutes) if ok else None

    if itype in ("number", "range"):
        m = RE_NUM.match(value)
        return float(m.group('value')) if m else None

    # unrecognized input type
    return None
|
def function[parse_value, parameter[cls, itype, value]]:
constant[Parse the input value.]
variable[parsed] assign[=] constant[None]
if compare[name[itype] equal[==] constant[date]] begin[:]
variable[m] assign[=] call[name[RE_DATE].match, parameter[name[value]]]
if name[m] begin[:]
variable[year] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[year]]], constant[10]]]
variable[month] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[month]]], constant[10]]]
variable[day] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[day]]], constant[10]]]
if <ast.BoolOp object at 0x7da20c6e71c0> begin[:]
variable[parsed] assign[=] tuple[[<ast.Name object at 0x7da20c6e4a00>, <ast.Name object at 0x7da20c6e6350>, <ast.Name object at 0x7da20c6e6770>]]
return[name[parsed]]
|
keyword[def] identifier[parse_value] ( identifier[cls] , identifier[itype] , identifier[value] ):
literal[string]
identifier[parsed] = keyword[None]
keyword[if] identifier[itype] == literal[string] :
identifier[m] = identifier[RE_DATE] . identifier[match] ( identifier[value] )
keyword[if] identifier[m] :
identifier[year] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
identifier[month] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
identifier[day] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
keyword[if] identifier[cls] . identifier[validate_year] ( identifier[year] ) keyword[and] identifier[cls] . identifier[validate_month] ( identifier[month] ) keyword[and] identifier[cls] . identifier[validate_day] ( identifier[year] , identifier[month] , identifier[day] ):
identifier[parsed] =( identifier[year] , identifier[month] , identifier[day] )
keyword[elif] identifier[itype] == literal[string] :
identifier[m] = identifier[RE_MONTH] . identifier[match] ( identifier[value] )
keyword[if] identifier[m] :
identifier[year] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
identifier[month] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
keyword[if] identifier[cls] . identifier[validate_year] ( identifier[year] ) keyword[and] identifier[cls] . identifier[validate_month] ( identifier[month] ):
identifier[parsed] =( identifier[year] , identifier[month] )
keyword[elif] identifier[itype] == literal[string] :
identifier[m] = identifier[RE_WEEK] . identifier[match] ( identifier[value] )
keyword[if] identifier[m] :
identifier[year] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
identifier[week] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
keyword[if] identifier[cls] . identifier[validate_year] ( identifier[year] ) keyword[and] identifier[cls] . identifier[validate_week] ( identifier[year] , identifier[week] ):
identifier[parsed] =( identifier[year] , identifier[week] )
keyword[elif] identifier[itype] == literal[string] :
identifier[m] = identifier[RE_TIME] . identifier[match] ( identifier[value] )
keyword[if] identifier[m] :
identifier[hour] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
identifier[minutes] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
keyword[if] identifier[cls] . identifier[validate_hour] ( identifier[hour] ) keyword[and] identifier[cls] . identifier[validate_minutes] ( identifier[minutes] ):
identifier[parsed] =( identifier[hour] , identifier[minutes] )
keyword[elif] identifier[itype] == literal[string] :
identifier[m] = identifier[RE_DATETIME] . identifier[match] ( identifier[value] )
keyword[if] identifier[m] :
identifier[year] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
identifier[month] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
identifier[day] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
identifier[hour] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
identifier[minutes] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ), literal[int] )
keyword[if] (
identifier[cls] . identifier[validate_year] ( identifier[year] ) keyword[and] identifier[cls] . identifier[validate_month] ( identifier[month] ) keyword[and] identifier[cls] . identifier[validate_day] ( identifier[year] , identifier[month] , identifier[day] ) keyword[and]
identifier[cls] . identifier[validate_hour] ( identifier[hour] ) keyword[and] identifier[cls] . identifier[validate_minutes] ( identifier[minutes] )
):
identifier[parsed] =( identifier[year] , identifier[month] , identifier[day] , identifier[hour] , identifier[minutes] )
keyword[elif] identifier[itype] keyword[in] ( literal[string] , literal[string] ):
identifier[m] = identifier[RE_NUM] . identifier[match] ( identifier[value] )
keyword[if] identifier[m] :
identifier[parsed] = identifier[float] ( identifier[m] . identifier[group] ( literal[string] ))
keyword[return] identifier[parsed]
|
def parse_value(cls, itype, value):
"""Parse the input value."""
parsed = None
if itype == 'date':
m = RE_DATE.match(value)
if m:
year = int(m.group('year'), 10)
month = int(m.group('month'), 10)
day = int(m.group('day'), 10)
if cls.validate_year(year) and cls.validate_month(month) and cls.validate_day(year, month, day):
parsed = (year, month, day) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif itype == 'month':
m = RE_MONTH.match(value)
if m:
year = int(m.group('year'), 10)
month = int(m.group('month'), 10)
if cls.validate_year(year) and cls.validate_month(month):
parsed = (year, month) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif itype == 'week':
m = RE_WEEK.match(value)
if m:
year = int(m.group('year'), 10)
week = int(m.group('week'), 10)
if cls.validate_year(year) and cls.validate_week(year, week):
parsed = (year, week) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif itype == 'time':
m = RE_TIME.match(value)
if m:
hour = int(m.group('hour'), 10)
minutes = int(m.group('minutes'), 10)
if cls.validate_hour(hour) and cls.validate_minutes(minutes):
parsed = (hour, minutes) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif itype == 'datetime-local':
m = RE_DATETIME.match(value)
if m:
year = int(m.group('year'), 10)
month = int(m.group('month'), 10)
day = int(m.group('day'), 10)
hour = int(m.group('hour'), 10)
minutes = int(m.group('minutes'), 10)
if cls.validate_year(year) and cls.validate_month(month) and cls.validate_day(year, month, day) and cls.validate_hour(hour) and cls.validate_minutes(minutes):
parsed = (year, month, day, hour, minutes) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif itype in ('number', 'range'):
m = RE_NUM.match(value)
if m:
parsed = float(m.group('value')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return parsed
|
def _constructClient(client_version, username, user_domain, password, project_name, project_domain,
                     auth_url):
    """Return a novaclient from the given args."""
    loader = loading.get_plugin_loader('password')
    options = dict(auth_url=auth_url, username=username,
                   password=password, project_name=project_name)
    # Domain names are only understood by the v3 identity API, so they are
    # passed through only when at least one of them was supplied.
    if user_domain is not None or project_domain is not None:
        options['user_domain_name'] = user_domain
        options['project_domain_name'] = project_domain
    auth = loader.load_from_options(**options)
    sess = session.Session(auth=auth)
    return client.Client(client_version, session=sess)
|
def function[_constructClient, parameter[client_version, username, user_domain, password, project_name, project_domain, auth_url]]:
constant[Return a novaclient from the given args.]
variable[loader] assign[=] call[name[loading].get_plugin_loader, parameter[constant[password]]]
if <ast.BoolOp object at 0x7da2044c2ce0> begin[:]
variable[auth] assign[=] call[name[loader].load_from_options, parameter[]]
variable[sess] assign[=] call[name[session].Session, parameter[]]
return[call[name[client].Client, parameter[name[client_version]]]]
|
keyword[def] identifier[_constructClient] ( identifier[client_version] , identifier[username] , identifier[user_domain] , identifier[password] , identifier[project_name] , identifier[project_domain] ,
identifier[auth_url] ):
literal[string]
identifier[loader] = identifier[loading] . identifier[get_plugin_loader] ( literal[string] )
keyword[if] identifier[user_domain] keyword[is] keyword[not] keyword[None] keyword[or] identifier[project_domain] keyword[is] keyword[not] keyword[None] :
identifier[auth] = identifier[loader] . identifier[load_from_options] ( identifier[auth_url] = identifier[auth_url] , identifier[username] = identifier[username] , identifier[user_domain_name] = identifier[user_domain] ,
identifier[password] = identifier[password] , identifier[project_name] = identifier[project_name] , identifier[project_domain_name] = identifier[project_domain] )
keyword[else] :
identifier[auth] = identifier[loader] . identifier[load_from_options] ( identifier[auth_url] = identifier[auth_url] , identifier[username] = identifier[username] ,
identifier[password] = identifier[password] , identifier[project_name] = identifier[project_name] )
identifier[sess] = identifier[session] . identifier[Session] ( identifier[auth] = identifier[auth] )
keyword[return] identifier[client] . identifier[Client] ( identifier[client_version] , identifier[session] = identifier[sess] )
|
def _constructClient(client_version, username, user_domain, password, project_name, project_domain, auth_url):
"""Return a novaclient from the given args."""
loader = loading.get_plugin_loader('password')
# These only work with v3
if user_domain is not None or project_domain is not None:
auth = loader.load_from_options(auth_url=auth_url, username=username, user_domain_name=user_domain, password=password, project_name=project_name, project_domain_name=project_domain) # depends on [control=['if'], data=[]]
else:
auth = loader.load_from_options(auth_url=auth_url, username=username, password=password, project_name=project_name)
sess = session.Session(auth=auth)
return client.Client(client_version, session=sess)
|
async def lookup(source_id: str, schema_id: str):
    """
    Create a new schema object from an existing ledger schema
    :param source_id: Institution's personal identification for the schema
    :param schema_id: Ledger schema ID for lookup
    Example:
    source_id = 'foobar123'
    name = 'Address Schema'
    version = '1.0'
    attrs = ['address', 'city', 'state']
    payment_handle = 0
    schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
    id1 = await schema.get_schema_id()
    data = await Schema.lookup(source_id, schema_id)
    assert data.attrs.sort() == ['sex', 'age', 'name', 'height'].sort()
    assert data.name == 'test-licence'
    assert data.handle > 0
    :return: schema object
    """
    try:
        # Start from an empty Schema; its fields are populated below from
        # the attribute data fetched off the ledger.
        schema = Schema(source_id, '', '', [])
        if not hasattr(Schema.lookup, "cb"):
            # Create the ctypes callback once and cache it on the function
            # object so it is reused and kept alive across calls.
            schema.logger.debug("vcx_schema_get_attributes: Creating callback")
            Schema.lookup.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_uint32, c_char_p))
        c_source_id = c_char_p(source_id.encode('utf-8'))
        c_schema_id = c_char_p(schema_id.encode('utf-8'))
        handle, data = await do_call('vcx_schema_get_attributes',
                                     c_source_id,
                                     c_schema_id,
                                     Schema.lookup.cb)
        schema.logger.debug("created schema object")
        # `data` is a JSON byte string; decode it and copy the ledger
        # schema's fields onto the local Schema object.
        schema_result = json.loads(data.decode())
        schema.attrs = schema_result['data']
        schema.name = schema_result['name']
        schema.version = schema_result['version']
        schema.handle = handle
        return schema
    except KeyError:
        # A key missing from the response payload means the ledger entry
        # does not have the shape of a valid schema.
        raise VcxError(ErrorCode.InvalidSchema)
|
<ast.AsyncFunctionDef object at 0x7da2041dab00>
|
keyword[async] keyword[def] identifier[lookup] ( identifier[source_id] : identifier[str] , identifier[schema_id] : identifier[str] ):
literal[string]
keyword[try] :
identifier[schema] = identifier[Schema] ( identifier[source_id] , literal[string] , literal[string] ,[])
keyword[if] keyword[not] identifier[hasattr] ( identifier[Schema] . identifier[lookup] , literal[string] ):
identifier[schema] . identifier[logger] . identifier[debug] ( literal[string] )
identifier[Schema] . identifier[lookup] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_uint32] , identifier[c_uint32] , identifier[c_uint32] , identifier[c_char_p] ))
identifier[c_source_id] = identifier[c_char_p] ( identifier[source_id] . identifier[encode] ( literal[string] ))
identifier[c_schema_id] = identifier[c_char_p] ( identifier[schema_id] . identifier[encode] ( literal[string] ))
identifier[handle] , identifier[data] = keyword[await] identifier[do_call] ( literal[string] ,
identifier[c_source_id] ,
identifier[c_schema_id] ,
identifier[Schema] . identifier[lookup] . identifier[cb] )
identifier[schema] . identifier[logger] . identifier[debug] ( literal[string] )
identifier[schema_result] = identifier[json] . identifier[loads] ( identifier[data] . identifier[decode] ())
identifier[schema] . identifier[attrs] = identifier[schema_result] [ literal[string] ]
identifier[schema] . identifier[name] = identifier[schema_result] [ literal[string] ]
identifier[schema] . identifier[version] = identifier[schema_result] [ literal[string] ]
identifier[schema] . identifier[handle] = identifier[handle]
keyword[return] identifier[schema]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[VcxError] ( identifier[ErrorCode] . identifier[InvalidSchema] )
|
async def lookup(source_id: str, schema_id: str):
    """
    Create a new schema object from an existing ledger schema.

    :param source_id: Institution's personal identification for the schema
    :param schema_id: Ledger schema ID for lookup
    :return: schema object populated with the attrs/name/version returned
        by the ledger and the native handle of the lookup result.
    :raises VcxError: ErrorCode.InvalidSchema when the ledger response is
        missing an expected field.

    Example:
        source_id = 'foobar123'
        name = 'Address Schema'
        version = '1.0'
        attrs = ['address', 'city', 'state']
        payment_handle = 0
        schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
        id1 = await schema.get_schema_id()
        data = await Schema.lookup(source_id, schema_id)
        assert data.attrs.sort() == ['sex', 'age', 'name', 'height'].sort()
        assert data.name == 'test-licence'
        assert data.handle > 0
    """
    try:
        # Start from an empty Schema shell; its fields are filled in from
        # the ledger response below.
        schema = Schema(source_id, '', '', [])
        if not hasattr(Schema.lookup, 'cb'):
            schema.logger.debug('vcx_schema_get_attributes: Creating callback')
            # Cache the ctypes callback on the function object so it is
            # created once and kept alive for the duration of the process
            # (a garbage-collected callback would crash the native layer).
            Schema.lookup.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_uint32, c_char_p))
        c_source_id = c_char_p(source_id.encode('utf-8'))
        c_schema_id = c_char_p(schema_id.encode('utf-8'))
        (handle, data) = await do_call('vcx_schema_get_attributes', c_source_id, c_schema_id, Schema.lookup.cb)
        schema.logger.debug('created schema object')
        # The native call returns JSON bytes describing the schema.
        schema_result = json.loads(data.decode())
        schema.attrs = schema_result['data']
        schema.name = schema_result['name']
        schema.version = schema_result['version']
        schema.handle = handle
        return schema
    except KeyError:
        # A missing key means the response did not describe a valid schema.
        raise VcxError(ErrorCode.InvalidSchema)
|
def wait_for_completion(self, job_id, timeout=None):
    """
    Wait for the job given by job_id to reach a terminal state: COMPLETED,
    FAILED or CANCELED. Raises a iceqube.exceptions.TimeoutError if timeout
    is exceeded before each job state change.

    :param job_id: the id of the job to wait for.
    :param timeout: how long to wait for a job state change before timing out.
    :return: the job object once it has reached a terminal state.
    """
    # self.wait returns on every state change; keep polling until the job
    # lands in a terminal state. (The old `else: continue` was redundant.)
    while True:
        job = self.wait(job_id, timeout=timeout)
        if job.state in (State.COMPLETED, State.FAILED, State.CANCELED):
            return job
|
def function[wait_for_completion, parameter[self, job_id, timeout]]:
constant[
Wait for the job given by job_id to change to COMPLETED or CANCELED. Raises a
iceqube.exceptions.TimeoutError if timeout is exceeded before each job change.
:param job_id: the id of the job to wait for.
:param timeout: how long to wait for a job state change before timing out.
]
while constant[1] begin[:]
variable[job] assign[=] call[name[self].wait, parameter[name[job_id]]]
if compare[name[job].state in list[[<ast.Attribute object at 0x7da1b0474a90>, <ast.Attribute object at 0x7da1b0474a30>, <ast.Attribute object at 0x7da1b0474160>]]] begin[:]
return[name[job]]
|
keyword[def] identifier[wait_for_completion] ( identifier[self] , identifier[job_id] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[while] literal[int] :
identifier[job] = identifier[self] . identifier[wait] ( identifier[job_id] , identifier[timeout] = identifier[timeout] )
keyword[if] identifier[job] . identifier[state] keyword[in] [ identifier[State] . identifier[COMPLETED] , identifier[State] . identifier[FAILED] , identifier[State] . identifier[CANCELED] ]:
keyword[return] identifier[job]
keyword[else] :
keyword[continue]
|
def wait_for_completion(self, job_id, timeout=None):
    """
    Wait for the job given by job_id to reach a terminal state: COMPLETED,
    FAILED or CANCELED. Raises a iceqube.exceptions.TimeoutError if timeout
    is exceeded before each job state change.

    :param job_id: the id of the job to wait for.
    :param timeout: how long to wait for a job state change before timing out.
    :return: the job object once it has reached a terminal state.
    """
    # self.wait returns on every state change; keep polling until the job
    # lands in a terminal state. (The old `else: continue` was redundant.)
    while True:
        job = self.wait(job_id, timeout=timeout)
        if job.state in (State.COMPLETED, State.FAILED, State.CANCELED):
            return job
|
def _find_corresponding_multicol_key(key, keys_multicol):
"""Find the corresponding multicolumn key."""
for mk in keys_multicol:
if key.startswith(mk) and 'of' in key:
return mk
return None
|
def function[_find_corresponding_multicol_key, parameter[key, keys_multicol]]:
constant[Find the corresponding multicolumn key.]
for taget[name[mk]] in starred[name[keys_multicol]] begin[:]
if <ast.BoolOp object at 0x7da1b206baf0> begin[:]
return[name[mk]]
return[constant[None]]
|
keyword[def] identifier[_find_corresponding_multicol_key] ( identifier[key] , identifier[keys_multicol] ):
literal[string]
keyword[for] identifier[mk] keyword[in] identifier[keys_multicol] :
keyword[if] identifier[key] . identifier[startswith] ( identifier[mk] ) keyword[and] literal[string] keyword[in] identifier[key] :
keyword[return] identifier[mk]
keyword[return] keyword[None]
|
def _find_corresponding_multicol_key(key, keys_multicol):
"""Find the corresponding multicolumn key."""
for mk in keys_multicol:
if key.startswith(mk) and 'of' in key:
return mk # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['mk']]
return None
|
def get_by_id(self, id_networkv4):
    """Fetch a single IPv4 network by its identifier.

    :param id_networkv4: ID for NetworkIPv4
    :return: IPv4 Network
    """
    return super(ApiNetworkIPv4, self).get('api/networkv4/%s/' % id_networkv4)
|
def function[get_by_id, parameter[self, id_networkv4]]:
constant[Get IPv4 network
:param id_networkv4: ID for NetworkIPv4
:return: IPv4 Network
]
variable[uri] assign[=] binary_operation[constant[api/networkv4/%s/] <ast.Mod object at 0x7da2590d6920> name[id_networkv4]]
return[call[call[name[super], parameter[name[ApiNetworkIPv4], name[self]]].get, parameter[name[uri]]]]
|
keyword[def] identifier[get_by_id] ( identifier[self] , identifier[id_networkv4] ):
literal[string]
identifier[uri] = literal[string] % identifier[id_networkv4]
keyword[return] identifier[super] ( identifier[ApiNetworkIPv4] , identifier[self] ). identifier[get] ( identifier[uri] )
|
def get_by_id(self, id_networkv4):
    """Fetch a single IPv4 network by its identifier.

    :param id_networkv4: ID for NetworkIPv4
    :return: IPv4 Network
    """
    return super(ApiNetworkIPv4, self).get('api/networkv4/%s/' % id_networkv4)
|
def importSignedCertificate(self, alias, certFile):
    """
    Import a certificate authority (CA) signed SSL certificate into the
    key store under the certificate entry identified by ``alias``.

    :param alias: name of the certificate entry to update.
    :param certFile: the signed certificate file to upload.
    :return: result of the underlying POST request.
    """
    target = self._url + \
        "/sslCertificates/{cert}/importSignedCertificate".format(cert=alias)
    return self._post(
        url=target,
        files={"file": certFile},
        param_dict={"f": "json"},
        proxy_port=self._proxy_port,
        proxy_url=self._proxy_url,
    )
|
def function[importSignedCertificate, parameter[self, alias, certFile]]:
constant[
This operation imports a certificate authority (CA) signed SSL
certificate into the key store.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1248130>], [<ast.Constant object at 0x7da1b1248700>]]
variable[files] assign[=] dictionary[[<ast.Constant object at 0x7da1b124b460>], [<ast.Name object at 0x7da18dc98f40>]]
variable[url] assign[=] binary_operation[name[self]._url + call[constant[/sslCertificates/{cert}/importSignedCertificate].format, parameter[]]]
return[call[name[self]._post, parameter[]]]
|
keyword[def] identifier[importSignedCertificate] ( identifier[self] , identifier[alias] , identifier[certFile] ):
literal[string]
identifier[params] ={ literal[string] : literal[string] }
identifier[files] ={ literal[string] : identifier[certFile] }
identifier[url] = identifier[self] . identifier[_url] + literal[string] . identifier[format] ( identifier[cert] = identifier[alias] )
keyword[return] identifier[self] . identifier[_post] ( identifier[url] = identifier[url] ,
identifier[files] = identifier[files] ,
identifier[param_dict] = identifier[params] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] )
|
def importSignedCertificate(self, alias, certFile):
    """
    Import a certificate authority (CA) signed SSL certificate into the
    key store under the certificate entry identified by ``alias``.

    :param alias: name of the certificate entry to update.
    :param certFile: the signed certificate file to upload.
    :return: result of the underlying POST request.
    """
    target = self._url + \
        '/sslCertificates/{cert}/importSignedCertificate'.format(cert=alias)
    return self._post(
        url=target,
        files={'file': certFile},
        param_dict={'f': 'json'},
        proxy_port=self._proxy_port,
        proxy_url=self._proxy_url,
    )
|
def int_check(*args, func=None):
    """Check if arguments are integrals.

    Raises ComplexError naming the offending type when any positional
    argument is not a ``numbers.Integral``.  When ``func`` is not given,
    the calling function's name is recovered from the interpreter stack.
    """
    # Frame index 2 skips this function and its direct (decorator) caller.
    func = func or inspect.stack()[2][3]
    for var in args:
        if isinstance(var, numbers.Integral):
            continue
        name = type(var).__name__
        raise ComplexError(
            f'Function {func} expected integral number, {name} got instead.')
|
def function[int_check, parameter[]]:
constant[Check if arguments are integrals.]
variable[func] assign[=] <ast.BoolOp object at 0x7da18bc721a0>
for taget[name[var]] in starred[name[args]] begin[:]
if <ast.UnaryOp object at 0x7da18bc710c0> begin[:]
variable[name] assign[=] call[name[type], parameter[name[var]]].__name__
<ast.Raise object at 0x7da18bc70b20>
|
keyword[def] identifier[int_check] (* identifier[args] , identifier[func] = keyword[None] ):
literal[string]
identifier[func] = identifier[func] keyword[or] identifier[inspect] . identifier[stack] ()[ literal[int] ][ literal[int] ]
keyword[for] identifier[var] keyword[in] identifier[args] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[var] , identifier[numbers] . identifier[Integral] ):
identifier[name] = identifier[type] ( identifier[var] ). identifier[__name__]
keyword[raise] identifier[ComplexError] (
literal[string] )
|
def int_check(*args, func=None):
    """Check if arguments are integrals.

    Raises ComplexError naming the offending type when any positional
    argument is not a ``numbers.Integral``.  When ``func`` is not given,
    the calling function's name is recovered from the interpreter stack.
    """
    # Frame index 2 skips this function and its direct (decorator) caller.
    func = func or inspect.stack()[2][3]
    for var in args:
        if isinstance(var, numbers.Integral):
            continue
        name = type(var).__name__
        raise ComplexError(
            f'Function {func} expected integral number, {name} got instead.')
|
def subnet_delete(name, virtual_network, resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Delete a subnet.

    :param name: The name of the subnet to delete.
    :param virtual_network: The virtual network name containing the
        subnet.
    :param resource_group: The resource group name assigned to the
        virtual network.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.subnet_delete testsubnet testnet testgroup
    '''
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        # delete() returns a poller; block until the operation finishes.
        poller = netconn.subnets.delete(
            resource_group_name=resource_group,
            virtual_network_name=virtual_network,
            subnet_name=name,
        )
        poller.wait()
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        return False
    return True
|
def function[subnet_delete, parameter[name, virtual_network, resource_group]]:
constant[
.. versionadded:: 2019.2.0
Delete a subnet.
:param name: The name of the subnet to delete.
:param virtual_network: The virtual network name containing the
subnet.
:param resource_group: The resource group name assigned to the
virtual network.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.subnet_delete testsubnet testnet testgroup
]
variable[result] assign[=] constant[False]
variable[netconn] assign[=] call[call[name[__utils__]][constant[azurearm.get_client]], parameter[constant[network]]]
<ast.Try object at 0x7da20c6a9f90>
return[name[result]]
|
keyword[def] identifier[subnet_delete] ( identifier[name] , identifier[virtual_network] , identifier[resource_group] ,** identifier[kwargs] ):
literal[string]
identifier[result] = keyword[False]
identifier[netconn] = identifier[__utils__] [ literal[string] ]( literal[string] ,** identifier[kwargs] )
keyword[try] :
identifier[subnet] = identifier[netconn] . identifier[subnets] . identifier[delete] (
identifier[resource_group_name] = identifier[resource_group] ,
identifier[virtual_network_name] = identifier[virtual_network] ,
identifier[subnet_name] = identifier[name]
)
identifier[subnet] . identifier[wait] ()
identifier[result] = keyword[True]
keyword[except] identifier[CloudError] keyword[as] identifier[exc] :
identifier[__utils__] [ literal[string] ]( literal[string] , identifier[str] ( identifier[exc] ),** identifier[kwargs] )
keyword[return] identifier[result]
|
def subnet_delete(name, virtual_network, resource_group, **kwargs):
    """
    .. versionadded:: 2019.2.0

    Delete a subnet.

    :param name: The name of the subnet to delete.
    :param virtual_network: The virtual network name containing the
        subnet.
    :param resource_group: The resource group name assigned to the
        virtual network.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.subnet_delete testsubnet testnet testgroup
    """
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        # delete() returns a poller; block until the operation finishes.
        poller = netconn.subnets.delete(
            resource_group_name=resource_group,
            virtual_network_name=virtual_network,
            subnet_name=name,
        )
        poller.wait()
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        return False
    return True
|
def tab(self, n=1, interval=0, pre_dl=None, post_dl=None):
    """Tap the ``tab`` key ``n`` times, pausing ``interval`` seconds
    between taps, with optional delays before and after the batch.
    """
    self.delay(pre_dl)
    keyboard = self.k
    keyboard.tap_key(keyboard.tab_key, n, interval)
    self.delay(post_dl)
|
def function[tab, parameter[self, n, interval, pre_dl, post_dl]]:
constant[Tap ``tab`` key for ``n`` times, with ``interval`` seconds of interval.
**中文文档**
以 ``interval`` 中定义的频率按下某个tab键 ``n`` 次。
]
call[name[self].delay, parameter[name[pre_dl]]]
call[name[self].k.tap_key, parameter[name[self].k.tab_key, name[n], name[interval]]]
call[name[self].delay, parameter[name[post_dl]]]
|
keyword[def] identifier[tab] ( identifier[self] , identifier[n] = literal[int] , identifier[interval] = literal[int] , identifier[pre_dl] = keyword[None] , identifier[post_dl] = keyword[None] ):
literal[string]
identifier[self] . identifier[delay] ( identifier[pre_dl] )
identifier[self] . identifier[k] . identifier[tap_key] ( identifier[self] . identifier[k] . identifier[tab_key] , identifier[n] , identifier[interval] )
identifier[self] . identifier[delay] ( identifier[post_dl] )
|
def tab(self, n=1, interval=0, pre_dl=None, post_dl=None):
    """Tap the ``tab`` key ``n`` times, pausing ``interval`` seconds
    between taps, with optional delays before and after the batch.
    """
    self.delay(pre_dl)
    keyboard = self.k
    keyboard.tap_key(keyboard.tab_key, n, interval)
    self.delay(post_dl)
|
def file_set_properties(object_id, input_params=None, always_retry=True, **kwargs):
    """
    Invokes the /file-xxxx/setProperties API method.

    :param object_id: ID of the file object ("file-xxxx") to modify.
    :param input_params: dict of API input parameters (defaults to an
        empty dict).
    :param always_retry: whether the HTTP request may always be retried.
    :return: result of the DXHTTPRequest call.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Properties#API-method%3A-%2Fclass-xxxx%2FsetProperties
    """
    # None sentinel instead of a mutable `{}` default so one dict instance
    # is never shared across calls.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/setProperties' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
|
def function[file_set_properties, parameter[object_id, input_params, always_retry]]:
constant[
Invokes the /file-xxxx/setProperties API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Properties#API-method%3A-%2Fclass-xxxx%2FsetProperties
]
return[call[name[DXHTTPRequest], parameter[binary_operation[constant[/%s/setProperties] <ast.Mod object at 0x7da2590d6920> name[object_id]], name[input_params]]]]
|
keyword[def] identifier[file_set_properties] ( identifier[object_id] , identifier[input_params] ={}, identifier[always_retry] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[DXHTTPRequest] ( literal[string] % identifier[object_id] , identifier[input_params] , identifier[always_retry] = identifier[always_retry] ,** identifier[kwargs] )
|
def file_set_properties(object_id, input_params=None, always_retry=True, **kwargs):
    """
    Invokes the /file-xxxx/setProperties API method.

    :param object_id: ID of the file object ("file-xxxx") to modify.
    :param input_params: dict of API input parameters (defaults to an
        empty dict).
    :param always_retry: whether the HTTP request may always be retried.
    :return: result of the DXHTTPRequest call.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Properties#API-method%3A-%2Fclass-xxxx%2FsetProperties
    """
    # None sentinel instead of a mutable `{}` default so one dict instance
    # is never shared across calls.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/setProperties' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
|
def add_firewall_rule(self, direction, action, src=None, dst=None):
    """Adds a firewall rule to the router.

    The TunTap router includes a very simple firewall for governing vassal's traffic.
    The first matching rule stops the chain, if no rule applies, the policy is "allow".

    :param str|unicode direction: Direction:

        * in
        * out

    :param str|unicode action: Action:

        * allow
        * deny

    :param str|unicode src: Source/mask.
    :param str|unicode dst: Destination/mask
    """
    value = [action]
    if src:
        value.append(src)
        # Only include the destination when it was actually given; the old
        # `value.extend((src, dst))` put None in the list when dst was
        # omitted and made ' '.join() raise TypeError.
        if dst:
            value.append(dst)
    self._set_aliased('router-firewall-%s' % direction.lower(), ' '.join(value), multi=True)
    return self
|
def function[add_firewall_rule, parameter[self, direction, action, src, dst]]:
constant[Adds a firewall rule to the router.
The TunTap router includes a very simple firewall for governing vassal's traffic.
The first matching rule stops the chain, if no rule applies, the policy is "allow".
:param str|unicode direction: Direction:
* in
* out
:param str|unicode action: Action:
* allow
* deny
:param str|unicode src: Source/mask.
:param str|unicode dst: Destination/mask
]
variable[value] assign[=] list[[<ast.Name object at 0x7da20c6a8e80>]]
if name[src] begin[:]
call[name[value].extend, parameter[tuple[[<ast.Name object at 0x7da1b10d6a70>, <ast.Name object at 0x7da1b10d4040>]]]]
call[name[self]._set_aliased, parameter[binary_operation[constant[router-firewall-%s] <ast.Mod object at 0x7da2590d6920> call[name[direction].lower, parameter[]]], call[constant[ ].join, parameter[name[value]]]]]
return[name[self]]
|
keyword[def] identifier[add_firewall_rule] ( identifier[self] , identifier[direction] , identifier[action] , identifier[src] = keyword[None] , identifier[dst] = keyword[None] ):
literal[string]
identifier[value] =[ identifier[action] ]
keyword[if] identifier[src] :
identifier[value] . identifier[extend] (( identifier[src] , identifier[dst] ))
identifier[self] . identifier[_set_aliased] ( literal[string] % identifier[direction] . identifier[lower] (), literal[string] . identifier[join] ( identifier[value] ), identifier[multi] = keyword[True] )
keyword[return] identifier[self]
|
def add_firewall_rule(self, direction, action, src=None, dst=None):
    """Adds a firewall rule to the router.

    The TunTap router includes a very simple firewall for governing vassal's traffic.
    The first matching rule stops the chain, if no rule applies, the policy is "allow".

    :param str|unicode direction: Direction:

        * in
        * out

    :param str|unicode action: Action:

        * allow
        * deny

    :param str|unicode src: Source/mask.
    :param str|unicode dst: Destination/mask
    """
    value = [action]
    if src:
        value.append(src)
        # Only include the destination when it was actually given; the old
        # `value.extend((src, dst))` put None in the list when dst was
        # omitted and made ' '.join() raise TypeError.
        if dst:
            value.append(dst)
    self._set_aliased('router-firewall-%s' % direction.lower(), ' '.join(value), multi=True)
    return self
|
def unpack(self, fmt):
    """
    Unpack ``fmt`` from the underlying stream and return the parsed
    values.  Raises an UnpackException when the stream cannot supply
    enough bytes to satisfy ``fmt``.
    """
    sfmt = compile_struct(fmt)
    needed = sfmt.size
    if not self.data:
        # Nothing left to read at all.
        raise UnpackException(fmt, needed, 0)
    buff = self.data.read(needed)
    if len(buff) >= needed:
        return sfmt.unpack(buff)
    # Short read: report how many bytes we actually got.
    raise UnpackException(fmt, needed, len(buff))
|
def function[unpack, parameter[self, fmt]]:
constant[
unpacks the given fmt from the underlying stream and returns the
results. Will raise an UnpackException if there is not enough
data to satisfy the fmt
]
variable[sfmt] assign[=] call[name[compile_struct], parameter[name[fmt]]]
variable[size] assign[=] name[sfmt].size
if <ast.UnaryOp object at 0x7da20e963d90> begin[:]
<ast.Raise object at 0x7da1b0cced70>
variable[buff] assign[=] call[name[self].data.read, parameter[name[size]]]
if compare[call[name[len], parameter[name[buff]]] less[<] name[size]] begin[:]
<ast.Raise object at 0x7da1b0c885e0>
return[call[name[sfmt].unpack, parameter[name[buff]]]]
|
keyword[def] identifier[unpack] ( identifier[self] , identifier[fmt] ):
literal[string]
identifier[sfmt] = identifier[compile_struct] ( identifier[fmt] )
identifier[size] = identifier[sfmt] . identifier[size]
keyword[if] keyword[not] identifier[self] . identifier[data] :
keyword[raise] identifier[UnpackException] ( identifier[fmt] , identifier[size] , literal[int] )
identifier[buff] = identifier[self] . identifier[data] . identifier[read] ( identifier[size] )
keyword[if] identifier[len] ( identifier[buff] )< identifier[size] :
keyword[raise] identifier[UnpackException] ( identifier[fmt] , identifier[size] , identifier[len] ( identifier[buff] ))
keyword[return] identifier[sfmt] . identifier[unpack] ( identifier[buff] )
|
def unpack(self, fmt):
    """
    Unpack ``fmt`` from the underlying stream and return the parsed
    values.  Raises an UnpackException when the stream cannot supply
    enough bytes to satisfy ``fmt``.
    """
    sfmt = compile_struct(fmt)
    needed = sfmt.size
    if not self.data:
        # Nothing left to read at all.
        raise UnpackException(fmt, needed, 0)
    buff = self.data.read(needed)
    if len(buff) >= needed:
        return sfmt.unpack(buff)
    # Short read: report how many bytes we actually got.
    raise UnpackException(fmt, needed, len(buff))
|
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse is None:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
sparse_type = spmatrix.format
if dtype is None:
dtype = spmatrix.dtype
if sparse_type in accept_sparse:
# correct type
if dtype == spmatrix.dtype:
# correct dtype
if copy:
spmatrix = spmatrix.copy()
else:
# convert dtype
spmatrix = spmatrix.astype(dtype)
else:
# create new
spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype)
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
if hasattr(spmatrix, "data"):
spmatrix.data = np.array(spmatrix.data, copy=False, order=order)
return spmatrix
|
def function[_ensure_sparse_format, parameter[spmatrix, accept_sparse, dtype, order, copy, force_all_finite]]:
constant[Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
]
if compare[name[accept_sparse] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c6c4580>
variable[sparse_type] assign[=] name[spmatrix].format
if compare[name[dtype] is constant[None]] begin[:]
variable[dtype] assign[=] name[spmatrix].dtype
if compare[name[sparse_type] in name[accept_sparse]] begin[:]
if compare[name[dtype] equal[==] name[spmatrix].dtype] begin[:]
if name[copy] begin[:]
variable[spmatrix] assign[=] call[name[spmatrix].copy, parameter[]]
if name[force_all_finite] begin[:]
if <ast.UnaryOp object at 0x7da18eb54c70> begin[:]
call[name[warnings].warn, parameter[binary_operation[constant[Can't check %s sparse matrix for nan or inf.] <ast.Mod object at 0x7da2590d6920> name[spmatrix].format]]]
if call[name[hasattr], parameter[name[spmatrix], constant[data]]] begin[:]
name[spmatrix].data assign[=] call[name[np].array, parameter[name[spmatrix].data]]
return[name[spmatrix]]
|
keyword[def] identifier[_ensure_sparse_format] ( identifier[spmatrix] , identifier[accept_sparse] , identifier[dtype] , identifier[order] , identifier[copy] ,
identifier[force_all_finite] ):
literal[string]
keyword[if] identifier[accept_sparse] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string]
literal[string] )
identifier[sparse_type] = identifier[spmatrix] . identifier[format]
keyword[if] identifier[dtype] keyword[is] keyword[None] :
identifier[dtype] = identifier[spmatrix] . identifier[dtype]
keyword[if] identifier[sparse_type] keyword[in] identifier[accept_sparse] :
keyword[if] identifier[dtype] == identifier[spmatrix] . identifier[dtype] :
keyword[if] identifier[copy] :
identifier[spmatrix] = identifier[spmatrix] . identifier[copy] ()
keyword[else] :
identifier[spmatrix] = identifier[spmatrix] . identifier[astype] ( identifier[dtype] )
keyword[else] :
identifier[spmatrix] = identifier[spmatrix] . identifier[asformat] ( identifier[accept_sparse] [ literal[int] ]). identifier[astype] ( identifier[dtype] )
keyword[if] identifier[force_all_finite] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[spmatrix] , literal[string] ):
identifier[warnings] . identifier[warn] ( literal[string]
% identifier[spmatrix] . identifier[format] )
keyword[else] :
identifier[_assert_all_finite] ( identifier[spmatrix] . identifier[data] )
keyword[if] identifier[hasattr] ( identifier[spmatrix] , literal[string] ):
identifier[spmatrix] . identifier[data] = identifier[np] . identifier[array] ( identifier[spmatrix] . identifier[data] , identifier[copy] = keyword[False] , identifier[order] = identifier[order] )
keyword[return] identifier[spmatrix]
|
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy, force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse is None:
raise TypeError('A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.') # depends on [control=['if'], data=[]]
sparse_type = spmatrix.format
if dtype is None:
dtype = spmatrix.dtype # depends on [control=['if'], data=['dtype']]
if sparse_type in accept_sparse:
# correct type
if dtype == spmatrix.dtype:
# correct dtype
if copy:
spmatrix = spmatrix.copy() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# convert dtype
spmatrix = spmatrix.astype(dtype) # depends on [control=['if'], data=[]]
else:
# create new
spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype)
if force_all_finite:
if not hasattr(spmatrix, 'data'):
warnings.warn("Can't check %s sparse matrix for nan or inf." % spmatrix.format) # depends on [control=['if'], data=[]]
else:
_assert_all_finite(spmatrix.data) # depends on [control=['if'], data=[]]
if hasattr(spmatrix, 'data'):
spmatrix.data = np.array(spmatrix.data, copy=False, order=order) # depends on [control=['if'], data=[]]
return spmatrix
|
def get_metrics(awsclient, name):
    """Print out cloudformation metrics for a lambda function.

    :param awsclient: client wrapper exposing ``get_client``.
    :param name: name of the lambda function.
    :return: exit_code
    """
    client_cw = awsclient.get_client('cloudwatch')
    for metric in ('Duration', 'Errors', 'Invocations', 'Throttles'):
        # One-hour buckets over the last 24 hours, summed per bucket.
        response = client_cw.get_metric_statistics(
            Namespace='AWS/Lambda',
            MetricName=metric,
            Dimensions=[{'Name': 'FunctionName', 'Value': name}],
            StartTime=maya.now().subtract(days=1).datetime(),
            EndTime=maya.now().datetime(),
            Period=3600,
            Statistics=['Sum'],
            Unit=unit(metric),
        )
        log.info('\t%s %s' % (metric,
                              repr(aggregate_datapoints(response['Datapoints']))))
    return 0
|
def function[get_metrics, parameter[awsclient, name]]:
constant[Print out cloudformation metrics for a lambda function.
:param awsclient
:param name: name of the lambda function
:return: exit_code
]
variable[metrics] assign[=] list[[<ast.Constant object at 0x7da20e74b3d0>, <ast.Constant object at 0x7da20e748640>, <ast.Constant object at 0x7da20e74a770>, <ast.Constant object at 0x7da20e74bd90>]]
variable[client_cw] assign[=] call[name[awsclient].get_client, parameter[constant[cloudwatch]]]
for taget[name[metric]] in starred[name[metrics]] begin[:]
variable[response] assign[=] call[name[client_cw].get_metric_statistics, parameter[]]
call[name[log].info, parameter[binary_operation[constant[ %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204621c60>, <ast.Call object at 0x7da2046221d0>]]]]]
return[constant[0]]
|
keyword[def] identifier[get_metrics] ( identifier[awsclient] , identifier[name] ):
literal[string]
identifier[metrics] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[client_cw] = identifier[awsclient] . identifier[get_client] ( literal[string] )
keyword[for] identifier[metric] keyword[in] identifier[metrics] :
identifier[response] = identifier[client_cw] . identifier[get_metric_statistics] (
identifier[Namespace] = literal[string] ,
identifier[MetricName] = identifier[metric] ,
identifier[Dimensions] =[
{
literal[string] : literal[string] ,
literal[string] : identifier[name]
},
],
identifier[StartTime] = identifier[maya] . identifier[now] (). identifier[subtract] ( identifier[days] = literal[int] ). identifier[datetime] (),
identifier[EndTime] = identifier[maya] . identifier[now] (). identifier[datetime] (),
identifier[Period] = literal[int] ,
identifier[Statistics] =[
literal[string] ,
],
identifier[Unit] = identifier[unit] ( identifier[metric] )
)
identifier[log] . identifier[info] ( literal[string] %( identifier[metric] ,
identifier[repr] ( identifier[aggregate_datapoints] ( identifier[response] [ literal[string] ]))))
keyword[return] literal[int]
|
def get_metrics(awsclient, name):
    """Print out cloudformation metrics for a lambda function.

    :param awsclient: client wrapper exposing ``get_client``.
    :param name: name of the lambda function.
    :return: exit_code
    """
    client_cw = awsclient.get_client('cloudwatch')
    for metric in ('Duration', 'Errors', 'Invocations', 'Throttles'):
        # One-hour buckets over the last 24 hours, summed per bucket.
        response = client_cw.get_metric_statistics(
            Namespace='AWS/Lambda',
            MetricName=metric,
            Dimensions=[{'Name': 'FunctionName', 'Value': name}],
            StartTime=maya.now().subtract(days=1).datetime(),
            EndTime=maya.now().datetime(),
            Period=3600,
            Statistics=['Sum'],
            Unit=unit(metric),
        )
        log.info('\t%s %s' % (metric,
                              repr(aggregate_datapoints(response['Datapoints']))))
    return 0
|
def getrawblob(self, project_id, sha1):
    """Fetch the raw file contents of a blob identified by its SHA.

    :param project_id: The ID of a project
    :param sha1: the commit sha
    :return: the raw blob content on HTTP 200, otherwise ``False``
    """
    url = '{0}/{1}/repository/raw_blobs/{2}'.format(
        self.projects_url, project_id, sha1)
    response = requests.get(url, verify=self.verify_ssl, auth=self.auth,
                            headers=self.headers, timeout=self.timeout)
    # Any non-200 status (404, 403, ...) is reported as a plain False.
    if response.status_code != 200:
        return False
    return response.content
|
def function[getrawblob, parameter[self, project_id, sha1]]:
constant[
Get the raw file contents for a blob by blob SHA.
:param project_id: The ID of a project
:param sha1: the commit sha
:return: raw blob
]
variable[request] assign[=] call[name[requests].get, parameter[call[constant[{0}/{1}/repository/raw_blobs/{2}].format, parameter[name[self].projects_url, name[project_id], name[sha1]]]]]
if compare[name[request].status_code equal[==] constant[200]] begin[:]
return[name[request].content]
|
keyword[def] identifier[getrawblob] ( identifier[self] , identifier[project_id] , identifier[sha1] ):
literal[string]
identifier[request] = identifier[requests] . identifier[get] (
literal[string] . identifier[format] ( identifier[self] . identifier[projects_url] , identifier[project_id] , identifier[sha1] ),
identifier[verify] = identifier[self] . identifier[verify_ssl] , identifier[auth] = identifier[self] . identifier[auth] , identifier[headers] = identifier[self] . identifier[headers] , identifier[timeout] = identifier[self] . identifier[timeout] )
keyword[if] identifier[request] . identifier[status_code] == literal[int] :
keyword[return] identifier[request] . identifier[content]
keyword[else] :
keyword[return] keyword[False]
|
def getrawblob(self, project_id, sha1):
"""
Get the raw file contents for a blob by blob SHA.
:param project_id: The ID of a project
:param sha1: the commit sha
:return: raw blob
"""
request = requests.get('{0}/{1}/repository/raw_blobs/{2}'.format(self.projects_url, project_id, sha1), verify=self.verify_ssl, auth=self.auth, headers=self.headers, timeout=self.timeout)
if request.status_code == 200:
return request.content # depends on [control=['if'], data=[]]
else:
return False
|
def remove_neighbours(self):
    """
    Removes from the MOC instance the HEALPix cells located at its border.
    The depth of the HEALPix cells removed is equal to the maximum depth of the MOC instance.
    Returns
    -------
    moc : `~mocpy.moc.MOC`
        self minus its HEALPix cells located at its border.
    """
    # Get the HEALPix cells of the MOC at its max depth
    ipix = self._best_res_pixels()
    # nside = 2**max_order; 'nested' ordering — presumably matching the MOC
    # pixel numbering scheme (TODO confirm against _neighbour_pixels).
    hp = HEALPix(nside=(1 << self.max_order), order='nested')
    # Extend it to include the max depth neighbor cells.
    extend_ipix = AbstractMOC._neighbour_pixels(hp, ipix)
    # Get only the max depth HEALPix cells lying at the border of the MOC
    # (cells in the extended set but not in the MOC itself).
    neigh_ipix = np.setxor1d(extend_ipix, ipix)
    # Remove these pixels from ``ipix``: the MOC cells adjacent to that
    # outside ring are exactly the MOC's own border cells.
    border_ipix = AbstractMOC._neighbour_pixels(hp, neigh_ipix)
    reduced_ipix = np.setdiff1d(ipix, border_ipix)
    # Build the reduced MOC, i.e. MOC without its pixels which were located at its border.
    # Each max-order pixel p maps to the half-open interval
    # [p << shift, (p + 1) << shift) at the fixed maximal order used for storage.
    shift = 2 * (AbstractMOC.HPY_MAX_NORDER - self.max_order)
    reduced_itv = np.vstack((reduced_ipix << shift, (reduced_ipix + 1) << shift)).T
    self._interval_set = IntervalSet(reduced_itv)
    return self
|
def function[remove_neighbours, parameter[self]]:
constant[
Removes from the MOC instance the HEALPix cells located at its border.
The depth of the HEALPix cells removed is equal to the maximum depth of the MOC instance.
Returns
-------
moc : `~mocpy.moc.MOC`
self minus its HEALPix cells located at its border.
]
variable[ipix] assign[=] call[name[self]._best_res_pixels, parameter[]]
variable[hp] assign[=] call[name[HEALPix], parameter[]]
variable[extend_ipix] assign[=] call[name[AbstractMOC]._neighbour_pixels, parameter[name[hp], name[ipix]]]
variable[neigh_ipix] assign[=] call[name[np].setxor1d, parameter[name[extend_ipix], name[ipix]]]
variable[border_ipix] assign[=] call[name[AbstractMOC]._neighbour_pixels, parameter[name[hp], name[neigh_ipix]]]
variable[reduced_ipix] assign[=] call[name[np].setdiff1d, parameter[name[ipix], name[border_ipix]]]
variable[shift] assign[=] binary_operation[constant[2] * binary_operation[name[AbstractMOC].HPY_MAX_NORDER - name[self].max_order]]
variable[reduced_itv] assign[=] call[name[np].vstack, parameter[tuple[[<ast.BinOp object at 0x7da18c4ce7d0>, <ast.BinOp object at 0x7da18c4cfe20>]]]].T
name[self]._interval_set assign[=] call[name[IntervalSet], parameter[name[reduced_itv]]]
return[name[self]]
|
keyword[def] identifier[remove_neighbours] ( identifier[self] ):
literal[string]
identifier[ipix] = identifier[self] . identifier[_best_res_pixels] ()
identifier[hp] = identifier[HEALPix] ( identifier[nside] =( literal[int] << identifier[self] . identifier[max_order] ), identifier[order] = literal[string] )
identifier[extend_ipix] = identifier[AbstractMOC] . identifier[_neighbour_pixels] ( identifier[hp] , identifier[ipix] )
identifier[neigh_ipix] = identifier[np] . identifier[setxor1d] ( identifier[extend_ipix] , identifier[ipix] )
identifier[border_ipix] = identifier[AbstractMOC] . identifier[_neighbour_pixels] ( identifier[hp] , identifier[neigh_ipix] )
identifier[reduced_ipix] = identifier[np] . identifier[setdiff1d] ( identifier[ipix] , identifier[border_ipix] )
identifier[shift] = literal[int] *( identifier[AbstractMOC] . identifier[HPY_MAX_NORDER] - identifier[self] . identifier[max_order] )
identifier[reduced_itv] = identifier[np] . identifier[vstack] (( identifier[reduced_ipix] << identifier[shift] ,( identifier[reduced_ipix] + literal[int] )<< identifier[shift] )). identifier[T]
identifier[self] . identifier[_interval_set] = identifier[IntervalSet] ( identifier[reduced_itv] )
keyword[return] identifier[self]
|
def remove_neighbours(self):
"""
Removes from the MOC instance the HEALPix cells located at its border.
The depth of the HEALPix cells removed is equal to the maximum depth of the MOC instance.
Returns
-------
moc : `~mocpy.moc.MOC`
self minus its HEALPix cells located at its border.
"""
# Get the HEALPix cells of the MOC at its max depth
ipix = self._best_res_pixels()
hp = HEALPix(nside=1 << self.max_order, order='nested')
# Extend it to include the max depth neighbor cells.
extend_ipix = AbstractMOC._neighbour_pixels(hp, ipix)
# Get only the max depth HEALPix cells lying at the border of the MOC
neigh_ipix = np.setxor1d(extend_ipix, ipix)
# Remove these pixels from ``ipix``
border_ipix = AbstractMOC._neighbour_pixels(hp, neigh_ipix)
reduced_ipix = np.setdiff1d(ipix, border_ipix)
# Build the reduced MOC, i.e. MOC without its pixels which were located at its border.
shift = 2 * (AbstractMOC.HPY_MAX_NORDER - self.max_order)
reduced_itv = np.vstack((reduced_ipix << shift, reduced_ipix + 1 << shift)).T
self._interval_set = IntervalSet(reduced_itv)
return self
|
def fix_paths(project_data, rel_path, extensions):
    """Normalize path entries in *project_data* relative to *rel_path*.

    For every key in ``extensions`` the matching value in ``project_data``
    is rewritten in place:

    * dict value: every entry of every sub-key's list is normalized,
    * list value: every entry is normalized,
    * anything else: the single value is normalized.

    :param project_data: mapping that is mutated in place
    :param rel_path: base directory the paths are joined onto
    :param extensions: iterable of keys in *project_data* to process
    """
    def norm(path):
        # Join onto the base dir and collapse '.' / '..' segments.
        return os.path.normpath(os.path.join(rel_path, path))

    for key in extensions:
        value = project_data[key]
        # isinstance (not `type(...) is`) so dict/list subclasses such as
        # OrderedDict are handled instead of falling through to norm().
        if isinstance(value, dict):
            for k, v in value.items():
                value[k] = [norm(i) for i in v]
        elif isinstance(value, list):
            project_data[key] = [norm(i) for i in value]
        else:
            project_data[key] = norm(value)
|
def function[fix_paths, parameter[project_data, rel_path, extensions]]:
constant[ Fix paths for extension list ]
variable[norm_func] assign[=] <ast.Lambda object at 0x7da1b0c92bc0>
for taget[name[key]] in starred[name[extensions]] begin[:]
if compare[call[name[type], parameter[call[name[project_data]][name[key]]]] is name[dict]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0c92c20>, <ast.Name object at 0x7da1b0c91510>]]] in starred[call[call[name[project_data]][name[key]].items, parameter[]]] begin[:]
call[call[name[project_data]][name[key]]][name[k]] assign[=] <ast.ListComp object at 0x7da1b0c938e0>
|
keyword[def] identifier[fix_paths] ( identifier[project_data] , identifier[rel_path] , identifier[extensions] ):
literal[string]
identifier[norm_func] = keyword[lambda] identifier[path] : identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[rel_path] , identifier[path] ))
keyword[for] identifier[key] keyword[in] identifier[extensions] :
keyword[if] identifier[type] ( identifier[project_data] [ identifier[key] ]) keyword[is] identifier[dict] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[project_data] [ identifier[key] ]. identifier[items] ():
identifier[project_data] [ identifier[key] ][ identifier[k] ]=[ identifier[norm_func] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[v] ]
keyword[elif] identifier[type] ( identifier[project_data] [ identifier[key] ]) keyword[is] identifier[list] :
identifier[project_data] [ identifier[key] ]=[ identifier[norm_func] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[project_data] [ identifier[key] ]]
keyword[else] :
identifier[project_data] [ identifier[key] ]= identifier[norm_func] ( identifier[project_data] [ identifier[key] ])
|
def fix_paths(project_data, rel_path, extensions):
""" Fix paths for extension list """
norm_func = lambda path: os.path.normpath(os.path.join(rel_path, path))
for key in extensions:
if type(project_data[key]) is dict:
for (k, v) in project_data[key].items():
project_data[key][k] = [norm_func(i) for i in v] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif type(project_data[key]) is list:
project_data[key] = [norm_func(i) for i in project_data[key]] # depends on [control=['if'], data=[]]
else:
project_data[key] = norm_func(project_data[key]) # depends on [control=['for'], data=['key']]
|
def list():
    """Show every TensorBoard instance this machine knows about.

    Instances that died without cleanup (e.g. SIGKILL or SIGQUIT) may
    still appear here, and the listing may miss entries if the operating
    system's temporary directory was cleared after a still-running
    instance started.
    """
    infos = manager.get_all()
    if not infos:
        print("No known TensorBoard instances running.")
        return
    print("Known TensorBoard instances:")
    # One formatted line per instance; the template is loop-invariant.
    template = " - port {port}: {data_source} (started {delta} ago; pid {pid})"
    for info in infos:
        line = template.format(
            port=info.port,
            data_source=manager.data_source_from_info(info),
            delta=_time_delta_from_info(info),
            pid=info.pid,
        )
        print(line)
|
def function[list, parameter[]]:
constant[Print a listing of known running TensorBoard instances.
TensorBoard instances that were killed uncleanly (e.g., with SIGKILL
or SIGQUIT) may appear in this list even if they are no longer
running. Conversely, this list may be missing some entries if your
operating system's temporary directory has been cleared since a
still-running TensorBoard instance started.
]
variable[infos] assign[=] call[name[manager].get_all, parameter[]]
if <ast.UnaryOp object at 0x7da1b216b0a0> begin[:]
call[name[print], parameter[constant[No known TensorBoard instances running.]]]
return[None]
call[name[print], parameter[constant[Known TensorBoard instances:]]]
for taget[name[info]] in starred[name[infos]] begin[:]
variable[template] assign[=] constant[ - port {port}: {data_source} (started {delta} ago; pid {pid})]
call[name[print], parameter[call[name[template].format, parameter[]]]]
|
keyword[def] identifier[list] ():
literal[string]
identifier[infos] = identifier[manager] . identifier[get_all] ()
keyword[if] keyword[not] identifier[infos] :
identifier[print] ( literal[string] )
keyword[return]
identifier[print] ( literal[string] )
keyword[for] identifier[info] keyword[in] identifier[infos] :
identifier[template] = literal[string]
identifier[print] ( identifier[template] . identifier[format] (
identifier[port] = identifier[info] . identifier[port] ,
identifier[data_source] = identifier[manager] . identifier[data_source_from_info] ( identifier[info] ),
identifier[delta] = identifier[_time_delta_from_info] ( identifier[info] ),
identifier[pid] = identifier[info] . identifier[pid] ,
))
|
def list():
"""Print a listing of known running TensorBoard instances.
TensorBoard instances that were killed uncleanly (e.g., with SIGKILL
or SIGQUIT) may appear in this list even if they are no longer
running. Conversely, this list may be missing some entries if your
operating system's temporary directory has been cleared since a
still-running TensorBoard instance started.
"""
infos = manager.get_all()
if not infos:
print('No known TensorBoard instances running.')
return # depends on [control=['if'], data=[]]
print('Known TensorBoard instances:')
for info in infos:
template = ' - port {port}: {data_source} (started {delta} ago; pid {pid})'
print(template.format(port=info.port, data_source=manager.data_source_from_info(info), delta=_time_delta_from_info(info), pid=info.pid)) # depends on [control=['for'], data=['info']]
|
def _maybe_call_fn(fn,
                   fn_arg_list,
                   fn_result=None,
                   description='target_log_prob'):
  """Return `fn_result`, computing it as `fn(*fn_arg_list)` when not given."""
  # Normalize the argument(s) to a plain list before unpacking.
  if mcmc_util.is_list_like(fn_arg_list):
    args = list(fn_arg_list)
  else:
    args = [fn_arg_list]
  if fn_result is None:
    fn_result = fn(*args)
  if not fn_result.dtype.is_floating:
    raise TypeError('`{}` must be a `Tensor` with `float` `dtype`.'.format(
        description))
  return fn_result
|
def function[_maybe_call_fn, parameter[fn, fn_arg_list, fn_result, description]]:
constant[Helper which computes `fn_result` if needed.]
variable[fn_arg_list] assign[=] <ast.IfExp object at 0x7da1b03226b0>
if compare[name[fn_result] is constant[None]] begin[:]
variable[fn_result] assign[=] call[name[fn], parameter[<ast.Starred object at 0x7da1b03b9030>]]
if <ast.UnaryOp object at 0x7da1b05bc790> begin[:]
<ast.Raise object at 0x7da1b05be980>
return[name[fn_result]]
|
keyword[def] identifier[_maybe_call_fn] ( identifier[fn] ,
identifier[fn_arg_list] ,
identifier[fn_result] = keyword[None] ,
identifier[description] = literal[string] ):
literal[string]
identifier[fn_arg_list] =( identifier[list] ( identifier[fn_arg_list] ) keyword[if] identifier[mcmc_util] . identifier[is_list_like] ( identifier[fn_arg_list] )
keyword[else] [ identifier[fn_arg_list] ])
keyword[if] identifier[fn_result] keyword[is] keyword[None] :
identifier[fn_result] = identifier[fn] (* identifier[fn_arg_list] )
keyword[if] keyword[not] identifier[fn_result] . identifier[dtype] . identifier[is_floating] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] (
identifier[description] ))
keyword[return] identifier[fn_result]
|
def _maybe_call_fn(fn, fn_arg_list, fn_result=None, description='target_log_prob'):
"""Helper which computes `fn_result` if needed."""
fn_arg_list = list(fn_arg_list) if mcmc_util.is_list_like(fn_arg_list) else [fn_arg_list]
if fn_result is None:
fn_result = fn(*fn_arg_list) # depends on [control=['if'], data=['fn_result']]
if not fn_result.dtype.is_floating:
raise TypeError('`{}` must be a `Tensor` with `float` `dtype`.'.format(description)) # depends on [control=['if'], data=[]]
return fn_result
|
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
|
def function[_round_frac, parameter[x, precision]]:
constant[
Round the fractional part of the given number
]
if <ast.BoolOp object at 0x7da18dc989d0> begin[:]
return[name[x]]
|
keyword[def] identifier[_round_frac] ( identifier[x] , identifier[precision] ):
literal[string]
keyword[if] keyword[not] identifier[np] . identifier[isfinite] ( identifier[x] ) keyword[or] identifier[x] == literal[int] :
keyword[return] identifier[x]
keyword[else] :
identifier[frac] , identifier[whole] = identifier[np] . identifier[modf] ( identifier[x] )
keyword[if] identifier[whole] == literal[int] :
identifier[digits] =- identifier[int] ( identifier[np] . identifier[floor] ( identifier[np] . identifier[log10] ( identifier[abs] ( identifier[frac] ))))- literal[int] + identifier[precision]
keyword[else] :
identifier[digits] = identifier[precision]
keyword[return] identifier[np] . identifier[around] ( identifier[x] , identifier[digits] )
|
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x # depends on [control=['if'], data=[]]
else:
(frac, whole) = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision # depends on [control=['if'], data=[]]
else:
digits = precision
return np.around(x, digits)
|
def cmd(send, msg, args):
    """Handles permissions
    Syntax: {command} (--add|--remove) --nick (nick) --role (admin)
    """
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--nick', action=arguments.NickParser, required=True)
    parser.add_argument('--role', choices=['admin'], required=True)
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument('--add', action='store_true')
    mode.add_argument('--remove', action='store_true')
    try:
        opts = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    session = args['db']
    # Existing permission record for the target nick, if any.
    record = session.query(Permissions).filter(Permissions.nick == opts.nick).first()
    if opts.add:
        if record is not None:
            send("%s is already an %s." % (record.nick, record.role))
        else:
            session.add(Permissions(nick=opts.nick, role=opts.role))
            send("%s is now an %s." % (opts.nick, opts.role))
    elif record is None:
        send("%s was not an %s." % (opts.nick, opts.role))
    else:
        session.delete(record)
        send("%s is no longer an %s." % (record.nick, record.role))
|
def function[cmd, parameter[send, msg, args]]:
constant[Handles permissions
Syntax: {command} (--add|--remove) --nick (nick) --role (admin)
]
variable[parser] assign[=] call[name[arguments].ArgParser, parameter[call[name[args]][constant[config]]]]
call[name[parser].add_argument, parameter[constant[--nick]]]
call[name[parser].add_argument, parameter[constant[--role]]]
variable[group] assign[=] call[name[parser].add_mutually_exclusive_group, parameter[]]
call[name[group].add_argument, parameter[constant[--add]]]
call[name[group].add_argument, parameter[constant[--remove]]]
<ast.Try object at 0x7da1b1fb8430>
variable[session] assign[=] call[name[args]][constant[db]]
variable[admin] assign[=] call[call[call[name[session].query, parameter[name[Permissions]]].filter, parameter[compare[name[Permissions].nick equal[==] name[cmdargs].nick]]].first, parameter[]]
if name[cmdargs].add begin[:]
if compare[name[admin] is constant[None]] begin[:]
call[name[session].add, parameter[call[name[Permissions], parameter[]]]]
call[name[send], parameter[binary_operation[constant[%s is now an %s.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1fba110>, <ast.Attribute object at 0x7da1b1fba0e0>]]]]]
|
keyword[def] identifier[cmd] ( identifier[send] , identifier[msg] , identifier[args] ):
literal[string]
identifier[parser] = identifier[arguments] . identifier[ArgParser] ( identifier[args] [ literal[string] ])
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = identifier[arguments] . identifier[NickParser] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[choices] =[ literal[string] ], identifier[required] = keyword[True] )
identifier[group] = identifier[parser] . identifier[add_mutually_exclusive_group] ( identifier[required] = keyword[True] )
identifier[group] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] )
identifier[group] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] )
keyword[try] :
identifier[cmdargs] = identifier[parser] . identifier[parse_args] ( identifier[msg] )
keyword[except] identifier[arguments] . identifier[ArgumentException] keyword[as] identifier[e] :
identifier[send] ( identifier[str] ( identifier[e] ))
keyword[return]
identifier[session] = identifier[args] [ literal[string] ]
identifier[admin] = identifier[session] . identifier[query] ( identifier[Permissions] ). identifier[filter] ( identifier[Permissions] . identifier[nick] == identifier[cmdargs] . identifier[nick] ). identifier[first] ()
keyword[if] identifier[cmdargs] . identifier[add] :
keyword[if] identifier[admin] keyword[is] keyword[None] :
identifier[session] . identifier[add] ( identifier[Permissions] ( identifier[nick] = identifier[cmdargs] . identifier[nick] , identifier[role] = identifier[cmdargs] . identifier[role] ))
identifier[send] ( literal[string] %( identifier[cmdargs] . identifier[nick] , identifier[cmdargs] . identifier[role] ))
keyword[else] :
identifier[send] ( literal[string] %( identifier[admin] . identifier[nick] , identifier[admin] . identifier[role] ))
keyword[else] :
keyword[if] identifier[admin] keyword[is] keyword[None] :
identifier[send] ( literal[string] %( identifier[cmdargs] . identifier[nick] , identifier[cmdargs] . identifier[role] ))
keyword[else] :
identifier[session] . identifier[delete] ( identifier[admin] )
identifier[send] ( literal[string] %( identifier[admin] . identifier[nick] , identifier[admin] . identifier[role] ))
|
def cmd(send, msg, args):
"""Handles permissions
Syntax: {command} (--add|--remove) --nick (nick) --role (admin)
"""
parser = arguments.ArgParser(args['config'])
parser.add_argument('--nick', action=arguments.NickParser, required=True)
parser.add_argument('--role', choices=['admin'], required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--add', action='store_true')
group.add_argument('--remove', action='store_true')
try:
cmdargs = parser.parse_args(msg) # depends on [control=['try'], data=[]]
except arguments.ArgumentException as e:
send(str(e))
return # depends on [control=['except'], data=['e']]
session = args['db']
admin = session.query(Permissions).filter(Permissions.nick == cmdargs.nick).first()
if cmdargs.add:
if admin is None:
session.add(Permissions(nick=cmdargs.nick, role=cmdargs.role))
send('%s is now an %s.' % (cmdargs.nick, cmdargs.role)) # depends on [control=['if'], data=[]]
else:
send('%s is already an %s.' % (admin.nick, admin.role)) # depends on [control=['if'], data=[]]
elif admin is None:
send('%s was not an %s.' % (cmdargs.nick, cmdargs.role)) # depends on [control=['if'], data=[]]
else:
session.delete(admin)
send('%s is no longer an %s.' % (admin.nick, admin.role))
|
def qw(words, flat=0, sep=None, maxsplit=-1):
    """Similar to Perl's qw() operator, but with some more options.
    qw(words,flat=0,sep=' ',maxsplit=-1) -> words.split(sep,maxsplit)
    words can also be a list itself, and with flat=1, the output will be
    recursively flattened.
    Examples:
    >>> qw('1 2')
    ['1', '2']
    >>> qw(['a b','1 2',['m n','p q']])
    [['a', 'b'], ['1', '2'], [['m', 'n'], ['p', 'q']]]
    >>> qw(['a b','1 2',['m n','p q']],flat=1)
    ['a', 'b', '1', '2', 'm', 'n', 'p', 'q']
    """
    # `basestring` only exists on Python 2; fall back to `str` so the
    # function keeps working after a 2->3 migration.
    try:
        string_types = basestring  # noqa: F821
    except NameError:
        string_types = str
    if isinstance(words, string_types):
        # Drop empty / whitespace-only fragments and strip the rest.
        return [word.strip() for word in words.split(sep, maxsplit)
                if word and not word.isspace()]
    if flat:
        return flatten([qw(w, 1) for w in words])
    # list(...) so Python 3's lazy map still yields the documented
    # list results shown in the examples above.
    return list(map(qw, words))
|
def function[qw, parameter[words, flat, sep, maxsplit]]:
constant[Similar to Perl's qw() operator, but with some more options.
qw(words,flat=0,sep=' ',maxsplit=-1) -> words.split(sep,maxsplit)
words can also be a list itself, and with flat=1, the output will be
recursively flattened.
Examples:
>>> qw('1 2')
['1', '2']
>>> qw(['a b','1 2',['m n','p q']])
[['a', 'b'], ['1', '2'], [['m', 'n'], ['p', 'q']]]
>>> qw(['a b','1 2',['m n','p q']],flat=1)
['a', 'b', '1', '2', 'm', 'n', 'p', 'q']
]
if call[name[isinstance], parameter[name[words], name[basestring]]] begin[:]
return[<ast.ListComp object at 0x7da18fe91e10>]
if name[flat] begin[:]
return[call[name[flatten], parameter[call[name[map], parameter[name[qw], name[words], binary_operation[list[[<ast.Constant object at 0x7da204622a40>]] * call[name[len], parameter[name[words]]]]]]]]]
return[call[name[map], parameter[name[qw], name[words]]]]
|
keyword[def] identifier[qw] ( identifier[words] , identifier[flat] = literal[int] , identifier[sep] = keyword[None] , identifier[maxsplit] =- literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[words] , identifier[basestring] ):
keyword[return] [ identifier[word] . identifier[strip] () keyword[for] identifier[word] keyword[in] identifier[words] . identifier[split] ( identifier[sep] , identifier[maxsplit] )
keyword[if] identifier[word] keyword[and] keyword[not] identifier[word] . identifier[isspace] ()]
keyword[if] identifier[flat] :
keyword[return] identifier[flatten] ( identifier[map] ( identifier[qw] , identifier[words] ,[ literal[int] ]* identifier[len] ( identifier[words] )))
keyword[return] identifier[map] ( identifier[qw] , identifier[words] )
|
def qw(words, flat=0, sep=None, maxsplit=-1):
"""Similar to Perl's qw() operator, but with some more options.
qw(words,flat=0,sep=' ',maxsplit=-1) -> words.split(sep,maxsplit)
words can also be a list itself, and with flat=1, the output will be
recursively flattened.
Examples:
>>> qw('1 2')
['1', '2']
>>> qw(['a b','1 2',['m n','p q']])
[['a', 'b'], ['1', '2'], [['m', 'n'], ['p', 'q']]]
>>> qw(['a b','1 2',['m n','p q']],flat=1)
['a', 'b', '1', '2', 'm', 'n', 'p', 'q']
"""
if isinstance(words, basestring):
return [word.strip() for word in words.split(sep, maxsplit) if word and (not word.isspace())] # depends on [control=['if'], data=[]]
if flat:
return flatten(map(qw, words, [1] * len(words))) # depends on [control=['if'], data=[]]
return map(qw, words)
|
def get_params(self, deep=True):
    """
    Obtain parameters for this estimator.
    Used primarily for sklearn Pipelines and sklearn grid search.
    :param deep: If True, return parameters of all sub-objects that are estimators.
    :returns: A dict of parameters
    """
    params = {}
    for name, value in self.parms.items():
        if deep and isinstance(value, H2OEstimator):
            # Flatten nested estimator params using sklearn's
            # "<outer>__<inner>" naming convention.
            for sub_name, sub_value in value.get_params().items():
                params[name + "__" + sub_name] = sub_value
        params[name] = value
    return params
|
def function[get_params, parameter[self, deep]]:
constant[
Obtain parameters for this estimator.
Used primarily for sklearn Pipelines and sklearn grid search.
:param deep: If True, return parameters of all sub-objects that are estimators.
:returns: A dict of parameters
]
variable[out] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18c4cce50>, <ast.Name object at 0x7da18c4ced70>]]] in starred[call[name[self].parms.items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18c4cf8e0> begin[:]
variable[deep_items] assign[=] call[name[list], parameter[call[call[name[value].get_params, parameter[]].items, parameter[]]]]
call[name[out].update, parameter[<ast.GeneratorExp object at 0x7da18c4cf130>]]
call[name[out]][name[key]] assign[=] name[value]
return[name[out]]
|
keyword[def] identifier[get_params] ( identifier[self] , identifier[deep] = keyword[True] ):
literal[string]
identifier[out] = identifier[dict] ()
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[parms] . identifier[items] ():
keyword[if] identifier[deep] keyword[and] identifier[isinstance] ( identifier[value] , identifier[H2OEstimator] ):
identifier[deep_items] = identifier[list] ( identifier[value] . identifier[get_params] (). identifier[items] ())
identifier[out] . identifier[update] (( identifier[key] + literal[string] + identifier[k] , identifier[val] ) keyword[for] identifier[k] , identifier[val] keyword[in] identifier[deep_items] )
identifier[out] [ identifier[key] ]= identifier[value]
keyword[return] identifier[out]
|
def get_params(self, deep=True):
"""
Obtain parameters for this estimator.
Used primarily for sklearn Pipelines and sklearn grid search.
:param deep: If True, return parameters of all sub-objects that are estimators.
:returns: A dict of parameters
"""
out = dict()
for (key, value) in self.parms.items():
if deep and isinstance(value, H2OEstimator):
deep_items = list(value.get_params().items())
out.update(((key + '__' + k, val) for (k, val) in deep_items)) # depends on [control=['if'], data=[]]
out[key] = value # depends on [control=['for'], data=[]]
return out
|
def get_metadata_for_nifti(in_file, bids_dir=None, validate=True):
    """Fetch metadata for a given nifti file
    >>> metadata = get_metadata_for_nifti(
    ...     datadir / 'ds054' / 'sub-100185' / 'fmap' / 'sub-100185_phasediff.nii.gz',
    ...     validate=False)
    >>> metadata['Manufacturer']
    'SIEMENS'
    >>>
    """
    # Resolve (or reuse) the BIDS layout, then look the file up in it.
    layout = _init_layout(in_file, bids_dir, validate)
    return layout.get_metadata(str(in_file))
|
def function[get_metadata_for_nifti, parameter[in_file, bids_dir, validate]]:
constant[Fetch metadata for a given nifti file
>>> metadata = get_metadata_for_nifti(
... datadir / 'ds054' / 'sub-100185' / 'fmap' / 'sub-100185_phasediff.nii.gz',
... validate=False)
>>> metadata['Manufacturer']
'SIEMENS'
>>>
]
return[call[call[name[_init_layout], parameter[name[in_file], name[bids_dir], name[validate]]].get_metadata, parameter[call[name[str], parameter[name[in_file]]]]]]
|
keyword[def] identifier[get_metadata_for_nifti] ( identifier[in_file] , identifier[bids_dir] = keyword[None] , identifier[validate] = keyword[True] ):
literal[string]
keyword[return] identifier[_init_layout] ( identifier[in_file] , identifier[bids_dir] , identifier[validate] ). identifier[get_metadata] (
identifier[str] ( identifier[in_file] ))
|
def get_metadata_for_nifti(in_file, bids_dir=None, validate=True):
"""Fetch metadata for a given nifti file
>>> metadata = get_metadata_for_nifti(
... datadir / 'ds054' / 'sub-100185' / 'fmap' / 'sub-100185_phasediff.nii.gz',
... validate=False)
>>> metadata['Manufacturer']
'SIEMENS'
>>>
"""
return _init_layout(in_file, bids_dir, validate).get_metadata(str(in_file))
|
def submit_job(self, bundle, job_config=None):
    """Submit a Streams Application Bundle (sab file) to
    this Streaming Analytics service.
    Args:
        bundle(str): path to a Streams application bundle (sab file)
            containing the application to be submitted
        job_config(JobConfig): a job configuration overlay
    Returns:
        dict: JSON response from service containing 'name' field with unique
            job name assigned to submitted job, or, 'error_status' and
            'description' fields if submission was unsuccessful.
    """
    # All work is delegated to self._delegator; this method only forwards
    # its arguments unchanged.
    return self._delegator._submit_job(bundle=bundle, job_config=job_config)
|
def function[submit_job, parameter[self, bundle, job_config]]:
constant[Submit a Streams Application Bundle (sab file) to
this Streaming Analytics service.
Args:
bundle(str): path to a Streams application bundle (sab file)
containing the application to be submitted
job_config(JobConfig): a job configuration overlay
Returns:
dict: JSON response from service containing 'name' field with unique
job name assigned to submitted job, or, 'error_status' and
'description' fields if submission was unsuccessful.
]
return[call[name[self]._delegator._submit_job, parameter[]]]
|
keyword[def] identifier[submit_job] ( identifier[self] , identifier[bundle] , identifier[job_config] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_delegator] . identifier[_submit_job] ( identifier[bundle] = identifier[bundle] , identifier[job_config] = identifier[job_config] )
|
def submit_job(self, bundle, job_config=None):
"""Submit a Streams Application Bundle (sab file) to
this Streaming Analytics service.
Args:
bundle(str): path to a Streams application bundle (sab file)
containing the application to be submitted
job_config(JobConfig): a job configuration overlay
Returns:
dict: JSON response from service containing 'name' field with unique
job name assigned to submitted job, or, 'error_status' and
'description' fields if submission was unsuccessful.
"""
return self._delegator._submit_job(bundle=bundle, job_config=job_config)
|
async def eap_options(request: web.Request) -> web.Response:
    """ Get request returns the available configuration options for WPA-EAP.
    Because the options for connecting to WPA-EAP secured networks are quite
    complex, to avoid duplicating logic this endpoint returns a json object
    describing the structure of arguments and options for the eap_config arg to
    /wifi/configure.
    The object is shaped like this:
    {
        options: [ // Supported EAP methods and their options. One of these
                   // method names must be passed in the eapConfig dict
            {
                name: str // i.e. TTLS-EAPMSCHAPv2. Should be in the eapType
                          // key of eapConfig when sent to /configure.
                options: [
                    {
                     name: str // i.e. "username"
                     displayName: str // i.e. "Username"
                     required: bool,
                     type: str
                    }
                ]
            }
        ]
    }
    The ``type`` keys denote the semantic kind of the argument. Valid types
    are:
    password: This is some kind of password. It may be a psk for the network,
              an Active Directory password, or the passphrase for a private key
    string: A generic string; perhaps a username, or a subject-matches
            domain name for server validation
    file: A file that the user must provide. This should be the id of a
          file previously uploaded via POST /wifi/keys.
    Although the arguments are described hierarchically, they should be
    specified in eap_config as a flat dict. For instance, a /configure
    invocation for TTLS/EAP-TLS might look like
    ```
    POST
    {
     ssid: "my-ssid",
     securityType: "wpa-eap",
     hidden: false,
     eapConfig : {
                  eapType: "TTLS/EAP-TLS", // One of the method options
                  identity: "alice@example.com", // And then its arguments
                  anonymousIdentity: "anonymous@example.com",
                  password: "testing123",
                  caCert: "12d1f180f081b",
                  phase2CaCert: "12d1f180f081b",
                  phase2ClientCert: "009909fd9fa",
                  phase2PrivateKey: "081009fbcbc"
                  phase2PrivateKeyPassword: "testing321"
                 }
    }
    ```
    """
    # The response is a static description: EAP_CONFIG_SHAPE is a module-level
    # constant, so the request object is intentionally unused here.
    return web.json_response(EAP_CONFIG_SHAPE, status=200)
|
<ast.AsyncFunctionDef object at 0x7da1b26adb40>
|
keyword[async] keyword[def] identifier[eap_options] ( identifier[request] : identifier[web] . identifier[Request] )-> identifier[web] . identifier[Response] :
literal[string]
keyword[return] identifier[web] . identifier[json_response] ( identifier[EAP_CONFIG_SHAPE] , identifier[status] = literal[int] )
|
async def eap_options(request: web.Request) -> web.Response:
""" Get request returns the available configuration options for WPA-EAP.
Because the options for connecting to WPA-EAP secured networks are quite
complex, to avoid duplicating logic this endpoint returns a json object
describing the structure of arguments and options for the eap_config arg to
/wifi/configure.
The object is shaped like this:
{
options: [ // Supported EAP methods and their options. One of these
// method names must be passed in the eapConfig dict
{
name: str // i.e. TTLS-EAPMSCHAPv2. Should be in the eapType
// key of eapConfig when sent to /configure.
options: [
{
name: str // i.e. "username"
displayName: str // i.e. "Username"
required: bool,
type: str
}
]
}
]
}
The ``type`` keys denote the semantic kind of the argument. Valid types
are:
password: This is some kind of password. It may be a psk for the network,
an Active Directory password, or the passphrase for a private key
string: A generic string; perhaps a username, or a subject-matches
domain name for server validation
file: A file that the user must provide. This should be the id of a
file previously uploaded via POST /wifi/keys.
Although the arguments are described hierarchically, they should be
specified in eap_config as a flat dict. For instance, a /configure
invocation for TTLS/EAP-TLS might look like
```
POST
{
ssid: "my-ssid",
securityType: "wpa-eap",
hidden: false,
eapConfig : {
eapType: "TTLS/EAP-TLS", // One of the method options
identity: "alice@example.com", // And then its arguments
anonymousIdentity: "anonymous@example.com",
password: "testing123",
caCert: "12d1f180f081b",
phase2CaCert: "12d1f180f081b",
phase2ClientCert: "009909fd9fa",
phase2PrivateKey: "081009fbcbc"
phase2PrivateKeyPassword: "testing321"
}
}
```
"""
return web.json_response(EAP_CONFIG_SHAPE, status=200)
|
def __calculate_dataset_difference(self, amount_clusters):
    """!
    @brief Calculate distance from each point to each cluster center.
    """
    # One row per cluster center, one column per input point.
    differences = numpy.zeros((amount_clusters, len(self.__pointer_data)))
    for center_index in range(amount_clusters):
        center = self.__centers[center_index]
        if self.__metric.get_type() == type_metric.USER_DEFINED:
            # User-supplied metrics take a single point at a time.
            differences[center_index] = [self.__metric(point, center)
                                         for point in self.__pointer_data]
        else:
            # Built-in metrics accept the whole dataset at once (vectorized).
            differences[center_index] = self.__metric(self.__pointer_data, center)
    return differences
|
def function[__calculate_dataset_difference, parameter[self, amount_clusters]]:
constant[!
@brief Calculate distance from each point to each cluster center.
]
variable[dataset_differences] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Name object at 0x7da1b01b1510>, <ast.Call object at 0x7da1b01b2800>]]]]
for taget[name[index_center]] in starred[call[name[range], parameter[name[amount_clusters]]]] begin[:]
if compare[call[name[self].__metric.get_type, parameter[]] not_equal[!=] name[type_metric].USER_DEFINED] begin[:]
call[name[dataset_differences]][name[index_center]] assign[=] call[name[self].__metric, parameter[name[self].__pointer_data, call[name[self].__centers][name[index_center]]]]
return[name[dataset_differences]]
|
keyword[def] identifier[__calculate_dataset_difference] ( identifier[self] , identifier[amount_clusters] ):
literal[string]
identifier[dataset_differences] = identifier[numpy] . identifier[zeros] (( identifier[amount_clusters] , identifier[len] ( identifier[self] . identifier[__pointer_data] )))
keyword[for] identifier[index_center] keyword[in] identifier[range] ( identifier[amount_clusters] ):
keyword[if] identifier[self] . identifier[__metric] . identifier[get_type] ()!= identifier[type_metric] . identifier[USER_DEFINED] :
identifier[dataset_differences] [ identifier[index_center] ]= identifier[self] . identifier[__metric] ( identifier[self] . identifier[__pointer_data] , identifier[self] . identifier[__centers] [ identifier[index_center] ])
keyword[else] :
identifier[dataset_differences] [ identifier[index_center] ]=[ identifier[self] . identifier[__metric] ( identifier[point] , identifier[self] . identifier[__centers] [ identifier[index_center] ])
keyword[for] identifier[point] keyword[in] identifier[self] . identifier[__pointer_data] ]
keyword[return] identifier[dataset_differences]
|
def __calculate_dataset_difference(self, amount_clusters):
"""!
@brief Calculate distance from each point to each cluster center.
"""
dataset_differences = numpy.zeros((amount_clusters, len(self.__pointer_data)))
for index_center in range(amount_clusters):
if self.__metric.get_type() != type_metric.USER_DEFINED:
dataset_differences[index_center] = self.__metric(self.__pointer_data, self.__centers[index_center]) # depends on [control=['if'], data=[]]
else:
dataset_differences[index_center] = [self.__metric(point, self.__centers[index_center]) for point in self.__pointer_data] # depends on [control=['for'], data=['index_center']]
return dataset_differences
|
def master_etcd(info, meta, max_pod_cluster, label):
    """ Function used to create the response for all master node types """
    # meta may map the label to None; normalise to an empty node list.
    nodes = meta.get(label, []) or []
    # Restrict the frame to the machines belonging to this node group.
    info = info[info["machine_id"].isin(nodes)]
    if info.empty:
        return
    # The expected CPU budget scales with the cluster-wide pod maximum.
    cpu_factor = max_pod_cluster / 1000.0
    nocpu_expected = MASTER_MIN_CORE + cpu_factor
    bad = info[info["cpu_count"] < nocpu_expected]
    good = info[info["cpu_count"] >= nocpu_expected]
    return make_response(
        "MASTER_ETCD",
        nocpu_expected=nocpu_expected,
        cpu_factor=cpu_factor,
        bad=bad,
        good=good,
        max_pod_cluster=max_pod_cluster,
        GREEN=Fore.GREEN,
        RED=Fore.RED,
        YELLOW=Fore.YELLOW,
        NC=Style.RESET_ALL,
    )
|
def function[master_etcd, parameter[info, meta, max_pod_cluster, label]]:
constant[ Function used to create the response for all master node types ]
variable[nodes] assign[=] <ast.BoolOp object at 0x7da1b184be50>
variable[info] assign[=] call[name[info]][call[call[name[info]][constant[machine_id]].isin, parameter[name[nodes]]]]
if name[info].empty begin[:]
return[None]
variable[cpu_factor] assign[=] binary_operation[name[max_pod_cluster] / constant[1000.0]]
variable[nocpu_expected] assign[=] binary_operation[name[MASTER_MIN_CORE] + binary_operation[name[max_pod_cluster] / constant[1000.0]]]
variable[bad] assign[=] call[name[info]][compare[call[name[info]][constant[cpu_count]] less[<] name[nocpu_expected]]]
variable[good] assign[=] call[name[info]][compare[call[name[info]][constant[cpu_count]] greater_or_equal[>=] name[nocpu_expected]]]
return[call[name[make_response], parameter[constant[MASTER_ETCD]]]]
|
keyword[def] identifier[master_etcd] ( identifier[info] , identifier[meta] , identifier[max_pod_cluster] , identifier[label] ):
literal[string]
identifier[nodes] = identifier[meta] . identifier[get] ( identifier[label] ,[]) keyword[or] []
identifier[info] = identifier[info] [ identifier[info] [ literal[string] ]. identifier[isin] ( identifier[nodes] )]
keyword[if] identifier[info] . identifier[empty] :
keyword[return]
identifier[cpu_factor] = identifier[max_pod_cluster] / literal[int]
identifier[nocpu_expected] = identifier[MASTER_MIN_CORE] +( identifier[max_pod_cluster] / literal[int] )
identifier[bad] = identifier[info] [ identifier[info] [ literal[string] ]< identifier[nocpu_expected] ]
identifier[good] = identifier[info] [ identifier[info] [ literal[string] ]>= identifier[nocpu_expected] ]
keyword[return] identifier[make_response] ( literal[string] ,
identifier[nocpu_expected] = identifier[nocpu_expected] , identifier[cpu_factor] = identifier[cpu_factor] ,
identifier[bad] = identifier[bad] , identifier[good] = identifier[good] , identifier[max_pod_cluster] = identifier[max_pod_cluster] ,
identifier[GREEN] = identifier[Fore] . identifier[GREEN] , identifier[RED] = identifier[Fore] . identifier[RED] , identifier[YELLOW] = identifier[Fore] . identifier[YELLOW] , identifier[NC] = identifier[Style] . identifier[RESET_ALL] )
|
def master_etcd(info, meta, max_pod_cluster, label):
""" Function used to create the response for all master node types """
nodes = meta.get(label, []) or []
info = info[info['machine_id'].isin(nodes)]
if info.empty:
return # depends on [control=['if'], data=[]]
cpu_factor = max_pod_cluster / 1000.0
nocpu_expected = MASTER_MIN_CORE + max_pod_cluster / 1000.0
bad = info[info['cpu_count'] < nocpu_expected]
good = info[info['cpu_count'] >= nocpu_expected]
return make_response('MASTER_ETCD', nocpu_expected=nocpu_expected, cpu_factor=cpu_factor, bad=bad, good=good, max_pod_cluster=max_pod_cluster, GREEN=Fore.GREEN, RED=Fore.RED, YELLOW=Fore.YELLOW, NC=Style.RESET_ALL)
|
def fit(self, X):
    """Fit a univariate distribution per column, then the covariance matrix.

    Args:
        X(numpy.ndarray or pandas.DataFrame): Data to model.

    Returns:
        None
    """
    LOGGER.debug('Fitting Gaussian Copula')
    names = self.get_column_names(X)
    # self.distribution names the distribution class to instantiate per column.
    distribution_class = import_object(self.distribution)
    for name in names:
        distrib = distribution_class()
        # Register before fitting, mirroring the original assignment order.
        self.distribs[name] = distrib
        distrib.fit(self.get_column(X, name))
    self.covariance = self._get_covariance(X)
    self.fitted = True
|
def function[fit, parameter[self, X]]:
constant[Compute the distribution for each variable and then its covariance matrix.
Args:
X(numpy.ndarray or pandas.DataFrame): Data to model.
Returns:
None
]
call[name[LOGGER].debug, parameter[constant[Fitting Gaussian Copula]]]
variable[column_names] assign[=] call[name[self].get_column_names, parameter[name[X]]]
variable[distribution_class] assign[=] call[name[import_object], parameter[name[self].distribution]]
for taget[name[column_name]] in starred[name[column_names]] begin[:]
call[name[self].distribs][name[column_name]] assign[=] call[name[distribution_class], parameter[]]
variable[column] assign[=] call[name[self].get_column, parameter[name[X], name[column_name]]]
call[call[name[self].distribs][name[column_name]].fit, parameter[name[column]]]
name[self].covariance assign[=] call[name[self]._get_covariance, parameter[name[X]]]
name[self].fitted assign[=] constant[True]
|
keyword[def] identifier[fit] ( identifier[self] , identifier[X] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[column_names] = identifier[self] . identifier[get_column_names] ( identifier[X] )
identifier[distribution_class] = identifier[import_object] ( identifier[self] . identifier[distribution] )
keyword[for] identifier[column_name] keyword[in] identifier[column_names] :
identifier[self] . identifier[distribs] [ identifier[column_name] ]= identifier[distribution_class] ()
identifier[column] = identifier[self] . identifier[get_column] ( identifier[X] , identifier[column_name] )
identifier[self] . identifier[distribs] [ identifier[column_name] ]. identifier[fit] ( identifier[column] )
identifier[self] . identifier[covariance] = identifier[self] . identifier[_get_covariance] ( identifier[X] )
identifier[self] . identifier[fitted] = keyword[True]
|
def fit(self, X):
"""Compute the distribution for each variable and then its covariance matrix.
Args:
X(numpy.ndarray or pandas.DataFrame): Data to model.
Returns:
None
"""
LOGGER.debug('Fitting Gaussian Copula')
column_names = self.get_column_names(X)
distribution_class = import_object(self.distribution)
for column_name in column_names:
self.distribs[column_name] = distribution_class()
column = self.get_column(X, column_name)
self.distribs[column_name].fit(column) # depends on [control=['for'], data=['column_name']]
self.covariance = self._get_covariance(X)
self.fitted = True
|
def create_manager(self, base_manager=models.Manager):
    """
    This will create the custom Manager that will use the fields_model and values_model
    respectively.

    :param base_manager: the base manager class to inherit from
    :return: a Manager subclass whose ``search`` looks through custom field values
    """
    _builder = self

    class CustomManager(base_manager):
        def search(self, search_data, custom_args=None):
            """
            Search inside the custom fields for this model for any match
            of search_data and returns existing model instances.

            :param search_data: text to look for inside custom field values
            :param custom_args: optional extra filters applied when selecting
                the searchable custom fields
            :return: queryset of model instances whose custom values match
            """
            # Fixed: the original used a mutable default argument ({}), which
            # is shared across calls; None is the safe sentinel.
            if custom_args is None:
                custom_args = {}
            query = None
            lookups = (
                '%s__%s' % ('value_text', 'icontains'),
            )
            content_type = ContentType.objects.get_for_model(self.model)
            custom_args = dict({'content_type': content_type, 'searchable': True}, **custom_args)
            # Keyed by field name, so duplicate names collapse to the last definition
            # (preserves the original dict-building semantics).
            custom_fields = {f.name: f for f in _builder.fields_model_class.objects.filter(**custom_args)}
            for value_lookup in lookups:
                for f in custom_fields.values():
                    found = _builder.values_model_class.objects.filter(**{'custom_field': f,
                                                                          'content_type': content_type,
                                                                          value_lookup: search_data})
                    # exists() is the documented cheap emptiness check
                    # (replaces found.count() > 0).
                    if found.exists():
                        if query is None:
                            query = Q()
                        query = query & Q(**{str('%s__in' % self.model._meta.pk.name):
                                             [obj.object_id for obj in found]})
            if query is None:
                # Nothing matched: return an explicitly empty queryset.
                return self.get_queryset().none()
            return self.get_queryset().filter(query)
    return CustomManager
|
def function[create_manager, parameter[self, base_manager]]:
constant[
This will create the custom Manager that will use the fields_model and values_model
respectively.
:param base_manager: the base manager class to inherit from
:return:
]
variable[_builder] assign[=] name[self]
class class[CustomManager, parameter[]] begin[:]
def function[search, parameter[self, search_data, custom_args]]:
constant[
Search inside the custom fields for this model for any match
of search_data and returns existing model instances
:param search_data:
:param custom_args:
:return:
]
variable[query] assign[=] constant[None]
variable[lookups] assign[=] tuple[[<ast.BinOp object at 0x7da20c6a9360>]]
variable[content_type] assign[=] call[name[ContentType].objects.get_for_model, parameter[name[self].model]]
variable[custom_args] assign[=] call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da20c6a9e10>, <ast.Constant object at 0x7da20c6aac20>], [<ast.Name object at 0x7da20c6ab700>, <ast.Constant object at 0x7da20c6aad70>]]]]
variable[custom_fields] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da20c6aae00>]]
for taget[name[value_lookup]] in starred[name[lookups]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c6a8e50>, <ast.Name object at 0x7da20c6a9870>]]] in starred[call[name[custom_fields].items, parameter[]]] begin[:]
variable[found] assign[=] call[name[_builder].values_model_class.objects.filter, parameter[]]
if compare[call[name[found].count, parameter[]] greater[>] constant[0]] begin[:]
if compare[name[query] is constant[None]] begin[:]
variable[query] assign[=] call[name[Q], parameter[]]
variable[query] assign[=] binary_operation[name[query] <ast.BitAnd object at 0x7da2590d6b60> call[name[Q], parameter[]]]
if compare[name[query] is constant[None]] begin[:]
return[call[call[name[self].get_queryset, parameter[]].none, parameter[]]]
return[call[call[name[self].get_queryset, parameter[]].filter, parameter[name[query]]]]
return[name[CustomManager]]
|
keyword[def] identifier[create_manager] ( identifier[self] , identifier[base_manager] = identifier[models] . identifier[Manager] ):
literal[string]
identifier[_builder] = identifier[self]
keyword[class] identifier[CustomManager] ( identifier[base_manager] ):
keyword[def] identifier[search] ( identifier[self] , identifier[search_data] , identifier[custom_args] ={}):
literal[string]
identifier[query] = keyword[None]
identifier[lookups] =(
literal[string] %( literal[string] , literal[string] ),
)
identifier[content_type] = identifier[ContentType] . identifier[objects] . identifier[get_for_model] ( identifier[self] . identifier[model] )
identifier[custom_args] = identifier[dict] ({ literal[string] : identifier[content_type] , literal[string] : keyword[True] },** identifier[custom_args] )
identifier[custom_fields] = identifier[dict] (( identifier[f] . identifier[name] , identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[_builder] . identifier[fields_model_class] . identifier[objects] . identifier[filter] (** identifier[custom_args] ))
keyword[for] identifier[value_lookup] keyword[in] identifier[lookups] :
keyword[for] identifier[key] , identifier[f] keyword[in] identifier[custom_fields] . identifier[items] ():
identifier[found] = identifier[_builder] . identifier[values_model_class] . identifier[objects] . identifier[filter] (**{ literal[string] : identifier[f] ,
literal[string] : identifier[content_type] ,
identifier[value_lookup] : identifier[search_data] })
keyword[if] identifier[found] . identifier[count] ()> literal[int] :
keyword[if] identifier[query] keyword[is] keyword[None] :
identifier[query] = identifier[Q] ()
identifier[query] = identifier[query] & identifier[Q] (**{ identifier[str] ( literal[string] % identifier[self] . identifier[model] . identifier[_meta] . identifier[pk] . identifier[name] ):
[ identifier[obj] . identifier[object_id] keyword[for] identifier[obj] keyword[in] identifier[found] ]})
keyword[if] identifier[query] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[get_queryset] (). identifier[none] ()
keyword[return] identifier[self] . identifier[get_queryset] (). identifier[filter] ( identifier[query] )
keyword[return] identifier[CustomManager]
|
def create_manager(self, base_manager=models.Manager):
"""
This will create the custom Manager that will use the fields_model and values_model
respectively.
:param base_manager: the base manager class to inherit from
:return:
"""
_builder = self
class CustomManager(base_manager):
def search(self, search_data, custom_args={}):
"""
Search inside the custom fields for this model for any match
of search_data and returns existing model instances
:param search_data:
:param custom_args:
:return:
"""
query = None
lookups = ('%s__%s' % ('value_text', 'icontains'),)
content_type = ContentType.objects.get_for_model(self.model)
custom_args = dict({'content_type': content_type, 'searchable': True}, **custom_args)
custom_fields = dict(((f.name, f) for f in _builder.fields_model_class.objects.filter(**custom_args)))
for value_lookup in lookups:
for (key, f) in custom_fields.items():
found = _builder.values_model_class.objects.filter(**{'custom_field': f, 'content_type': content_type, value_lookup: search_data})
if found.count() > 0:
if query is None:
query = Q() # depends on [control=['if'], data=['query']]
query = query & Q(**{str('%s__in' % self.model._meta.pk.name): [obj.object_id for obj in found]}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['value_lookup']]
if query is None:
return self.get_queryset().none() # depends on [control=['if'], data=[]]
return self.get_queryset().filter(query)
return CustomManager
|
def native_contracts(address: int, data: BaseCalldata) -> List[int]:
    """Takes integer address 1, 2, 3, 4.

    :param address: one-based index of the precompiled contract
    :param data: calldata forwarded to the native contract
    :return: output of the selected native contract
    """
    if not isinstance(data, ConcreteCalldata):
        # Only concrete calldata can be evaluated by these implementations.
        raise NativeContractException()
    concrete_data = data.concrete(None)
    # Precompiles are addressed 1..4, so shift to a zero-based tuple index.
    handlers = (ecrecover, sha256, ripemd160, identity)
    return handlers[address - 1](concrete_data)
|
def function[native_contracts, parameter[address, data]]:
constant[Takes integer address 1, 2, 3, 4.
:param address:
:param data:
:return:
]
variable[functions] assign[=] tuple[[<ast.Name object at 0x7da1b1dde500>, <ast.Name object at 0x7da1b1ddfb50>, <ast.Name object at 0x7da1b1ddfdf0>, <ast.Name object at 0x7da1b1ddd990>]]
if call[name[isinstance], parameter[name[data], name[ConcreteCalldata]]] begin[:]
variable[concrete_data] assign[=] call[name[data].concrete, parameter[constant[None]]]
return[call[call[name[functions]][binary_operation[name[address] - constant[1]]], parameter[name[concrete_data]]]]
|
keyword[def] identifier[native_contracts] ( identifier[address] : identifier[int] , identifier[data] : identifier[BaseCalldata] )-> identifier[List] [ identifier[int] ]:
literal[string]
identifier[functions] =( identifier[ecrecover] , identifier[sha256] , identifier[ripemd160] , identifier[identity] )
keyword[if] identifier[isinstance] ( identifier[data] , identifier[ConcreteCalldata] ):
identifier[concrete_data] = identifier[data] . identifier[concrete] ( keyword[None] )
keyword[else] :
keyword[raise] identifier[NativeContractException] ()
keyword[return] identifier[functions] [ identifier[address] - literal[int] ]( identifier[concrete_data] )
|
def native_contracts(address: int, data: BaseCalldata) -> List[int]:
"""Takes integer address 1, 2, 3, 4.
:param address:
:param data:
:return:
"""
functions = (ecrecover, sha256, ripemd160, identity)
if isinstance(data, ConcreteCalldata):
concrete_data = data.concrete(None) # depends on [control=['if'], data=[]]
else:
raise NativeContractException()
return functions[address - 1](concrete_data)
|
def dimensions(self, copy=True):
    """
    Return a dictionary of :class:`~hypercube.dims.Dimension` objects.

    Parameters
    ----------
    copy : boolean:
        Returns a copy of the dimension dictionary if True (Default value = True)

    Returns
    -------
    dict
        Dictionary of :class:`~hypercube.dims.Dimension` objects.
    """
    if copy:
        # Hand back a shallow copy so callers cannot mutate internal state.
        return self._dims.copy()
    return self._dims
|
def function[dimensions, parameter[self, copy]]:
constant[
Return a dictionary of :class:`~hypercube.dims.Dimension` objects.
Parameters
----------
copy : boolean:
Returns a copy of the dimension dictionary if True (Default value = True)
Returns
-------
dict
Dictionary of :class:`~hypercube.dims.Dimension` objects.
]
return[<ast.IfExp object at 0x7da1b26adae0>]
|
keyword[def] identifier[dimensions] ( identifier[self] , identifier[copy] = keyword[True] ):
literal[string]
keyword[return] identifier[self] . identifier[_dims] . identifier[copy] () keyword[if] identifier[copy] keyword[else] identifier[self] . identifier[_dims]
|
def dimensions(self, copy=True):
"""
Return a dictionary of :class:`~hypercube.dims.Dimension` objects.
Parameters
----------
copy : boolean:
Returns a copy of the dimension dictionary if True (Default value = True)
Returns
-------
dict
Dictionary of :class:`~hypercube.dims.Dimension` objects.
"""
return self._dims.copy() if copy else self._dims
|
def id_unique(dict_id, name, lineno):
    """Returns True if dict_id not already used. Otherwise, invokes error"""
    global error_occurred
    # Guard clause: an unseen id is accepted immediately.
    if dict_id not in name_dict:
        return True
    # Duplicate definition: flag the module-level error and report the clash.
    error_occurred = True
    print(
        "ERROR - {0:s} definition {1:s} at line {2:d} conflicts with {3:s}"
        .format(name, dict_id, lineno, name_dict[dict_id]))
    return False
|
def function[id_unique, parameter[dict_id, name, lineno]]:
constant[Returns True if dict_id not already used. Otherwise, invokes error]
if compare[name[dict_id] in name[name_dict]] begin[:]
<ast.Global object at 0x7da1b170cac0>
variable[error_occurred] assign[=] constant[True]
call[name[print], parameter[call[constant[ERROR - {0:s} definition {1:s} at line {2:d} conflicts with {3:s}].format, parameter[name[name], name[dict_id], name[lineno], call[name[name_dict]][name[dict_id]]]]]]
return[constant[False]]
|
keyword[def] identifier[id_unique] ( identifier[dict_id] , identifier[name] , identifier[lineno] ):
literal[string]
keyword[if] identifier[dict_id] keyword[in] identifier[name_dict] :
keyword[global] identifier[error_occurred]
identifier[error_occurred] = keyword[True]
identifier[print] (
literal[string]
. identifier[format] ( identifier[name] , identifier[dict_id] , identifier[lineno] , identifier[name_dict] [ identifier[dict_id] ]))
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True]
|
def id_unique(dict_id, name, lineno):
"""Returns True if dict_id not already used. Otherwise, invokes error"""
if dict_id in name_dict:
global error_occurred
error_occurred = True
print('ERROR - {0:s} definition {1:s} at line {2:d} conflicts with {3:s}'.format(name, dict_id, lineno, name_dict[dict_id]))
return False # depends on [control=['if'], data=['dict_id', 'name_dict']]
else:
return True
|
def visit_project(self, node):
    """visit a pyreverse.utils.Project node

    * optionally tag the node with a unique id
    """
    if self.tag:
        node.uid = self.generate_id()
    # Recurse into every module the project contains.
    for mod in node.modules:
        self.visit(mod)
|
def function[visit_project, parameter[self, node]]:
constant[visit a pyreverse.utils.Project node
* optionally tag the node with a unique id
]
if name[self].tag begin[:]
name[node].uid assign[=] call[name[self].generate_id, parameter[]]
for taget[name[module]] in starred[name[node].modules] begin[:]
call[name[self].visit, parameter[name[module]]]
|
keyword[def] identifier[visit_project] ( identifier[self] , identifier[node] ):
literal[string]
keyword[if] identifier[self] . identifier[tag] :
identifier[node] . identifier[uid] = identifier[self] . identifier[generate_id] ()
keyword[for] identifier[module] keyword[in] identifier[node] . identifier[modules] :
identifier[self] . identifier[visit] ( identifier[module] )
|
def visit_project(self, node):
"""visit a pyreverse.utils.Project node
* optionally tag the node with a unique id
"""
if self.tag:
node.uid = self.generate_id() # depends on [control=['if'], data=[]]
for module in node.modules:
self.visit(module) # depends on [control=['for'], data=['module']]
|
def Imm(extended_map, s, lmax):
    """Take the fft of the theta extended map, then zero pad and reorganize it

    This is mostly an internal function, included here for backwards compatibility. See map2salm
    and salm2map for more useful functions.
    """
    import numpy as np
    # _Imm requires a C-contiguous complex128 buffer.
    buffer = np.ascontiguousarray(extended_map, dtype=np.complex128)
    # The output holds (2*lmax + 1)**2 coefficients.
    size = (2 * lmax + 1) ** 2
    imm = np.empty(size, dtype=np.complex128)
    _Imm(buffer, imm, s, lmax)
    return imm
|
def function[Imm, parameter[extended_map, s, lmax]]:
constant[Take the fft of the theta extended map, then zero pad and reorganize it
This is mostly an internal function, included here for backwards compatibility. See map2salm
and salm2map for more useful functions.
]
import module[numpy] as alias[np]
variable[extended_map] assign[=] call[name[np].ascontiguousarray, parameter[name[extended_map]]]
variable[NImm] assign[=] binary_operation[binary_operation[binary_operation[constant[2] * name[lmax]] + constant[1]] ** constant[2]]
variable[imm] assign[=] call[name[np].empty, parameter[name[NImm]]]
call[name[_Imm], parameter[name[extended_map], name[imm], name[s], name[lmax]]]
return[name[imm]]
|
keyword[def] identifier[Imm] ( identifier[extended_map] , identifier[s] , identifier[lmax] ):
literal[string]
keyword[import] identifier[numpy] keyword[as] identifier[np]
identifier[extended_map] = identifier[np] . identifier[ascontiguousarray] ( identifier[extended_map] , identifier[dtype] = identifier[np] . identifier[complex128] )
identifier[NImm] =( literal[int] * identifier[lmax] + literal[int] )** literal[int]
identifier[imm] = identifier[np] . identifier[empty] ( identifier[NImm] , identifier[dtype] = identifier[np] . identifier[complex128] )
identifier[_Imm] ( identifier[extended_map] , identifier[imm] , identifier[s] , identifier[lmax] )
keyword[return] identifier[imm]
|
def Imm(extended_map, s, lmax):
"""Take the fft of the theta extended map, then zero pad and reorganize it
This is mostly an internal function, included here for backwards compatibility. See map2salm
and salm2map for more useful functions.
"""
import numpy as np
extended_map = np.ascontiguousarray(extended_map, dtype=np.complex128)
NImm = (2 * lmax + 1) ** 2
imm = np.empty(NImm, dtype=np.complex128)
_Imm(extended_map, imm, s, lmax)
return imm
|
def fastp_general_stats_table(self):
    """ Take the parsed stats from the fastp report and add it to the
    General Statistics table at the top of the report """
    # fastp reports rates as fractions; the table shows them as percentages.
    def to_percent(x):
        return x * 100.0

    headers = OrderedDict([
        ('pct_duplication', {
            'title': '% Duplication',
            'description': 'Duplication rate in filtered reads',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'RdYlGn-rev',
        }),
        ('after_filtering_q30_rate', {
            'title': '% > Q30',
            'description': 'Percentage of reads > Q30 after filtering',
            'min': 0,
            'max': 100,
            'modify': to_percent,
            'scale': 'GnBu',
            'suffix': '%',
            'hidden': True,
        }),
        ('after_filtering_q30_bases', {
            'title': '{} Q30 bases'.format(config.base_count_prefix),
            'description': 'Bases > Q30 after filtering ({})'.format(config.base_count_desc),
            'min': 0,
            'modify': lambda x: x * config.base_count_multiplier,
            'scale': 'GnBu',
            'shared_key': 'base_count',
            'hidden': True,
        }),
        ('after_filtering_gc_content', {
            'title': 'GC content',
            'description': 'GC content after filtering',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'Blues',
            'modify': to_percent,
        }),
        ('pct_surviving', {
            'title': '% PF',
            'description': 'Percent reads passing filter',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'BuGn',
        }),
        ('pct_adapter', {
            'title': '% Adapter',
            'description': 'Percentage adapter-trimmed reads',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'RdYlGn-rev',
        }),
    ])
    self.general_stats_addcols(self.fastp_data, headers)
|
def function[fastp_general_stats_table, parameter[self]]:
constant[ Take the parsed stats from the fastp report and add it to the
General Statistics table at the top of the report ]
variable[headers] assign[=] call[name[OrderedDict], parameter[]]
call[name[headers]][constant[pct_duplication]] assign[=] dictionary[[<ast.Constant object at 0x7da20c7c88b0>, <ast.Constant object at 0x7da20c7c9ba0>, <ast.Constant object at 0x7da20c7cabf0>, <ast.Constant object at 0x7da20c7c8f40>, <ast.Constant object at 0x7da20c7c8a30>, <ast.Constant object at 0x7da20c7cbe80>], [<ast.Constant object at 0x7da20c7c9cc0>, <ast.Constant object at 0x7da20c7c9870>, <ast.Constant object at 0x7da20c7c8490>, <ast.Constant object at 0x7da20c7c9b70>, <ast.Constant object at 0x7da20c7ca320>, <ast.Constant object at 0x7da20c7ca080>]]
call[name[headers]][constant[after_filtering_q30_rate]] assign[=] dictionary[[<ast.Constant object at 0x7da20c7ca0b0>, <ast.Constant object at 0x7da20c7ca260>, <ast.Constant object at 0x7da20c7ca890>, <ast.Constant object at 0x7da20c7c82e0>, <ast.Constant object at 0x7da20c7cb220>, <ast.Constant object at 0x7da20c7cb670>, <ast.Constant object at 0x7da20c7c8d90>, <ast.Constant object at 0x7da20c7ca8f0>], [<ast.Constant object at 0x7da20c7c9480>, <ast.Constant object at 0x7da20c7cb1c0>, <ast.Constant object at 0x7da20c7c9c60>, <ast.Constant object at 0x7da20c7c9090>, <ast.Lambda object at 0x7da20c7cbcd0>, <ast.Constant object at 0x7da20c7cb7f0>, <ast.Constant object at 0x7da20c7c9f60>, <ast.Constant object at 0x7da20c7cb6d0>]]
call[name[headers]][constant[after_filtering_q30_bases]] assign[=] dictionary[[<ast.Constant object at 0x7da20c7ca1d0>, <ast.Constant object at 0x7da20c7cbd30>, <ast.Constant object at 0x7da20c7cb2b0>, <ast.Constant object at 0x7da20c7ca680>, <ast.Constant object at 0x7da20c7c8250>, <ast.Constant object at 0x7da20c7c8f10>, <ast.Constant object at 0x7da20c7ca350>], [<ast.Call object at 0x7da20c7cb5b0>, <ast.Call object at 0x7da20c7c85b0>, <ast.Constant object at 0x7da20c7c9180>, <ast.Lambda object at 0x7da20c7cbf40>, <ast.Constant object at 0x7da20c7c92d0>, <ast.Constant object at 0x7da20c7c82b0>, <ast.Constant object at 0x7da20c7ca710>]]
call[name[headers]][constant[after_filtering_gc_content]] assign[=] dictionary[[<ast.Constant object at 0x7da20c7cbb50>, <ast.Constant object at 0x7da20c7c94e0>, <ast.Constant object at 0x7da20c7cb4c0>, <ast.Constant object at 0x7da20c7cad40>, <ast.Constant object at 0x7da20c7c8340>, <ast.Constant object at 0x7da20c7c8460>, <ast.Constant object at 0x7da20c7cbc40>], [<ast.Constant object at 0x7da20c7ca860>, <ast.Constant object at 0x7da20c7cb160>, <ast.Constant object at 0x7da20c7c8640>, <ast.Constant object at 0x7da20c7c8760>, <ast.Constant object at 0x7da20c7ca590>, <ast.Constant object at 0x7da20c7cbbb0>, <ast.Lambda object at 0x7da20c7ca8c0>]]
call[name[headers]][constant[pct_surviving]] assign[=] dictionary[[<ast.Constant object at 0x7da20c7cb460>, <ast.Constant object at 0x7da20c7c9e40>, <ast.Constant object at 0x7da20c7c85e0>, <ast.Constant object at 0x7da20c7c9240>, <ast.Constant object at 0x7da20c7cbac0>, <ast.Constant object at 0x7da20c7c9c90>], [<ast.Constant object at 0x7da20c7c9360>, <ast.Constant object at 0x7da20c7ca2c0>, <ast.Constant object at 0x7da20c7c8700>, <ast.Constant object at 0x7da20c7cb400>, <ast.Constant object at 0x7da20c7c9ed0>, <ast.Constant object at 0x7da20c7ca1a0>]]
call[name[headers]][constant[pct_adapter]] assign[=] dictionary[[<ast.Constant object at 0x7da20c7c8ac0>, <ast.Constant object at 0x7da20c7cb610>, <ast.Constant object at 0x7da20c7ca440>, <ast.Constant object at 0x7da20c7c9b10>, <ast.Constant object at 0x7da20c7cbc10>, <ast.Constant object at 0x7da20c7cbc70>], [<ast.Constant object at 0x7da20c7c8b80>, <ast.Constant object at 0x7da20c7c8a90>, <ast.Constant object at 0x7da20c7cace0>, <ast.Constant object at 0x7da20c7c9b40>, <ast.Constant object at 0x7da20c7c89d0>, <ast.Constant object at 0x7da20c7cb280>]]
call[name[self].general_stats_addcols, parameter[name[self].fastp_data, name[headers]]]
|
keyword[def] identifier[fastp_general_stats_table] ( identifier[self] ):
literal[string]
identifier[headers] = identifier[OrderedDict] ()
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : keyword[lambda] identifier[x] : identifier[x] * literal[int] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[True]
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] . identifier[format] ( identifier[config] . identifier[base_count_prefix] ),
literal[string] : literal[string] . identifier[format] ( identifier[config] . identifier[base_count_desc] ),
literal[string] : literal[int] ,
literal[string] : keyword[lambda] identifier[x] : identifier[x] * identifier[config] . identifier[base_count_multiplier] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[True]
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[lambda] identifier[x] : identifier[x] * literal[int]
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[headers] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[self] . identifier[general_stats_addcols] ( identifier[self] . identifier[fastp_data] , identifier[headers] )
|
def fastp_general_stats_table(self):
    """Populate the report's top General Statistics table with the parsed
    fastp metrics."""
    headers = OrderedDict()
    # One header definition per column, in display order.
    headers['pct_duplication'] = {
        'title': '% Duplication',
        'description': 'Duplication rate in filtered reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'RdYlGn-rev',
    }
    headers['after_filtering_q30_rate'] = {
        'title': '% > Q30',
        'description': 'Percentage of reads > Q30 after filtering',
        'min': 0,
        'max': 100,
        'modify': lambda x: x * 100.0,  # fraction -> percentage
        'scale': 'GnBu',
        'suffix': '%',
        'hidden': True,
    }
    headers['after_filtering_q30_bases'] = {
        'title': '{} Q30 bases'.format(config.base_count_prefix),
        'description': 'Bases > Q30 after filtering ({})'.format(config.base_count_desc),
        'min': 0,
        'modify': lambda x: x * config.base_count_multiplier,
        'scale': 'GnBu',
        'shared_key': 'base_count',
        'hidden': True,
    }
    headers['after_filtering_gc_content'] = {
        'title': 'GC content',
        'description': 'GC content after filtering',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'Blues',
        'modify': lambda x: x * 100.0,  # fraction -> percentage
    }
    headers['pct_surviving'] = {
        'title': '% PF',
        'description': 'Percent reads passing filter',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'BuGn',
    }
    headers['pct_adapter'] = {
        'title': '% Adapter',
        'description': 'Percentage adapter-trimmed reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'RdYlGn-rev',
    }
    self.general_stats_addcols(self.fastp_data, headers)
|
def put(self, data):
    """Queue an item for writing (existing data will be overwritten).

    Parameters
    ----------
    data : dict
        Item data
    """
    pending = self._to_put
    pending.append(data)
    # Flush eagerly once the batch-size policy says the buffer is full.
    if not self.should_flush():
        return
    self.flush()
|
def function[put, parameter[self, data]]:
constant[
Write an item (will overwrite existing data)
Parameters
----------
data : dict
Item data
]
call[name[self]._to_put.append, parameter[name[data]]]
if call[name[self].should_flush, parameter[]] begin[:]
call[name[self].flush, parameter[]]
|
keyword[def] identifier[put] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[_to_put] . identifier[append] ( identifier[data] )
keyword[if] identifier[self] . identifier[should_flush] ():
identifier[self] . identifier[flush] ()
|
# NOTE(review): the trailing "# depends on [control=..., data=...]" markers
# appear to be machine-generated control/data-dependence annotations — left
# byte-identical; do not edit them by hand.
def put(self, data):
    """
    Write an item (will overwrite existing data)
    Parameters
    ----------
    data : dict
        Item data
    """
    self._to_put.append(data)
    if self.should_flush():
        self.flush() # depends on [control=['if'], data=[]]
|
def count_comments_handler(sender, **kwargs):
    """Increment ``Entry.comment_count`` when a public comment is posted.

    Signal-handler signature: ``sender`` is unused; the comment instance is
    taken from ``kwargs['comment']``.
    """
    posted = kwargs['comment']
    # Only public comments count; bail out early otherwise.
    if not posted.is_public:
        return
    target = posted.content_object
    if not isinstance(target, Entry):
        return
    # F() keeps the increment atomic at the database level.
    target.comment_count = F('comment_count') + 1
    target.save(update_fields=['comment_count'])
|
def function[count_comments_handler, parameter[sender]]:
constant[
Update Entry.comment_count when a public comment was posted.
]
variable[comment] assign[=] call[name[kwargs]][constant[comment]]
if name[comment].is_public begin[:]
variable[entry] assign[=] name[comment].content_object
if call[name[isinstance], parameter[name[entry], name[Entry]]] begin[:]
name[entry].comment_count assign[=] binary_operation[call[name[F], parameter[constant[comment_count]]] + constant[1]]
call[name[entry].save, parameter[]]
|
keyword[def] identifier[count_comments_handler] ( identifier[sender] ,** identifier[kwargs] ):
literal[string]
identifier[comment] = identifier[kwargs] [ literal[string] ]
keyword[if] identifier[comment] . identifier[is_public] :
identifier[entry] = identifier[comment] . identifier[content_object]
keyword[if] identifier[isinstance] ( identifier[entry] , identifier[Entry] ):
identifier[entry] . identifier[comment_count] = identifier[F] ( literal[string] )+ literal[int]
identifier[entry] . identifier[save] ( identifier[update_fields] =[ literal[string] ])
|
# NOTE(review): the trailing "# depends on [control=..., data=...]" markers
# appear to be machine-generated control/data-dependence annotations — left
# byte-identical; do not edit them by hand.
def count_comments_handler(sender, **kwargs):
    """
    Update Entry.comment_count when a public comment was posted.
    """
    comment = kwargs['comment']
    if comment.is_public:
        entry = comment.content_object
        if isinstance(entry, Entry):
            entry.comment_count = F('comment_count') + 1
            entry.save(update_fields=['comment_count']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
|
def _read(self, mux, gain, data_rate, mode):
    """Run one ADC conversion with the given mux, gain, data_rate and mode
    settings and return the signed integer sample.
    """
    # Reject unsupported gains before touching the hardware.
    if gain not in ADS1x15_CONFIG_GAIN:
        raise ValueError('Gain must be one of: 2/3, 1, 2, 4, 8, 16')
    # Default sample rate differs between ADS1015 and ADS1115, so the
    # subclass supplies it when the caller did not.
    if data_rate is None:
        data_rate = self._data_rate_default()
    # Assemble the 16-bit config register in one expression.
    config = (ADS1x15_CONFIG_OS_SINGLE                      # leave power-down for one conversion
              | ((mux & 0x07) << ADS1x15_CONFIG_MUX_OFFSET)  # input channel selection
              | ADS1x15_CONFIG_GAIN[gain]
              | mode                                         # continuous vs single-shot
              | self._data_rate_config(data_rate)            # subclass-specific rate bits
              | ADS1x15_CONFIG_COMP_QUE_DISABLE)             # comparator disabled
    # Write the config register as a big-endian byte pair to start conversion.
    self._device.writeList(ADS1x15_POINTER_CONFIG,
                           [(config >> 8) & 0xFF, config & 0xFF])
    # Sleep one sample period plus a 0.1 ms safety margin.
    time.sleep(1.0 / data_rate + 0.0001)
    # Read the two-byte conversion result and convert it to a signed value.
    hi, lo = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
    return self._conversion_value(lo, hi)
|
def function[_read, parameter[self, mux, gain, data_rate, mode]]:
constant[Perform an ADC read with the provided mux, gain, data_rate, and mode
values. Returns the signed integer result of the read.
]
variable[config] assign[=] name[ADS1x15_CONFIG_OS_SINGLE]
<ast.AugAssign object at 0x7da20c6c6a10>
if compare[name[gain] <ast.NotIn object at 0x7da2590d7190> name[ADS1x15_CONFIG_GAIN]] begin[:]
<ast.Raise object at 0x7da20c990cd0>
<ast.AugAssign object at 0x7da20c9936a0>
<ast.AugAssign object at 0x7da20c9903d0>
if compare[name[data_rate] is constant[None]] begin[:]
variable[data_rate] assign[=] call[name[self]._data_rate_default, parameter[]]
<ast.AugAssign object at 0x7da20c990f40>
<ast.AugAssign object at 0x7da20c991390>
call[name[self]._device.writeList, parameter[name[ADS1x15_POINTER_CONFIG], list[[<ast.BinOp object at 0x7da20c990e20>, <ast.BinOp object at 0x7da20c9930d0>]]]]
call[name[time].sleep, parameter[binary_operation[binary_operation[constant[1.0] / name[data_rate]] + constant[0.0001]]]]
variable[result] assign[=] call[name[self]._device.readList, parameter[name[ADS1x15_POINTER_CONVERSION], constant[2]]]
return[call[name[self]._conversion_value, parameter[call[name[result]][constant[1]], call[name[result]][constant[0]]]]]
|
keyword[def] identifier[_read] ( identifier[self] , identifier[mux] , identifier[gain] , identifier[data_rate] , identifier[mode] ):
literal[string]
identifier[config] = identifier[ADS1x15_CONFIG_OS_SINGLE]
identifier[config] |=( identifier[mux] & literal[int] )<< identifier[ADS1x15_CONFIG_MUX_OFFSET]
keyword[if] identifier[gain] keyword[not] keyword[in] identifier[ADS1x15_CONFIG_GAIN] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[config] |= identifier[ADS1x15_CONFIG_GAIN] [ identifier[gain] ]
identifier[config] |= identifier[mode]
keyword[if] identifier[data_rate] keyword[is] keyword[None] :
identifier[data_rate] = identifier[self] . identifier[_data_rate_default] ()
identifier[config] |= identifier[self] . identifier[_data_rate_config] ( identifier[data_rate] )
identifier[config] |= identifier[ADS1x15_CONFIG_COMP_QUE_DISABLE]
identifier[self] . identifier[_device] . identifier[writeList] ( identifier[ADS1x15_POINTER_CONFIG] ,[( identifier[config] >> literal[int] )& literal[int] , identifier[config] & literal[int] ])
identifier[time] . identifier[sleep] ( literal[int] / identifier[data_rate] + literal[int] )
identifier[result] = identifier[self] . identifier[_device] . identifier[readList] ( identifier[ADS1x15_POINTER_CONVERSION] , literal[int] )
keyword[return] identifier[self] . identifier[_conversion_value] ( identifier[result] [ literal[int] ], identifier[result] [ literal[int] ])
|
# NOTE(review): the trailing "# depends on [control=..., data=...]" markers
# appear to be machine-generated control/data-dependence annotations — left
# byte-identical; do not edit them by hand.
def _read(self, mux, gain, data_rate, mode):
    """Perform an ADC read with the provided mux, gain, data_rate, and mode
    values. Returns the signed integer result of the read.
    """
    config = ADS1x15_CONFIG_OS_SINGLE # Go out of power-down mode for conversion.
    # Specify mux value.
    config |= (mux & 7) << ADS1x15_CONFIG_MUX_OFFSET
    # Validate the passed in gain and then set it in the config.
    if gain not in ADS1x15_CONFIG_GAIN:
        raise ValueError('Gain must be one of: 2/3, 1, 2, 4, 8, 16') # depends on [control=['if'], data=[]]
    config |= ADS1x15_CONFIG_GAIN[gain]
    # Set the mode (continuous or single shot).
    config |= mode
    # Get the default data rate if none is specified (default differs between
    # ADS1015 and ADS1115).
    if data_rate is None:
        data_rate = self._data_rate_default() # depends on [control=['if'], data=['data_rate']]
    # Set the data rate (this is controlled by the subclass as it differs
    # between ADS1015 and ADS1115).
    config |= self._data_rate_config(data_rate)
    config |= ADS1x15_CONFIG_COMP_QUE_DISABLE # Disble comparator mode.
    # Send the config value to start the ADC conversion.
    # Explicitly break the 16-bit value down to a big endian pair of bytes.
    self._device.writeList(ADS1x15_POINTER_CONFIG, [config >> 8 & 255, config & 255])
    # Wait for the ADC sample to finish based on the sample rate plus a
    # small offset to be sure (0.1 millisecond).
    time.sleep(1.0 / data_rate + 0.0001)
    # Retrieve the result.
    result = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
    return self._conversion_value(result[1], result[0])
|
def write_shared_locations(self, paths, dry_run=False):
    """Record shared install locations in the SHARED file of .dist-info.

    :param paths: A dictionary as described in the documentation for
                  :meth:`shared_locations`.
    :param dry_run: If True, the action is logged but no file is actually
                    written.
    :return: The path of the file written to.
    """
    shared_path = os.path.join(self.path, 'SHARED')
    logger.info('creating %s', shared_path)
    if dry_run:
        return None
    # Only record locations that exist as directories on this system.
    entries = ['%s=%s' % (key, paths[key])
               for key in ('prefix', 'lib', 'headers', 'scripts', 'data')
               if os.path.isdir(paths[key])]
    entries.extend('namespace=%s' % ns for ns in paths.get('namespace', ()))
    with codecs.open(shared_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(entries))
    return shared_path
|
def function[write_shared_locations, parameter[self, paths, dry_run]]:
constant[
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
]
variable[shared_path] assign[=] call[name[os].path.join, parameter[name[self].path, constant[SHARED]]]
call[name[logger].info, parameter[constant[creating %s], name[shared_path]]]
if name[dry_run] begin[:]
return[constant[None]]
variable[lines] assign[=] list[[]]
for taget[name[key]] in starred[tuple[[<ast.Constant object at 0x7da1b2089750>, <ast.Constant object at 0x7da1b208a7d0>, <ast.Constant object at 0x7da1b20895d0>, <ast.Constant object at 0x7da1b2088670>, <ast.Constant object at 0x7da1b208b6d0>]]] begin[:]
variable[path] assign[=] call[name[paths]][name[key]]
if call[name[os].path.isdir, parameter[call[name[paths]][name[key]]]] begin[:]
call[name[lines].append, parameter[binary_operation[constant[%s=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1e7b010>, <ast.Name object at 0x7da1b1e7bb80>]]]]]
for taget[name[ns]] in starred[call[name[paths].get, parameter[constant[namespace], tuple[[]]]]] begin[:]
call[name[lines].append, parameter[binary_operation[constant[namespace=%s] <ast.Mod object at 0x7da2590d6920> name[ns]]]]
with call[name[codecs].open, parameter[name[shared_path], constant[w]]] begin[:]
call[name[f].write, parameter[call[constant[
].join, parameter[name[lines]]]]]
return[name[shared_path]]
|
keyword[def] identifier[write_shared_locations] ( identifier[self] , identifier[paths] , identifier[dry_run] = keyword[False] ):
literal[string]
identifier[shared_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[path] , literal[string] )
identifier[logger] . identifier[info] ( literal[string] , identifier[shared_path] )
keyword[if] identifier[dry_run] :
keyword[return] keyword[None]
identifier[lines] =[]
keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[path] = identifier[paths] [ identifier[key] ]
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[paths] [ identifier[key] ]):
identifier[lines] . identifier[append] ( literal[string] %( identifier[key] , identifier[path] ))
keyword[for] identifier[ns] keyword[in] identifier[paths] . identifier[get] ( literal[string] ,()):
identifier[lines] . identifier[append] ( literal[string] % identifier[ns] )
keyword[with] identifier[codecs] . identifier[open] ( identifier[shared_path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( literal[string] . identifier[join] ( identifier[lines] ))
keyword[return] identifier[shared_path]
|
# NOTE(review): the trailing "# depends on [control=..., data=...]" markers
# appear to be machine-generated control/data-dependence annotations — left
# byte-identical; do not edit them by hand.
def write_shared_locations(self, paths, dry_run=False):
    """
    Write shared location information to the SHARED file in .dist-info.
    :param paths: A dictionary as described in the documentation for
    :meth:`shared_locations`.
    :param dry_run: If True, the action is logged but no file is actually
    written.
    :return: The path of the file written to.
    """
    shared_path = os.path.join(self.path, 'SHARED')
    logger.info('creating %s', shared_path)
    if dry_run:
        return None # depends on [control=['if'], data=[]]
    lines = []
    for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
        path = paths[key]
        if os.path.isdir(paths[key]):
            lines.append('%s=%s' % (key, path)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
    for ns in paths.get('namespace', ()):
        lines.append('namespace=%s' % ns) # depends on [control=['for'], data=['ns']]
    with codecs.open(shared_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(lines)) # depends on [control=['with'], data=['f']]
    return shared_path
|
def _traceroute_callback(self, line, kill_switch):
"""
Callback function to handle traceroute.
:param self:
:param line:
:param kill_switch:
:return:
"""
line = line.lower()
if "traceroute to" in line:
self.started = True
# need to run as root but not running as root.
# usually happens when doing TCP and ICMP traceroute.
if "enough privileges" in line:
self.error = True
self.kill_switch()
self.stopped = True
# name resolution failed
if "service not known" in line:
self.error = True
self.kill_switch()
self.stopped = True
|
def function[_traceroute_callback, parameter[self, line, kill_switch]]:
constant[
Callback function to handle traceroute.
:param self:
:param line:
:param kill_switch:
:return:
]
variable[line] assign[=] call[name[line].lower, parameter[]]
if compare[constant[traceroute to] in name[line]] begin[:]
name[self].started assign[=] constant[True]
if compare[constant[enough privileges] in name[line]] begin[:]
name[self].error assign[=] constant[True]
call[name[self].kill_switch, parameter[]]
name[self].stopped assign[=] constant[True]
if compare[constant[service not known] in name[line]] begin[:]
name[self].error assign[=] constant[True]
call[name[self].kill_switch, parameter[]]
name[self].stopped assign[=] constant[True]
|
keyword[def] identifier[_traceroute_callback] ( identifier[self] , identifier[line] , identifier[kill_switch] ):
literal[string]
identifier[line] = identifier[line] . identifier[lower] ()
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[self] . identifier[started] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[self] . identifier[error] = keyword[True]
identifier[self] . identifier[kill_switch] ()
identifier[self] . identifier[stopped] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[self] . identifier[error] = keyword[True]
identifier[self] . identifier[kill_switch] ()
identifier[self] . identifier[stopped] = keyword[True]
|
# NOTE(review): the trailing "# depends on [control=..., data=...]" markers
# appear to be machine-generated control/data-dependence annotations — left
# byte-identical; do not edit them by hand.
def _traceroute_callback(self, line, kill_switch):
    """
    Callback function to handle traceroute.
    :param self:
    :param line:
    :param kill_switch:
    :return:
    """
    line = line.lower()
    if 'traceroute to' in line:
        self.started = True # depends on [control=['if'], data=[]]
    # need to run as root but not running as root.
    # usually happens when doing TCP and ICMP traceroute.
    if 'enough privileges' in line:
        self.error = True
        self.kill_switch()
        self.stopped = True # depends on [control=['if'], data=[]]
    # name resolution failed
    if 'service not known' in line:
        self.error = True
        self.kill_switch()
        self.stopped = True # depends on [control=['if'], data=[]]
|
def find_node(endpoint_url=None, nid=None, selector=None, name=None, cid=None, pnid=None):
    """
    find node according to endpoint url or node ID. if both are defined then search will focus on ID only
    :param endpoint_url: endpoint's url owned by node to found
    :param nid: node id
    :param selector: selector string like <node fiel> <operation (= { =, !=, >=, >, <, <= , like, =~})>
    <value (= { number, String, regex })>
    :param name: node name to found in container with provided cid or in parent node with provided pnid
    :param cid: container id
    :param pnid: parent node id
    :return: the found node or None if not found
    """
    LOGGER.debug("NodeService.find_node")
    ret = None
    # At least one search criterion must be supplied.
    if (nid is None or not nid) and (endpoint_url is None or not endpoint_url) and \
            (selector is None or not selector) and (name is None or not name):
        raise exceptions.ArianeCallParametersError('id and endpoint_url and selector and name')
    # Criterion precedence: nid > endpoint_url > selector > name.
    # When a higher-priority criterion is set, the lower ones are cleared.
    if (nid is not None and nid) and \
            ((endpoint_url is not None and endpoint_url) or (selector is not None and selector) or
             (name is not None and name and ((cid is not None and cid) or (pnid is not None and pnid)))):
        LOGGER.debug('NodeService.find_node - Both id and other search params are defined. '
                     'Will give you search on id.')
        # traceback.print_stack()
        endpoint_url = None
        selector = None
        name = None
        cid = None
        pnid = None
    if (endpoint_url is not None and endpoint_url) and \
            ((selector is not None and selector) or
             (name is not None and name and ((cid is not None and cid) or (pnid is not None and pnid)))):
        LOGGER.warning('NodeService.find_node - Both endpoint url other search params are defined. '
                       'Will give you search based on endpoint url')
        # traceback.print_stack()
        selector = None
        name = None
        cid = None
        pnid = None
    if (selector is not None and selector) and \
            (name is not None and name and ((cid is not None and cid) or (pnid is not None and pnid))):
        LOGGER.warning('NodeService.find_node - Both selector other search params are defined. '
                       'Will give you search based on selector')
        # traceback.print_stack()
        name = None
        cid = None
        pnid = None
    # By-name search with both scopes set: parent node id wins over container id.
    if (name is not None and name) and ((cid is not None and cid) and (pnid is not None and pnid)):
        LOGGER.warning('NodeService.find_node - search node by name : '
                       'both container ID and parent node ID are defined. '
                       'Will give you search based on parent node id')
        # traceback.print_stack()
        cid = None
    # A by-name search needs a scope: either a container or a parent node.
    if (name is not None and name) and ((cid is None or not cid) and (pnid is None or not pnid)):
        raise exceptions.ArianeCallParametersError('cid and pnid')
    params = None
    return_set_of_nodes = False
    # Build request parameters for whichever criterion survived above. The
    # 'OPERATION' key is only needed for non-REST drivers.
    if nid is not None and nid:
        params = SessionService.complete_transactional_req({'ID': nid})
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params['OPERATION'] = 'getNode'
    elif endpoint_url is not None and endpoint_url:
        params = SessionService.complete_transactional_req({'endpointURL': endpoint_url})
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params['OPERATION'] = 'getNodeByEndpointURL'
    elif selector is not None and selector:
        params = SessionService.complete_transactional_req({'selector': selector})
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params['OPERATION'] = 'getNodes'
        # A selector may match several nodes, so a list is returned.
        return_set_of_nodes = True
    elif name is not None and name:
        if cid is not None and cid:
            params = SessionService.complete_transactional_req({'name': name, 'containerID': cid})
            if MappingService.driver_type != DriverFactory.DRIVER_REST:
                params['OPERATION'] = 'getNodeByName'
        elif pnid is not None and pnid:
            params = SessionService.complete_transactional_req({'name': name, 'parentNodeID': pnid})
            if MappingService.driver_type != DriverFactory.DRIVER_REST:
                params['OPERATION'] = 'getNodeByName'
    if params is not None:
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            args = {'properties': params}
        else:
            args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            # NOTE(review): the requester choice depends on cid here —
            # presumably a routing concern for container-scoped lookups;
            # confirm intent against the other NodeService finders.
            if cid is not None:
                response = MappingService.requester.call(args)
            else:
                response = NodeService.requester.call(args)
        else:
            response = NodeService.requester.call(args)
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            # Non-REST drivers return a future-like object; resolve it.
            response = response.get()
        if response.rc == 0:
            if return_set_of_nodes:
                ret = []
                for node in response.response_content['nodes']:
                    ret.append(Node.json_2_node(node))
            else:
                ret = Node.json_2_node(response.response_content)
        elif response.rc != 404:
            # Any non-404 failure is logged; rc 404 simply means "not found"
            # and falls through to return None.
            err_msg = 'NodeService.find_node - Problem while searching node (id:' + str(nid) + \
                      ', primary admin gate url ' \
                      + str(endpoint_url) + ' ). ' + \
                      'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + \
                      " (" + str(response.rc) + ")"
            LOGGER.warning(err_msg)
            if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
                raise ArianeMappingOverloadError("NodeService.find_node",
                                                 ArianeMappingOverloadError.ERROR_MSG)
            # traceback.print_stack()
    return ret
|
def function[find_node, parameter[endpoint_url, nid, selector, name, cid, pnid]]:
constant[
find node according to endpoint url or node ID. if both are defined then search will focus on ID only
:param endpoint_url: endpoint's url owned by node to found
:param nid: node id
:param selector: selector string like <node fiel> <operation (= { =, !=, >=, >, <, <= , like, =~})>
<value (= { number, String, regex })>
:param name: node name to found in container with provided cid or in parent node with provided pnid
:param cid: container id
:param pnid: parent node id
:return: the found node or None if not found
]
call[name[LOGGER].debug, parameter[constant[NodeService.find_node]]]
variable[ret] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18f7213f0> begin[:]
<ast.Raise object at 0x7da18f723ee0>
if <ast.BoolOp object at 0x7da18f7206d0> begin[:]
call[name[LOGGER].debug, parameter[constant[NodeService.find_node - Both id and other search params are defined. Will give you search on id.]]]
variable[endpoint_url] assign[=] constant[None]
variable[selector] assign[=] constant[None]
variable[name] assign[=] constant[None]
variable[cid] assign[=] constant[None]
variable[pnid] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18f722e30> begin[:]
call[name[LOGGER].warning, parameter[constant[NodeService.find_node - Both endpoint url other search params are defined. Will give you search based on endpoint url]]]
variable[selector] assign[=] constant[None]
variable[name] assign[=] constant[None]
variable[cid] assign[=] constant[None]
variable[pnid] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18f7237f0> begin[:]
call[name[LOGGER].warning, parameter[constant[NodeService.find_node - Both selector other search params are defined. Will give you search based on selector]]]
variable[name] assign[=] constant[None]
variable[cid] assign[=] constant[None]
variable[pnid] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18f7209a0> begin[:]
call[name[LOGGER].warning, parameter[constant[NodeService.find_node - search node by name : both container ID and parent node ID are defined. Will give you search based on parent node id]]]
variable[cid] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18f722020> begin[:]
<ast.Raise object at 0x7da18c4ce3e0>
variable[params] assign[=] constant[None]
variable[return_set_of_nodes] assign[=] constant[False]
if <ast.BoolOp object at 0x7da18c4cdf30> begin[:]
variable[params] assign[=] call[name[SessionService].complete_transactional_req, parameter[dictionary[[<ast.Constant object at 0x7da18c4ced70>], [<ast.Name object at 0x7da18c4ce500>]]]]
if compare[name[MappingService].driver_type not_equal[!=] name[DriverFactory].DRIVER_REST] begin[:]
call[name[params]][constant[OPERATION]] assign[=] constant[getNode]
if compare[name[params] is_not constant[None]] begin[:]
if compare[name[MappingService].driver_type not_equal[!=] name[DriverFactory].DRIVER_REST] begin[:]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cfbe0>], [<ast.Name object at 0x7da18c4ceef0>]]
if compare[name[MappingService].driver_type not_equal[!=] name[DriverFactory].DRIVER_REST] begin[:]
if compare[name[cid] is_not constant[None]] begin[:]
variable[response] assign[=] call[name[MappingService].requester.call, parameter[name[args]]]
if compare[name[MappingService].driver_type not_equal[!=] name[DriverFactory].DRIVER_REST] begin[:]
variable[response] assign[=] call[name[response].get, parameter[]]
if compare[name[response].rc equal[==] constant[0]] begin[:]
if name[return_set_of_nodes] begin[:]
variable[ret] assign[=] list[[]]
for taget[name[node]] in starred[call[name[response].response_content][constant[nodes]]] begin[:]
call[name[ret].append, parameter[call[name[Node].json_2_node, parameter[name[node]]]]]
return[name[ret]]
|
keyword[def] identifier[find_node] ( identifier[endpoint_url] = keyword[None] , identifier[nid] = keyword[None] , identifier[selector] = keyword[None] , identifier[name] = keyword[None] , identifier[cid] = keyword[None] , identifier[pnid] = keyword[None] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[ret] = keyword[None]
keyword[if] ( identifier[nid] keyword[is] keyword[None] keyword[or] keyword[not] identifier[nid] ) keyword[and] ( identifier[endpoint_url] keyword[is] keyword[None] keyword[or] keyword[not] identifier[endpoint_url] ) keyword[and] ( identifier[selector] keyword[is] keyword[None] keyword[or] keyword[not] identifier[selector] ) keyword[and] ( identifier[name] keyword[is] keyword[None] keyword[or] keyword[not] identifier[name] ):
keyword[raise] identifier[exceptions] . identifier[ArianeCallParametersError] ( literal[string] )
keyword[if] ( identifier[nid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[nid] ) keyword[and] (( identifier[endpoint_url] keyword[is] keyword[not] keyword[None] keyword[and] identifier[endpoint_url] ) keyword[or] ( identifier[selector] keyword[is] keyword[not] keyword[None] keyword[and] identifier[selector] ) keyword[or]
( identifier[name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[name] keyword[and] (( identifier[cid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cid] ) keyword[or] ( identifier[pnid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[pnid] )))):
identifier[LOGGER] . identifier[debug] ( literal[string]
literal[string] )
identifier[endpoint_url] = keyword[None]
identifier[selector] = keyword[None]
identifier[name] = keyword[None]
identifier[cid] = keyword[None]
identifier[pnid] = keyword[None]
keyword[if] ( identifier[endpoint_url] keyword[is] keyword[not] keyword[None] keyword[and] identifier[endpoint_url] ) keyword[and] (( identifier[selector] keyword[is] keyword[not] keyword[None] keyword[and] identifier[selector] ) keyword[or]
( identifier[name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[name] keyword[and] (( identifier[cid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cid] ) keyword[or] ( identifier[pnid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[pnid] )))):
identifier[LOGGER] . identifier[warning] ( literal[string]
literal[string] )
identifier[selector] = keyword[None]
identifier[name] = keyword[None]
identifier[cid] = keyword[None]
identifier[pnid] = keyword[None]
keyword[if] ( identifier[selector] keyword[is] keyword[not] keyword[None] keyword[and] identifier[selector] ) keyword[and] ( identifier[name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[name] keyword[and] (( identifier[cid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cid] ) keyword[or] ( identifier[pnid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[pnid] ))):
identifier[LOGGER] . identifier[warning] ( literal[string]
literal[string] )
identifier[name] = keyword[None]
identifier[cid] = keyword[None]
identifier[pnid] = keyword[None]
keyword[if] ( identifier[name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[name] ) keyword[and] (( identifier[cid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cid] ) keyword[and] ( identifier[pnid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[pnid] )):
identifier[LOGGER] . identifier[warning] ( literal[string]
literal[string]
literal[string] )
identifier[cid] = keyword[None]
keyword[if] ( identifier[name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[name] ) keyword[and] (( identifier[cid] keyword[is] keyword[None] keyword[or] keyword[not] identifier[cid] ) keyword[and] ( identifier[pnid] keyword[is] keyword[None] keyword[or] keyword[not] identifier[pnid] )):
keyword[raise] identifier[exceptions] . identifier[ArianeCallParametersError] ( literal[string] )
identifier[params] = keyword[None]
identifier[return_set_of_nodes] = keyword[False]
keyword[if] identifier[nid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[nid] :
identifier[params] = identifier[SessionService] . identifier[complete_transactional_req] ({ literal[string] : identifier[nid] })
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[params] [ literal[string] ]= literal[string]
keyword[elif] identifier[endpoint_url] keyword[is] keyword[not] keyword[None] keyword[and] identifier[endpoint_url] :
identifier[params] = identifier[SessionService] . identifier[complete_transactional_req] ({ literal[string] : identifier[endpoint_url] })
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[params] [ literal[string] ]= literal[string]
keyword[elif] identifier[selector] keyword[is] keyword[not] keyword[None] keyword[and] identifier[selector] :
identifier[params] = identifier[SessionService] . identifier[complete_transactional_req] ({ literal[string] : identifier[selector] })
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[params] [ literal[string] ]= literal[string]
identifier[return_set_of_nodes] = keyword[True]
keyword[elif] identifier[name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[name] :
keyword[if] identifier[cid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cid] :
identifier[params] = identifier[SessionService] . identifier[complete_transactional_req] ({ literal[string] : identifier[name] , literal[string] : identifier[cid] })
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[params] [ literal[string] ]= literal[string]
keyword[elif] identifier[pnid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[pnid] :
identifier[params] = identifier[SessionService] . identifier[complete_transactional_req] ({ literal[string] : identifier[name] , literal[string] : identifier[pnid] })
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[params] [ literal[string] ]= literal[string]
keyword[if] identifier[params] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[args] ={ literal[string] : identifier[params] }
keyword[else] :
identifier[args] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[params] }
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
keyword[if] identifier[cid] keyword[is] keyword[not] keyword[None] :
identifier[response] = identifier[MappingService] . identifier[requester] . identifier[call] ( identifier[args] )
keyword[else] :
identifier[response] = identifier[NodeService] . identifier[requester] . identifier[call] ( identifier[args] )
keyword[else] :
identifier[response] = identifier[NodeService] . identifier[requester] . identifier[call] ( identifier[args] )
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[response] = identifier[response] . identifier[get] ()
keyword[if] identifier[response] . identifier[rc] == literal[int] :
keyword[if] identifier[return_set_of_nodes] :
identifier[ret] =[]
keyword[for] identifier[node] keyword[in] identifier[response] . identifier[response_content] [ literal[string] ]:
identifier[ret] . identifier[append] ( identifier[Node] . identifier[json_2_node] ( identifier[node] ))
keyword[else] :
identifier[ret] = identifier[Node] . identifier[json_2_node] ( identifier[response] . identifier[response_content] )
keyword[elif] identifier[response] . identifier[rc] != literal[int] :
identifier[err_msg] = literal[string] + identifier[str] ( identifier[nid] )+ literal[string] + identifier[str] ( identifier[endpoint_url] )+ literal[string] + literal[string] + identifier[str] ( identifier[response] . identifier[response_content] )+ literal[string] + identifier[str] ( identifier[response] . identifier[error_message] )+ literal[string] + identifier[str] ( identifier[response] . identifier[rc] )+ literal[string]
identifier[LOGGER] . identifier[warning] ( identifier[err_msg] )
keyword[if] identifier[response] . identifier[rc] == literal[int] keyword[and] identifier[ArianeMappingOverloadError] . identifier[ERROR_MSG] keyword[in] identifier[response] . identifier[error_message] :
keyword[raise] identifier[ArianeMappingOverloadError] ( literal[string] ,
identifier[ArianeMappingOverloadError] . identifier[ERROR_MSG] )
keyword[return] identifier[ret]
|
def find_node(endpoint_url=None, nid=None, selector=None, name=None, cid=None, pnid=None):
    """
    Find a node by node ID, endpoint url, selector, or name.

    When several criteria are provided, search precedence is: nid, then
    endpoint_url, then selector, then name (a name search must be scoped
    by either cid or pnid).
    :param endpoint_url: endpoint's url owned by node to found
    :param nid: node id
    :param selector: selector string like <node field> <operation (= { =, !=, >=, >, <, <= , like, =~})>
    <value (= { number, String, regex })>
    :param name: node name to found in container with provided cid or in parent node with provided pnid
    :param cid: container id
    :param pnid: parent node id
    :return: the found node (a list of nodes for a selector search) or None if not found
    :raise ArianeCallParametersError: if no search criterion is provided, or
    if name is provided without cid or pnid
    """
    LOGGER.debug('NodeService.find_node')
    ret = None
    # At least one search criterion is mandatory.
    if (nid is None or not nid) and (endpoint_url is None or not endpoint_url) and (selector is None or not selector) and (name is None or not name):
        raise exceptions.ArianeCallParametersError('id and endpoint_url and selector and name')
    # nid wins over every other criterion: drop the rest.
    if (nid is not None and nid) and (endpoint_url is not None and endpoint_url or (selector is not None and selector) or (name is not None and name and (cid is not None and cid or (pnid is not None and pnid)))):
        LOGGER.debug('NodeService.find_node - Both id and other search params are defined. Will give you search on id.')
        endpoint_url = None
        selector = None
        name = None
        cid = None
        pnid = None
    # endpoint_url wins over selector and name.
    if (endpoint_url is not None and endpoint_url) and (selector is not None and selector or (name is not None and name and (cid is not None and cid or (pnid is not None and pnid)))):
        LOGGER.warning('NodeService.find_node - Both endpoint url other search params are defined. Will give you search based on endpoint url')
        selector = None
        name = None
        cid = None
        pnid = None
    # selector wins over name.
    if (selector is not None and selector) and (name is not None and name and (cid is not None and cid or (pnid is not None and pnid))):
        LOGGER.warning('NodeService.find_node - Both selector other search params are defined. Will give you search based on selector')
        name = None
        cid = None
        pnid = None
    # A name search scoped by both container and parent node keeps pnid only.
    if (name is not None and name) and ((cid is not None and cid) and (pnid is not None and pnid)):
        LOGGER.warning('NodeService.find_node - search node by name : both container ID and parent node ID are defined. Will give you search based on parent node id')
        cid = None
    # A name search needs a scope: either a container id or a parent node id.
    if (name is not None and name) and ((cid is None or not cid) and (pnid is None or not pnid)):
        raise exceptions.ArianeCallParametersError('cid and pnid')
    params = None
    return_set_of_nodes = False
    # Build the request parameters for the single criterion left standing.
    # Non-REST drivers additionally need the operation name in the payload.
    if nid is not None and nid:
        params = SessionService.complete_transactional_req({'ID': nid})
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params['OPERATION'] = 'getNode'
    elif endpoint_url is not None and endpoint_url:
        params = SessionService.complete_transactional_req({'endpointURL': endpoint_url})
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params['OPERATION'] = 'getNodeByEndpointURL'
    elif selector is not None and selector:
        params = SessionService.complete_transactional_req({'selector': selector})
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params['OPERATION'] = 'getNodes'
        # A selector may match several nodes.
        return_set_of_nodes = True
    elif name is not None and name:
        if cid is not None and cid:
            params = SessionService.complete_transactional_req({'name': name, 'containerID': cid})
            if MappingService.driver_type != DriverFactory.DRIVER_REST:
                params['OPERATION'] = 'getNodeByName'
        elif pnid is not None and pnid:
            params = SessionService.complete_transactional_req({'name': name, 'parentNodeID': pnid})
            if MappingService.driver_type != DriverFactory.DRIVER_REST:
                params['OPERATION'] = 'getNodeByName'
    if params is not None:
        # Non-REST drivers take raw properties; REST wraps them in a GET call.
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            args = {'properties': params}
        else:
            args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            # NOTE(review): cid-scoped searches are routed through the shared
            # MappingService requester here — presumably intentional; confirm.
            if cid is not None:
                response = MappingService.requester.call(args)
            else:
                response = NodeService.requester.call(args)
        else:
            response = NodeService.requester.call(args)
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            # Non-REST requesters return a handle; .get() yields the reply.
            response = response.get()
        if response.rc == 0:
            if return_set_of_nodes:
                ret = []
                for node in response.response_content['nodes']:
                    ret.append(Node.json_2_node(node))
            else:
                ret = Node.json_2_node(response.response_content)
        elif response.rc != 404:
            # rc 404 just means "not found" (ret stays None); anything else is logged.
            err_msg = 'NodeService.find_node - Problem while searching node (id:' + str(nid) + ', primary admin gate url ' + str(endpoint_url) + ' ). ' + 'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + ' (' + str(response.rc) + ')'
            LOGGER.warning(err_msg)
            # An overloaded mapping backend is surfaced to the caller.
            if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
                raise ArianeMappingOverloadError('NodeService.find_node', ArianeMappingOverloadError.ERROR_MSG)
    return ret
|
def encode_corpus(self, corpus, output_path):
    """
    Encode every utterance of the given corpus and persist the encoded
    data in a :class:`audiomate.container.Container` at ``output_path``.

    Args:
        corpus (Corpus): The corpus whose utterances are encoded.
        output_path (str): Filesystem path where the container is stored.

    Returns:
        Container: The (closed) container holding the encoded data.
    """
    target = containers.Container(output_path)
    target.open()

    # Store one encoded entry per utterance, keyed by the utterance's idx.
    for utt in corpus.utterances.values():
        target.set(utt.idx, self.encode_utterance(utt, corpus=corpus))

    target.close()
    return target
|
def function[encode_corpus, parameter[self, corpus, output_path]]:
constant[
Encode all utterances of the given corpus and store them in a :class:`audiomate.container.Container`.
Args:
corpus (Corpus): The corpus to process.
output_path (str): The path to store the container with the encoded data.
Returns:
Container: The container with the encoded data.
]
variable[out_container] assign[=] call[name[containers].Container, parameter[name[output_path]]]
call[name[out_container].open, parameter[]]
for taget[name[utterance]] in starred[call[name[corpus].utterances.values, parameter[]]] begin[:]
variable[data] assign[=] call[name[self].encode_utterance, parameter[name[utterance]]]
call[name[out_container].set, parameter[name[utterance].idx, name[data]]]
call[name[out_container].close, parameter[]]
return[name[out_container]]
|
keyword[def] identifier[encode_corpus] ( identifier[self] , identifier[corpus] , identifier[output_path] ):
literal[string]
identifier[out_container] = identifier[containers] . identifier[Container] ( identifier[output_path] )
identifier[out_container] . identifier[open] ()
keyword[for] identifier[utterance] keyword[in] identifier[corpus] . identifier[utterances] . identifier[values] ():
identifier[data] = identifier[self] . identifier[encode_utterance] ( identifier[utterance] , identifier[corpus] = identifier[corpus] )
identifier[out_container] . identifier[set] ( identifier[utterance] . identifier[idx] , identifier[data] )
identifier[out_container] . identifier[close] ()
keyword[return] identifier[out_container]
|
def encode_corpus(self, corpus, output_path):
    """
    Encode all utterances of the given corpus and store them in a :class:`audiomate.container.Container`.
    Args:
        corpus (Corpus): The corpus to process.
        output_path (str): The path to store the container with the encoded data.
    Returns:
        Container: The container with the encoded data.
    """
    out_container = containers.Container(output_path)
    out_container.open()
    # One container entry per utterance, keyed by the utterance's idx.
    for utterance in corpus.utterances.values():
        data = self.encode_utterance(utterance, corpus=corpus)
        out_container.set(utterance.idx, data)
    out_container.close()
    return out_container
|
def split_stdout_lines(stdout):
    """
    Parse the raw standard output of NetMHC/NetMHCpan/NetMHCcons.

    Everything before the first dashed separator line is ignored; after
    it, comments, blank lines, header rows and further dashed lines are
    dropped, and each remaining line is yielded split on whitespace.
    """
    # All the NetMHC formats print a line full of dashes before any actual
    # binding results.  Match on "---" (not a single dash) because NetMHC
    # 4.0 can emit negative positions in its "peptide" input mode.
    past_separator = False
    for raw_line in stdout.split("\n"):
        line = raw_line.strip()
        if line.startswith("---"):
            past_separator = True
        elif (past_separator and line and not line.startswith("#")
                and not any(line.startswith(token) for token in NETMHC_TOKENS)):
            yield line.split()
|
def function[split_stdout_lines, parameter[stdout]]:
constant[
Given the standard output from NetMHC/NetMHCpan/NetMHCcons tools,
drop all {comments, lines of hyphens, empty lines} and split the
remaining lines by whitespace.
]
variable[seen_dash] assign[=] constant[False]
for taget[name[l]] in starred[call[name[stdout].split, parameter[constant[
]]]] begin[:]
variable[l] assign[=] call[name[l].strip, parameter[]]
if call[name[l].startswith, parameter[constant[---]]] begin[:]
variable[seen_dash] assign[=] constant[True]
continue
if <ast.UnaryOp object at 0x7da1b00fc1f0> begin[:]
continue
if <ast.BoolOp object at 0x7da1b00fc490> begin[:]
continue
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b00fdcc0>]] begin[:]
continue
<ast.Yield object at 0x7da1b00fdba0>
|
keyword[def] identifier[split_stdout_lines] ( identifier[stdout] ):
literal[string]
identifier[seen_dash] = keyword[False]
keyword[for] identifier[l] keyword[in] identifier[stdout] . identifier[split] ( literal[string] ):
identifier[l] = identifier[l] . identifier[strip] ()
keyword[if] identifier[l] . identifier[startswith] ( literal[string] ):
identifier[seen_dash] = keyword[True]
keyword[continue]
keyword[if] keyword[not] identifier[seen_dash] :
keyword[continue]
keyword[if] keyword[not] identifier[l] keyword[or] identifier[l] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[if] identifier[any] ( identifier[l] . identifier[startswith] ( identifier[word] ) keyword[for] identifier[word] keyword[in] identifier[NETMHC_TOKENS] ):
keyword[continue]
keyword[yield] identifier[l] . identifier[split] ()
|
def split_stdout_lines(stdout):
    """
    Given the standard output from NetMHC/NetMHCpan/NetMHCcons tools,
    drop all {comments, lines of hyphens, empty lines} and split the
    remaining lines by whitespace.

    Yields:
        list of str: whitespace-split fields of each data line.
    """
    # all the NetMHC formats use lines full of dashes before any actual
    # binding results
    seen_dash = False
    for l in stdout.split('\n'):
        l = l.strip()
        # wait for a line like '----------' before trying to parse entries
        # have to include multiple dashes here since NetMHC 4.0 sometimes
        # gives negative positions in its "peptide" input mode
        if l.startswith('---'):
            seen_dash = True
            continue
        if not seen_dash:
            continue
        # ignore empty lines and comments
        if not l or l.startswith('#'):
            continue
        # beginning of headers in NetMHC
        if any((l.startswith(word) for word in NETMHC_TOKENS)):
            continue
        yield l.split()
|
def SystemFee(self):
    """
    Get the system fee.

    Returns:
        Fixed8: zero when this registration is for the governing or the
        utility token, otherwise the fee computed by the base class.
    """
    # The chain's own tokens are registered free of charge.
    if self.AssetType in (AssetType.GoverningToken, AssetType.UtilityToken):
        return Fixed8.Zero()
    return super(RegisterTransaction, self).SystemFee()
|
def function[SystemFee, parameter[self]]:
constant[
Get the system fee.
Returns:
Fixed8:
]
if <ast.BoolOp object at 0x7da1b22ae530> begin[:]
return[call[name[Fixed8].Zero, parameter[]]]
return[call[call[name[super], parameter[name[RegisterTransaction], name[self]]].SystemFee, parameter[]]]
|
keyword[def] identifier[SystemFee] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[AssetType] == identifier[AssetType] . identifier[GoverningToken] keyword[or] identifier[self] . identifier[AssetType] == identifier[AssetType] . identifier[UtilityToken] :
keyword[return] identifier[Fixed8] . identifier[Zero] ()
keyword[return] identifier[super] ( identifier[RegisterTransaction] , identifier[self] ). identifier[SystemFee] ()
|
def SystemFee(self):
    """
    Get the system fee.
    Returns:
        Fixed8: zero for the governing or utility token, otherwise the
        base class system fee.
    """
    # Registering the chain's own tokens carries no system fee.
    if self.AssetType == AssetType.GoverningToken or self.AssetType == AssetType.UtilityToken:
        return Fixed8.Zero()
    return super(RegisterTransaction, self).SystemFee()
|
def lux_unit(self):
    """Get unit of lux.

    Returns CONST.LUX when the device's lux status mentions that unit,
    otherwise None.
    """
    status = self._get_status(CONST.LUX_STATUS_KEY)
    return CONST.LUX if CONST.UNIT_LUX in status else None
|
def function[lux_unit, parameter[self]]:
constant[Get unit of lux.]
if compare[name[CONST].UNIT_LUX in call[name[self]._get_status, parameter[name[CONST].LUX_STATUS_KEY]]] begin[:]
return[name[CONST].LUX]
return[constant[None]]
|
keyword[def] identifier[lux_unit] ( identifier[self] ):
literal[string]
keyword[if] identifier[CONST] . identifier[UNIT_LUX] keyword[in] identifier[self] . identifier[_get_status] ( identifier[CONST] . identifier[LUX_STATUS_KEY] ):
keyword[return] identifier[CONST] . identifier[LUX]
keyword[return] keyword[None]
|
def lux_unit(self):
    """Get unit of lux.

    Returns:
        CONST.LUX if the lux status reports that unit, otherwise None.
    """
    if CONST.UNIT_LUX in self._get_status(CONST.LUX_STATUS_KEY):
        return CONST.LUX
    return None
|
def get_coin_list(coins='all'):
    """
    Fetch general information about coins listed on cryptocompare.com.

    Args:
        coins: 'all' (default) to fetch every coin on the site, or a single
            coin symbol / list of symbols to restrict the result to.

    Returns:
        dict: One entry per coin symbol; each value is that coin's info
        dictionary as delivered by the 'coinlist' endpoint ('Algorithm',
        'CoinName', 'FullName', 'FullyPremined', 'Id', 'ImageUrl', 'Name',
        'PreMinedValue', 'ProofType', 'SortOrder', 'TotalCoinsFreeFloat',
        'TotalCoinSupply', 'Url').
    """
    # Normalise a lone symbol to a one-element list; 'all' stays a sentinel.
    if coins != 'all' and not isinstance(coins, list):
        coins = [coins]

    listing = load_data(build_url('coinlist'))['Data']

    if coins == 'all':
        return listing
    # Restrict the result to the requested symbols only.
    return {symbol: listing[symbol] for symbol in coins}
|
def function[get_coin_list, parameter[coins]]:
constant[
Get general information about all the coins available on
cryptocompare.com.
Args:
coins: Default value of 'all' returns information about all the coins
available on the site. Otherwise a single string or list of coin
symbols can be used.
Returns:
The function returns a dictionairy containing individual dictionairies
for the coins specified by the input. The key of the top dictionary
corresponds to the coin symbol. Each coin dictionary has the following
structure:
{coin_symbol1: {'Algorithm' : ...,
'CoinName': ...,
'FullName': ...,
'FullyPremined': ...,
'Id': ...,
'ImageUrl': ...,
'Name': ...,
'PreMinedValue': ...,
'ProofType': ...,
'SortOrder': ...,
'TotalCoinsFreeFloat': ...,
'TotalCoinSupply': ...,
'Url': ...},
coin_symbol2: {...},
...}
]
if <ast.BoolOp object at 0x7da20e955090> begin[:]
variable[coins] assign[=] list[[<ast.Name object at 0x7da20e955330>]]
variable[url] assign[=] call[name[build_url], parameter[constant[coinlist]]]
variable[data] assign[=] call[call[name[load_data], parameter[name[url]]]][constant[Data]]
if compare[name[coins] not_equal[!=] constant[all]] begin[:]
variable[data] assign[=] <ast.DictComp object at 0x7da20e957340>
return[name[data]]
|
keyword[def] identifier[get_coin_list] ( identifier[coins] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[coins] , identifier[list] ) keyword[and] identifier[coins] != literal[string] :
identifier[coins] =[ identifier[coins] ]
identifier[url] = identifier[build_url] ( literal[string] )
identifier[data] = identifier[load_data] ( identifier[url] )[ literal[string] ]
keyword[if] identifier[coins] != literal[string] :
identifier[data] ={ identifier[c] : identifier[data] [ identifier[c] ] keyword[for] identifier[c] keyword[in] identifier[coins] }
keyword[return] identifier[data]
|
def get_coin_list(coins='all'):
    """
    Get general information about all the coins available on
    cryptocompare.com.
    Args:
        coins: Default value of 'all' returns information about all the coins
            available on the site. Otherwise a single string or list of coin
            symbols can be used.
    Returns:
        The function returns a dictionairy containing individual dictionairies
        for the coins specified by the input. The key of the top dictionary
        corresponds to the coin symbol. Each coin dictionary has the following
        structure:
            {coin_symbol1: {'Algorithm' : ...,
                            'CoinName': ...,
                            'FullName': ...,
                            'FullyPremined': ...,
                            'Id': ...,
                            'ImageUrl': ...,
                            'Name': ...,
                            'PreMinedValue': ...,
                            'ProofType': ...,
                            'SortOrder': ...,
                            'TotalCoinsFreeFloat': ...,
                            'TotalCoinSupply': ...,
                            'Url': ...},
             coin_symbol2: {...},
             ...}
    """
    # convert single coins input to single element lists
    if not isinstance(coins, list) and coins != 'all':
        coins = [coins]
    # load data
    url = build_url('coinlist')
    data = load_data(url)['Data']
    # keep only the coins specified by the caller
    if coins != 'all':
        data = {c: data[c] for c in coins}
    return data
|
def Laliberte_heat_capacity_i(T, w_w, a1, a2, a3, a4, a5, a6):
    r'''Calculate the partial heat capacity of a solute with the correlation
    proposed by Laliberte [1]_, from the temperature, the water mass
    fraction, and the six fitted coefficients of the solute.

    .. math::
        Cp_i = a_1 e^\alpha + a_5(1-w_w)^{a_6}

        \alpha = a_2 t + a_3 \exp(0.01t) + a_4(1-w_w)

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    w_w : float
        Weight fraction of water in the solution
    a1-a6 : floats
        Function fit parameters

    Returns
    -------
    Cp_i : float
        Solute partial heat capacity, [J/kg/K]

    Notes
    -----
    The correlation works in Celsius and kJ/kg/K internally; the result is
    converted to J/kg/K before being returned.
    Temperature range check is TODO

    Examples
    --------
    >>> d = _Laliberte_Heat_Capacity_ParametersDict['7647-14-5']
    >>> Laliberte_heat_capacity_i(1.5+273.15, 1-0.00398447, d["A1"], d["A2"], d["A3"], d["A4"], d["A5"], d["A6"])
    -2930.7353945880477

    References
    ----------
    .. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
       Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
       Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
       doi:10.1021/je8008123
    '''
    t_C = T - 273.15
    solute_frac = 1. - w_w
    # Exponent combining the temperature and composition dependence.
    alpha = a2*t_C + a3*exp(0.01*t_C) + a4*solute_frac
    cp_kJ = a1*exp(alpha) + a5*solute_frac**a6
    # Convert kJ/kg/K -> J/kg/K.
    return 1000.*cp_kJ
|
def function[Laliberte_heat_capacity_i, parameter[T, w_w, a1, a2, a3, a4, a5, a6]]:
constant[Calculate the heat capacity of a solute using the form proposed by [1]_
Parameters are needed, and a temperature, and water fraction.
.. math::
Cp_i = a_1 e^\alpha + a_5(1-w_w)^{a_6}
\alpha = a_2 t + a_3 \exp(0.01t) + a_4(1-w_w)
Parameters
----------
T : float
Temperature of fluid [K]
w_w : float
Weight fraction of water in the solution
a1-a6 : floats
Function fit parameters
Returns
-------
Cp_i : float
Solute partial heat capacity, [J/kg/K]
Notes
-----
Units are Kelvin and J/kg/K.
Temperature range check is TODO
Examples
--------
>>> d = _Laliberte_Heat_Capacity_ParametersDict['7647-14-5']
>>> Laliberte_heat_capacity_i(1.5+273.15, 1-0.00398447, d["A1"], d["A2"], d["A3"], d["A4"], d["A5"], d["A6"])
-2930.7353945880477
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
]
variable[t] assign[=] binary_operation[name[T] - constant[273.15]]
variable[alpha] assign[=] binary_operation[binary_operation[binary_operation[name[a2] * name[t]] + binary_operation[name[a3] * call[name[exp], parameter[binary_operation[constant[0.01] * name[t]]]]]] + binary_operation[name[a4] * binary_operation[constant[1.0] - name[w_w]]]]
variable[Cp_i] assign[=] binary_operation[binary_operation[name[a1] * call[name[exp], parameter[name[alpha]]]] + binary_operation[name[a5] * binary_operation[binary_operation[constant[1.0] - name[w_w]] ** name[a6]]]]
return[binary_operation[name[Cp_i] * constant[1000.0]]]
|
keyword[def] identifier[Laliberte_heat_capacity_i] ( identifier[T] , identifier[w_w] , identifier[a1] , identifier[a2] , identifier[a3] , identifier[a4] , identifier[a5] , identifier[a6] ):
literal[string]
identifier[t] = identifier[T] - literal[int]
identifier[alpha] = identifier[a2] * identifier[t] + identifier[a3] * identifier[exp] ( literal[int] * identifier[t] )+ identifier[a4] *( literal[int] - identifier[w_w] )
identifier[Cp_i] = identifier[a1] * identifier[exp] ( identifier[alpha] )+ identifier[a5] *( literal[int] - identifier[w_w] )** identifier[a6]
keyword[return] identifier[Cp_i] * literal[int]
|
def Laliberte_heat_capacity_i(T, w_w, a1, a2, a3, a4, a5, a6):
"""Calculate the heat capacity of a solute using the form proposed by [1]_
Parameters are needed, and a temperature, and water fraction.
.. math::
Cp_i = a_1 e^\\alpha + a_5(1-w_w)^{a_6}
\\alpha = a_2 t + a_3 \\exp(0.01t) + a_4(1-w_w)
Parameters
----------
T : float
Temperature of fluid [K]
w_w : float
Weight fraction of water in the solution
a1-a6 : floats
Function fit parameters
Returns
-------
Cp_i : float
Solute partial heat capacity, [J/kg/K]
Notes
-----
Units are Kelvin and J/kg/K.
Temperature range check is TODO
Examples
--------
>>> d = _Laliberte_Heat_Capacity_ParametersDict['7647-14-5']
>>> Laliberte_heat_capacity_i(1.5+273.15, 1-0.00398447, d["A1"], d["A2"], d["A3"], d["A4"], d["A5"], d["A6"])
-2930.7353945880477
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
"""
t = T - 273.15
alpha = a2 * t + a3 * exp(0.01 * t) + a4 * (1.0 - w_w)
Cp_i = a1 * exp(alpha) + a5 * (1.0 - w_w) ** a6
return Cp_i * 1000.0
|
def _get_view_infos(
        self,
        trimmed=False):
    """Query the sherlock-catalogues database for the view metadata.

    **Key Arguments:**
        - ``trimmed`` -- if True, restrict each returned row to the
          columns listed in ``self.basicColumns`` (in that order).

    **Return:**
        - ``viewInfo`` -- the list of view-metadata rows.
    """
    self.log.debug('starting the ``_get_view_infos`` method')

    # The query is a constant -- the previous ``% locals()``
    # interpolation was a no-op that only forced the SQL LIKE wildcard
    # to be escaped as ``%%`` (and would crash on any stray ``%``).
    sqlQuery = u"""
        SELECT v.*, t.description as "master table" FROM crossmatch_catalogues.tcs_helper_catalogue_views_info as v, crossmatch_catalogues.tcs_helper_catalogue_tables_info AS t where v.legacy_view = 0 and v.view_name not like "legacy%" and t.id=v.table_id order by number_of_rows desc
    """
    viewInfo = readquery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.cataloguesDbConn,
        quiet=False
    )

    if trimmed:
        cleanTable = []
        for r in viewInfo:
            # Keep only the basic columns, preserving the order they are
            # declared in ``self.basicColumns``.
            orow = collections.OrderedDict()
            for c in self.basicColumns:
                if c in r:
                    orow[c] = r[c]
            cleanTable.append(orow)
        viewInfo = cleanTable

    self.log.debug('completed the ``_get_view_infos`` method')
    return viewInfo
|
def function[_get_view_infos, parameter[self, trimmed]]:
constant[query the sherlock-catalogues database view metadata
]
call[name[self].log.debug, parameter[constant[starting the ``_get_view_infos`` method]]]
variable[sqlQuery] assign[=] binary_operation[constant[
SELECT v.*, t.description as "master table" FROM crossmatch_catalogues.tcs_helper_catalogue_views_info as v, crossmatch_catalogues.tcs_helper_catalogue_tables_info AS t where v.legacy_view = 0 and v.view_name not like "legacy%%" and t.id=v.table_id order by number_of_rows desc
] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]
variable[viewInfo] assign[=] call[name[readquery], parameter[]]
if name[trimmed] begin[:]
variable[cleanTable] assign[=] list[[]]
for taget[name[r]] in starred[name[viewInfo]] begin[:]
variable[orow] assign[=] call[name[collections].OrderedDict, parameter[call[name[sorted], parameter[call[dictionary[[], []].items, parameter[]]]]]]
for taget[name[c]] in starred[name[self].basicColumns] begin[:]
if compare[name[c] in name[r]] begin[:]
call[name[orow]][name[c]] assign[=] call[name[r]][name[c]]
call[name[cleanTable].append, parameter[name[orow]]]
variable[viewInfo] assign[=] name[cleanTable]
call[name[self].log.debug, parameter[constant[completed the ``_get_view_infos`` method]]]
return[name[viewInfo]]
|
keyword[def] identifier[_get_view_infos] (
identifier[self] ,
identifier[trimmed] = keyword[False] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
identifier[sqlQuery] = literal[string] % identifier[locals] ()
identifier[viewInfo] = identifier[readquery] (
identifier[log] = identifier[self] . identifier[log] ,
identifier[sqlQuery] = identifier[sqlQuery] ,
identifier[dbConn] = identifier[self] . identifier[cataloguesDbConn] ,
identifier[quiet] = keyword[False]
)
keyword[if] identifier[trimmed] :
identifier[cleanTable] =[]
keyword[for] identifier[r] keyword[in] identifier[viewInfo] :
identifier[orow] = identifier[collections] . identifier[OrderedDict] ( identifier[sorted] ({}. identifier[items] ()))
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[basicColumns] :
keyword[if] identifier[c] keyword[in] identifier[r] :
identifier[orow] [ identifier[c] ]= identifier[r] [ identifier[c] ]
identifier[cleanTable] . identifier[append] ( identifier[orow] )
identifier[viewInfo] = identifier[cleanTable]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
keyword[return] identifier[viewInfo]
|
def _get_view_infos(self, trimmed=False):
"""query the sherlock-catalogues database view metadata
"""
self.log.debug('starting the ``_get_view_infos`` method')
sqlQuery = u'\n SELECT v.*, t.description as "master table" FROM crossmatch_catalogues.tcs_helper_catalogue_views_info as v, crossmatch_catalogues.tcs_helper_catalogue_tables_info AS t where v.legacy_view = 0 and v.view_name not like "legacy%%" and t.id=v.table_id order by number_of_rows desc\n ' % locals()
viewInfo = readquery(log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, quiet=False)
if trimmed:
cleanTable = []
for r in viewInfo:
orow = collections.OrderedDict(sorted({}.items()))
for c in self.basicColumns:
if c in r:
orow[c] = r[c] # depends on [control=['if'], data=['c', 'r']] # depends on [control=['for'], data=['c']]
cleanTable.append(orow) # depends on [control=['for'], data=['r']]
viewInfo = cleanTable # depends on [control=['if'], data=[]]
self.log.debug('completed the ``_get_view_infos`` method')
return viewInfo
|
def _is_sort_order_unique_together_with_something(self):
"""
Is the sort_order field unique_together with something
"""
unique_together = self._meta.unique_together
for fields in unique_together:
if 'sort_order' in fields and len(fields) > 1:
return True
return False
|
def function[_is_sort_order_unique_together_with_something, parameter[self]]:
constant[
Is the sort_order field unique_together with something
]
variable[unique_together] assign[=] name[self]._meta.unique_together
for taget[name[fields]] in starred[name[unique_together]] begin[:]
if <ast.BoolOp object at 0x7da20c991cc0> begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[_is_sort_order_unique_together_with_something] ( identifier[self] ):
literal[string]
identifier[unique_together] = identifier[self] . identifier[_meta] . identifier[unique_together]
keyword[for] identifier[fields] keyword[in] identifier[unique_together] :
keyword[if] literal[string] keyword[in] identifier[fields] keyword[and] identifier[len] ( identifier[fields] )> literal[int] :
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def _is_sort_order_unique_together_with_something(self):
"""
Is the sort_order field unique_together with something
"""
unique_together = self._meta.unique_together
for fields in unique_together:
if 'sort_order' in fields and len(fields) > 1:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fields']]
return False
|
def parse_JSON(self, JSON_string):
    """
    Parses an *UVIndex* instance out of raw JSON data. Only certain
    properties of the data are used: if these properties are not found or
    cannot be parsed, an error is issued.

    :param JSON_string: a raw JSON string
    :type JSON_string: str
    :returns: an *UVIndex* instance or ``None`` if no data is available
    :raises: *ParseResponseError* if it is impossible to find or parse the
        data needed to build the result, *APIResponseError* if the JSON
        string embeds an HTTP status error
    """
    if JSON_string is None:
        raise parse_response_error.ParseResponseError('JSON data is None')
    data = json.loads(JSON_string)
    try:
        # Reference time comes straight from the payload; reception
        # time is "now".
        reference_time = data['date']
        reception_time = timeutils.now('unix')
        # Build the location from the embedded coordinates.
        place = location.Location(
            None, float(data['lon']), float(data['lat']), None)
        uv_intensity = float(data['value'])
    except KeyError:
        message = ''.join([__name__, ': impossible to parse UV Index'])
        raise parse_response_error.ParseResponseError(message)
    return uvindex.UVIndex(reference_time, place, uv_intensity,
                           reception_time)
|
def function[parse_JSON, parameter[self, JSON_string]]:
constant[
Parses an *UVIndex* instance out of raw JSON data. Only certain
properties of the data are used: if these properties are not found or
cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: an *UVIndex* instance or ``None`` if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result, *APIResponseError* if the JSON
string embeds an HTTP status error
]
if compare[name[JSON_string] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c6e41c0>
variable[d] assign[=] call[name[json].loads, parameter[name[JSON_string]]]
<ast.Try object at 0x7da20c6e79d0>
return[call[name[uvindex].UVIndex, parameter[name[reference_time], name[place], name[uv_intensity], name[reception_time]]]]
|
keyword[def] identifier[parse_JSON] ( identifier[self] , identifier[JSON_string] ):
literal[string]
keyword[if] identifier[JSON_string] keyword[is] keyword[None] :
keyword[raise] identifier[parse_response_error] . identifier[ParseResponseError] ( literal[string] )
identifier[d] = identifier[json] . identifier[loads] ( identifier[JSON_string] )
keyword[try] :
identifier[reference_time] = identifier[d] [ literal[string] ]
identifier[reception_time] = identifier[timeutils] . identifier[now] ( literal[string] )
identifier[lon] = identifier[float] ( identifier[d] [ literal[string] ])
identifier[lat] = identifier[float] ( identifier[d] [ literal[string] ])
identifier[place] = identifier[location] . identifier[Location] ( keyword[None] , identifier[lon] , identifier[lat] , keyword[None] )
identifier[uv_intensity] = identifier[float] ( identifier[d] [ literal[string] ])
keyword[except] identifier[KeyError] :
keyword[raise] identifier[parse_response_error] . identifier[ParseResponseError] (
literal[string] . identifier[join] ([ identifier[__name__] , literal[string] ]))
keyword[return] identifier[uvindex] . identifier[UVIndex] ( identifier[reference_time] , identifier[place] , identifier[uv_intensity] ,
identifier[reception_time] )
|
def parse_JSON(self, JSON_string):
"""
Parses an *UVIndex* instance out of raw JSON data. Only certain
properties of the data are used: if these properties are not found or
cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: an *UVIndex* instance or ``None`` if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result, *APIResponseError* if the JSON
string embeds an HTTP status error
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None') # depends on [control=['if'], data=[]]
d = json.loads(JSON_string)
try:
# -- reference time
reference_time = d['date']
# -- reception time (now)
reception_time = timeutils.now('unix')
# -- location
lon = float(d['lon'])
lat = float(d['lat'])
place = location.Location(None, lon, lat, None)
# -- UV intensity
uv_intensity = float(d['value']) # depends on [control=['try'], data=[]]
except KeyError:
raise parse_response_error.ParseResponseError(''.join([__name__, ': impossible to parse UV Index'])) # depends on [control=['except'], data=[]]
return uvindex.UVIndex(reference_time, place, uv_intensity, reception_time)
|
def create_model(self, ModelName, PrimaryContainer, *args, **kwargs):  # pylint: disable=unused-argument
    """Create a Local Model Object and register it in the class-level store.

    Args:
        ModelName (str): the Model Name
        PrimaryContainer (dict): a SageMaker primary container definition
    """
    model = _LocalModel(ModelName, PrimaryContainer)
    LocalSagemakerClient._models[ModelName] = model
|
def function[create_model, parameter[self, ModelName, PrimaryContainer]]:
constant[Create a Local Model Object
Args:
ModelName (str): the Model Name
PrimaryContainer (dict): a SageMaker primary container definition
]
call[name[LocalSagemakerClient]._models][name[ModelName]] assign[=] call[name[_LocalModel], parameter[name[ModelName], name[PrimaryContainer]]]
|
keyword[def] identifier[create_model] ( identifier[self] , identifier[ModelName] , identifier[PrimaryContainer] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[LocalSagemakerClient] . identifier[_models] [ identifier[ModelName] ]= identifier[_LocalModel] ( identifier[ModelName] , identifier[PrimaryContainer] )
|
def create_model(self, ModelName, PrimaryContainer, *args, **kwargs): # pylint: disable=unused-argument
'Create a Local Model Object\n\n Args:\n ModelName (str): the Model Name\n PrimaryContainer (dict): a SageMaker primary container definition\n '
LocalSagemakerClient._models[ModelName] = _LocalModel(ModelName, PrimaryContainer)
|
def QA_util_time_stamp(time_):
    """
    Convert a datetime string such as '2018-01-01 00:00:00' into a float
    epoch timestamp -- the same kind of value ``time.time()`` returns.

    :param time_: str -- preferably formatted as %Y-%m-%d %H:%M:%S,
                  with a space between the date and the time
    :return: float
    """
    length = len(str(time_))
    if length == 10:
        # 'yyyy-mm-dd'
        fmt, value = '%Y-%m-%d', time_
    elif length == 16:
        # 'yyyy-mm-dd hh:mm'
        fmt, value = '%Y-%m-%d %H:%M', time_
    else:
        # 'yyyy-mm-dd hh:mm:ss[...]' -- drop anything after the seconds.
        fmt, value = '%Y-%m-%d %H:%M:%S', str(time_)[0:19]
    return time.mktime(time.strptime(value, fmt))
|
def function[QA_util_time_stamp, parameter[time_]]:
constant[
字符串 '2018-01-01 00:00:00' 转变成 float 类型时间 类似 time.time() 返回的类型
:param time_: 字符串str -- 数据格式 最好是%Y-%m-%d %H:%M:%S 中间要有空格
:return: 类型float
]
if compare[call[name[len], parameter[call[name[str], parameter[name[time_]]]]] equal[==] constant[10]] begin[:]
return[call[name[time].mktime, parameter[call[name[time].strptime, parameter[name[time_], constant[%Y-%m-%d]]]]]]
|
keyword[def] identifier[QA_util_time_stamp] ( identifier[time_] ):
literal[string]
keyword[if] identifier[len] ( identifier[str] ( identifier[time_] ))== literal[int] :
keyword[return] identifier[time] . identifier[mktime] ( identifier[time] . identifier[strptime] ( identifier[time_] , literal[string] ))
keyword[elif] identifier[len] ( identifier[str] ( identifier[time_] ))== literal[int] :
keyword[return] identifier[time] . identifier[mktime] ( identifier[time] . identifier[strptime] ( identifier[time_] , literal[string] ))
keyword[else] :
identifier[timestr] = identifier[str] ( identifier[time_] )[ literal[int] : literal[int] ]
keyword[return] identifier[time] . identifier[mktime] ( identifier[time] . identifier[strptime] ( identifier[timestr] , literal[string] ))
|
def QA_util_time_stamp(time_):
"""
字符串 '2018-01-01 00:00:00' 转变成 float 类型时间 类似 time.time() 返回的类型
:param time_: 字符串str -- 数据格式 最好是%Y-%m-%d %H:%M:%S 中间要有空格
:return: 类型float
"""
if len(str(time_)) == 10:
# yyyy-mm-dd格式
return time.mktime(time.strptime(time_, '%Y-%m-%d')) # depends on [control=['if'], data=[]]
elif len(str(time_)) == 16:
# yyyy-mm-dd hh:mm格式
return time.mktime(time.strptime(time_, '%Y-%m-%d %H:%M')) # depends on [control=['if'], data=[]]
else:
timestr = str(time_)[0:19]
return time.mktime(time.strptime(timestr, '%Y-%m-%d %H:%M:%S'))
|
def _get_major_minor_revision(self, version_string):
"""Split a version string into major, minor and (optionally)
revision parts.
This is complicated by the fact that a version string can be
something like 3.2b1."""
version = version_string.split(' ')[0].split('.')
v_major = int(version[0])
v_minor = int(re.match('\d+', version[1]).group())
if len(version) >= 3:
v_revision = int(re.match('\d+', version[2]).group())
else:
v_revision = 0
return v_major, v_minor, v_revision
|
def function[_get_major_minor_revision, parameter[self, version_string]]:
constant[Split a version string into major, minor and (optionally)
revision parts.
This is complicated by the fact that a version string can be
something like 3.2b1.]
variable[version] assign[=] call[call[call[name[version_string].split, parameter[constant[ ]]]][constant[0]].split, parameter[constant[.]]]
variable[v_major] assign[=] call[name[int], parameter[call[name[version]][constant[0]]]]
variable[v_minor] assign[=] call[name[int], parameter[call[call[name[re].match, parameter[constant[\d+], call[name[version]][constant[1]]]].group, parameter[]]]]
if compare[call[name[len], parameter[name[version]]] greater_or_equal[>=] constant[3]] begin[:]
variable[v_revision] assign[=] call[name[int], parameter[call[call[name[re].match, parameter[constant[\d+], call[name[version]][constant[2]]]].group, parameter[]]]]
return[tuple[[<ast.Name object at 0x7da20c76f3a0>, <ast.Name object at 0x7da20c76e2c0>, <ast.Name object at 0x7da20c76e020>]]]
|
keyword[def] identifier[_get_major_minor_revision] ( identifier[self] , identifier[version_string] ):
literal[string]
identifier[version] = identifier[version_string] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )
identifier[v_major] = identifier[int] ( identifier[version] [ literal[int] ])
identifier[v_minor] = identifier[int] ( identifier[re] . identifier[match] ( literal[string] , identifier[version] [ literal[int] ]). identifier[group] ())
keyword[if] identifier[len] ( identifier[version] )>= literal[int] :
identifier[v_revision] = identifier[int] ( identifier[re] . identifier[match] ( literal[string] , identifier[version] [ literal[int] ]). identifier[group] ())
keyword[else] :
identifier[v_revision] = literal[int]
keyword[return] identifier[v_major] , identifier[v_minor] , identifier[v_revision]
|
def _get_major_minor_revision(self, version_string):
"""Split a version string into major, minor and (optionally)
revision parts.
This is complicated by the fact that a version string can be
something like 3.2b1."""
version = version_string.split(' ')[0].split('.')
v_major = int(version[0])
v_minor = int(re.match('\\d+', version[1]).group())
if len(version) >= 3:
v_revision = int(re.match('\\d+', version[2]).group()) # depends on [control=['if'], data=[]]
else:
v_revision = 0
return (v_major, v_minor, v_revision)
|
def hook(klass):
    """
    Monkey-patch the pdb.Pdb class.

    Adds a 'vim' (and 'v') command that switches to debugging with
    vimpdb. A class that already has ``do_vim`` is left untouched.
    """
    if hasattr(klass, 'do_vim'):
        # Already patched -- nothing to do.
        return
    setupMethod(klass, trace_dispatch)
    klass.__bases__ = klass.__bases__ + (SwitcherToVimpdb, )
|
def function[hook, parameter[klass]]:
constant[
monkey-patch pdb.Pdb class
adds a 'vim' (and 'v') command:
it switches to debugging with vimpdb
]
if <ast.UnaryOp object at 0x7da1b1040a90> begin[:]
call[name[setupMethod], parameter[name[klass], name[trace_dispatch]]]
<ast.AugAssign object at 0x7da1b1042c80>
|
keyword[def] identifier[hook] ( identifier[klass] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[klass] , literal[string] ):
identifier[setupMethod] ( identifier[klass] , identifier[trace_dispatch] )
identifier[klass] . identifier[__bases__] +=( identifier[SwitcherToVimpdb] ,)
|
def hook(klass):
"""
monkey-patch pdb.Pdb class
adds a 'vim' (and 'v') command:
it switches to debugging with vimpdb
"""
if not hasattr(klass, 'do_vim'):
setupMethod(klass, trace_dispatch)
klass.__bases__ += (SwitcherToVimpdb,) # depends on [control=['if'], data=[]]
|
def open_this(self, file, how):  # type: (str,str) -> Any
    """
    Open ``file`` while detecting its text encoding, caching the result.

    The detected encoding is stored in ``self.found_encoding`` keyed by
    path, so each file is only sniffed once per instance.

    :param file: path to the file (expected absolute -- see BUG note)
    :param how: mode string forwarded to ``io.open`` (e.g. ``"r"``)
    :return: an open file object using the detected encoding
    """
    # BUG: risky code here, allowing relative
    if not file.startswith("/"):
        # raise TypeError("this isn't absolute! We're siths, ya' know.")
        pass
    if file in self.found_encoding:
        # Cache hit: reuse the previously detected encoding.
        encoding = self.found_encoding[file]
    else:
        file_bytes = io.open(file, "rb").read()
        if not file_bytes:
            # Empty file: nothing to sniff, default to UTF-8.
            encoding = "utf-8"
        else:
            # NOTE(review): chardet may report ``encoding`` as None for
            # undecodable data; that value is cached and later passed to
            # io.open as-is -- confirm this is intended.
            encoding_info = chardet.detect(file_bytes)
            encoding = encoding_info["encoding"]
        # NOTE(review): ``unicode`` suggests Python 2 (or a compat
        # alias defined elsewhere in this module) -- verify.
        logger.debug(unicode(encoding))
        try:
            # Trial read: verify the detected encoding actually decodes
            # the file before caching it.
            io.open(file, how, encoding=encoding).read()
        except UnicodeDecodeError:
            # Detection failed -- dump the path and raw bytes for
            # debugging; the encoding is still cached below.
            print(file)
            print(file_bytes)
        self.found_encoding[file] = encoding
    return io.open(file, how, encoding=encoding)
|
def function[open_this, parameter[self, file, how]]:
constant[
Open file while detecting encoding. Use cached when possible.
:param file:
:param how:
:return:
]
if <ast.UnaryOp object at 0x7da18bc726b0> begin[:]
pass
if compare[name[file] in name[self].found_encoding] begin[:]
variable[encoding] assign[=] call[name[self].found_encoding][name[file]]
return[call[name[io].open, parameter[name[file], name[how]]]]
|
keyword[def] identifier[open_this] ( identifier[self] , identifier[file] , identifier[how] ):
literal[string]
keyword[if] keyword[not] identifier[file] . identifier[startswith] ( literal[string] ):
keyword[pass]
keyword[if] identifier[file] keyword[in] identifier[self] . identifier[found_encoding] :
identifier[encoding] = identifier[self] . identifier[found_encoding] [ identifier[file] ]
keyword[else] :
identifier[file_bytes] = identifier[io] . identifier[open] ( identifier[file] , literal[string] ). identifier[read] ()
keyword[if] keyword[not] identifier[file_bytes] :
identifier[encoding] = literal[string]
keyword[else] :
identifier[encoding_info] = identifier[chardet] . identifier[detect] ( identifier[file_bytes] )
identifier[encoding] = identifier[encoding_info] [ literal[string] ]
identifier[logger] . identifier[debug] ( identifier[unicode] ( identifier[encoding] ))
keyword[try] :
identifier[io] . identifier[open] ( identifier[file] , identifier[how] , identifier[encoding] = identifier[encoding] ). identifier[read] ()
keyword[except] identifier[UnicodeDecodeError] :
identifier[print] ( identifier[file] )
identifier[print] ( identifier[file_bytes] )
identifier[self] . identifier[found_encoding] [ identifier[file] ]= identifier[encoding]
keyword[return] identifier[io] . identifier[open] ( identifier[file] , identifier[how] , identifier[encoding] = identifier[encoding] )
|
def open_this(self, file, how): # type: (str,str) -> Any
'\n Open file while detecting encoding. Use cached when possible.\n :param file:\n :param how:\n :return:\n '
# BUG: risky code here, allowing relative
if not file.startswith('/'):
# raise TypeError("this isn't absolute! We're siths, ya' know.")
pass # depends on [control=['if'], data=[]]
if file in self.found_encoding:
encoding = self.found_encoding[file] # depends on [control=['if'], data=['file']]
else:
file_bytes = io.open(file, 'rb').read()
if not file_bytes:
encoding = 'utf-8' # depends on [control=['if'], data=[]]
else:
encoding_info = chardet.detect(file_bytes)
encoding = encoding_info['encoding']
logger.debug(unicode(encoding))
try:
io.open(file, how, encoding=encoding).read() # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
print(file)
print(file_bytes) # depends on [control=['except'], data=[]]
self.found_encoding[file] = encoding
return io.open(file, how, encoding=encoding)
|
def _check_exclude(self, val):
    """
    Validate the excluded metrics. Returns the set of excluded params.
    """
    # Normalize the input to a frozenset of lower-cased metric names.
    if val is None:
        excluded = frozenset()
    elif isinstance(val, str):
        excluded = frozenset([val.lower()])
    else:
        excluded = frozenset(s.lower() for s in val)

    # Anything left after removing the known metrics is invalid.
    if excluded - frozenset(METRICS):
        raise YellowbrickValueError(
            "'{}' is not a valid metric to exclude".format(repr(val))
        )
    return excluded
|
def function[_check_exclude, parameter[self, val]]:
constant[
Validate the excluded metrics. Returns the set of excluded params.
]
if compare[name[val] is constant[None]] begin[:]
variable[exclude] assign[=] call[name[frozenset], parameter[]]
if compare[call[name[len], parameter[binary_operation[name[exclude] - call[name[frozenset], parameter[name[METRICS]]]]]] greater[>] constant[0]] begin[:]
<ast.Raise object at 0x7da20cabc640>
return[name[exclude]]
|
keyword[def] identifier[_check_exclude] ( identifier[self] , identifier[val] ):
literal[string]
keyword[if] identifier[val] keyword[is] keyword[None] :
identifier[exclude] = identifier[frozenset] ()
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[str] ):
identifier[exclude] = identifier[frozenset] ([ identifier[val] . identifier[lower] ()])
keyword[else] :
identifier[exclude] = identifier[frozenset] ( identifier[map] ( keyword[lambda] identifier[s] : identifier[s] . identifier[lower] (), identifier[val] ))
keyword[if] identifier[len] ( identifier[exclude] - identifier[frozenset] ( identifier[METRICS] ))> literal[int] :
keyword[raise] identifier[YellowbrickValueError] (
literal[string] . identifier[format] ( identifier[repr] ( identifier[val] ))
)
keyword[return] identifier[exclude]
|
def _check_exclude(self, val):
"""
Validate the excluded metrics. Returns the set of excluded params.
"""
if val is None:
exclude = frozenset() # depends on [control=['if'], data=[]]
elif isinstance(val, str):
exclude = frozenset([val.lower()]) # depends on [control=['if'], data=[]]
else:
exclude = frozenset(map(lambda s: s.lower(), val))
if len(exclude - frozenset(METRICS)) > 0:
raise YellowbrickValueError("'{}' is not a valid metric to exclude".format(repr(val))) # depends on [control=['if'], data=[]]
return exclude
|
def GetOobResult(self, param, user_ip, gitkit_token=None):
    """Gets out-of-band code for ResetPassword/ChangeEmail request.

    Args:
      param: dict of HTTP POST params
      user_ip: string, end user's IP address
      gitkit_token: string, the gitkit token if user logged in

    Returns:
      A dict of {
        email: user email who initializes the request
        new_email: the requested new email, for ChangeEmail action only
        oob_link: the generated link to be send to user's email
        oob_code: the one time out-of-band code
        action: OobAction
        response_body: the http body to be returned to Gitkit widget
      }
    """
    if 'action' not in param:
        return self._FailureOobResponse('unknown request type')
    action = param['action']
    try:
        if action == GitkitClient.RESET_PASSWORD_ACTION:
            request = self._PasswordResetRequest(param, user_ip)
            oob_code, oob_link = self._BuildOobLink(request, action)
            return {
                'action': GitkitClient.RESET_PASSWORD_ACTION,
                'email': param['email'],
                'oob_link': oob_link,
                'oob_code': oob_code,
                'response_body': simplejson.dumps({'success': True})
            }
        if action == GitkitClient.CHANGE_EMAIL_ACTION:
            # Changing an email requires an authenticated user.
            if not gitkit_token:
                return self._FailureOobResponse('login is required')
            request = self._ChangeEmailRequest(param, user_ip, gitkit_token)
            oob_code, oob_link = self._BuildOobLink(request, action)
            return {
                'action': GitkitClient.CHANGE_EMAIL_ACTION,
                'email': param['oldEmail'],
                'new_email': param['newEmail'],
                'oob_link': oob_link,
                'oob_code': oob_code,
                'response_body': simplejson.dumps({'success': True})
            }
    except errors.GitkitClientError as error:
        return self._FailureOobResponse(error.value)
    # 'action' was present but matched no known action.
    return self._FailureOobResponse('unknown request type')
|
def function[GetOobResult, parameter[self, param, user_ip, gitkit_token]]:
constant[Gets out-of-band code for ResetPassword/ChangeEmail request.
Args:
param: dict of HTTP POST params
user_ip: string, end user's IP address
gitkit_token: string, the gitkit token if user logged in
Returns:
A dict of {
email: user email who initializes the request
new_email: the requested new email, for ChangeEmail action only
oob_link: the generated link to be send to user's email
oob_code: the one time out-of-band code
action: OobAction
response_body: the http body to be returned to Gitkit widget
}
]
if compare[constant[action] in name[param]] begin[:]
<ast.Try object at 0x7da1b1a9f970>
return[call[name[self]._FailureOobResponse, parameter[constant[unknown request type]]]]
|
keyword[def] identifier[GetOobResult] ( identifier[self] , identifier[param] , identifier[user_ip] , identifier[gitkit_token] = keyword[None] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[param] :
keyword[try] :
keyword[if] identifier[param] [ literal[string] ]== identifier[GitkitClient] . identifier[RESET_PASSWORD_ACTION] :
identifier[request] = identifier[self] . identifier[_PasswordResetRequest] ( identifier[param] , identifier[user_ip] )
identifier[oob_code] , identifier[oob_link] = identifier[self] . identifier[_BuildOobLink] ( identifier[request] ,
identifier[param] [ literal[string] ])
keyword[return] {
literal[string] : identifier[GitkitClient] . identifier[RESET_PASSWORD_ACTION] ,
literal[string] : identifier[param] [ literal[string] ],
literal[string] : identifier[oob_link] ,
literal[string] : identifier[oob_code] ,
literal[string] : identifier[simplejson] . identifier[dumps] ({ literal[string] : keyword[True] })
}
keyword[elif] identifier[param] [ literal[string] ]== identifier[GitkitClient] . identifier[CHANGE_EMAIL_ACTION] :
keyword[if] keyword[not] identifier[gitkit_token] :
keyword[return] identifier[self] . identifier[_FailureOobResponse] ( literal[string] )
identifier[request] = identifier[self] . identifier[_ChangeEmailRequest] ( identifier[param] , identifier[user_ip] , identifier[gitkit_token] )
identifier[oob_code] , identifier[oob_link] = identifier[self] . identifier[_BuildOobLink] ( identifier[request] ,
identifier[param] [ literal[string] ])
keyword[return] {
literal[string] : identifier[GitkitClient] . identifier[CHANGE_EMAIL_ACTION] ,
literal[string] : identifier[param] [ literal[string] ],
literal[string] : identifier[param] [ literal[string] ],
literal[string] : identifier[oob_link] ,
literal[string] : identifier[oob_code] ,
literal[string] : identifier[simplejson] . identifier[dumps] ({ literal[string] : keyword[True] })
}
keyword[except] identifier[errors] . identifier[GitkitClientError] keyword[as] identifier[error] :
keyword[return] identifier[self] . identifier[_FailureOobResponse] ( identifier[error] . identifier[value] )
keyword[return] identifier[self] . identifier[_FailureOobResponse] ( literal[string] )
|
def GetOobResult(self, param, user_ip, gitkit_token=None):
    """Gets out-of-band code for ResetPassword/ChangeEmail request.

    Args:
      param: dict of HTTP POST params
      user_ip: string, end user's IP address
      gitkit_token: string, the gitkit token if user logged in

    Returns:
      A dict of {
        email: user email who initializes the request
        new_email: the requested new email, for ChangeEmail action only
        oob_link: the generated link to be send to user's email
        oob_code: the one time out-of-band code
        action: OobAction
        response_body: the http body to be returned to Gitkit widget
      }
      or a failure response dict when the request cannot be handled.
    """
    action = param.get('action')
    if action is not None:
        try:
            if action == GitkitClient.RESET_PASSWORD_ACTION:
                oob_request = self._PasswordResetRequest(param, user_ip)
                oob_code, oob_link = self._BuildOobLink(oob_request, action)
                return {
                    'action': GitkitClient.RESET_PASSWORD_ACTION,
                    'email': param['email'],
                    'oob_link': oob_link,
                    'oob_code': oob_code,
                    'response_body': simplejson.dumps({'success': True}),
                }
            if action == GitkitClient.CHANGE_EMAIL_ACTION:
                # Changing the account email requires an authenticated user.
                if not gitkit_token:
                    return self._FailureOobResponse('login is required')
                oob_request = self._ChangeEmailRequest(param, user_ip,
                                                      gitkit_token)
                oob_code, oob_link = self._BuildOobLink(oob_request, action)
                return {
                    'action': GitkitClient.CHANGE_EMAIL_ACTION,
                    'email': param['oldEmail'],
                    'new_email': param['newEmail'],
                    'oob_link': oob_link,
                    'oob_code': oob_code,
                    'response_body': simplejson.dumps({'success': True}),
                }
        except errors.GitkitClientError as error:
            return self._FailureOobResponse(error.value)
    # Missing or unrecognized action falls through to a generic failure.
    return self._FailureOobResponse('unknown request type')
|
def limit(self, n, skip=None):
    """Limit the result set.

    Raises MonSQLException if this query set already carries a LIMIT.

    :Parameters:
      - n : The maximum number of rows returned
      - skip: how many rows to skip
    :Return: a new QuerySet object so we can chain operations
    """
    # A LIMIT may only be applied once per query set.
    if self.query.limit is not None:
        raise MonSQLException('LIMIT already defined')
    # Clone first so chained calls never mutate this query set.
    limited = self.clone()
    limited.query.limit = n
    limited.query.skip = skip
    return limited
|
def function[limit, parameter[self, n, skip]]:
constant[
Limit the result set. However when the query set already has limit field before,
this would raise an exception
:Parameters:
- n : The maximum number of rows returned
- skip: how many rows to skip
:Return: a new QuerySet object so we can chain operations
]
if compare[name[self].query.limit is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b2346500>
variable[new_query_set] assign[=] call[name[self].clone, parameter[]]
name[new_query_set].query.limit assign[=] name[n]
name[new_query_set].query.skip assign[=] name[skip]
return[name[new_query_set]]
|
keyword[def] identifier[limit] ( identifier[self] , identifier[n] , identifier[skip] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[query] . identifier[limit] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[MonSQLException] ( literal[string] )
identifier[new_query_set] = identifier[self] . identifier[clone] ()
identifier[new_query_set] . identifier[query] . identifier[limit] = identifier[n]
identifier[new_query_set] . identifier[query] . identifier[skip] = identifier[skip]
keyword[return] identifier[new_query_set]
|
def limit(self, n, skip=None):
    """
    Limit the result set. However when the query set already has limit field before,
    this would raise an exception
    :Parameters:
        - n : The maximum number of rows returned
        - skip: how many rows to skip
    :Return: a new QuerySet object so we can chain operations
    """
    # A LIMIT may only be applied once per query set.
    if self.query.limit is not None:
        raise MonSQLException('LIMIT already defined')
    # Work on a clone so the original query set stays reusable.
    new_query_set = self.clone()
    new_query_set.query.limit = n
    new_query_set.query.skip = skip
    return new_query_set
|
def get_jobs(self, job_ids=None):
    """Returns a list of jobs that are stored in the database."""
    # An explicitly empty id list selects nothing; skip the query entirely.
    if job_ids is not None and not job_ids:
        return []
    query = self.session.query(Job)
    if job_ids is not None:
        # Restrict to the requested unique job ids.
        query = query.filter(Job.unique.in_(job_ids))
    # Return jobs ordered by their unique id.
    return sorted(query, key=lambda job: job.unique)
|
def function[get_jobs, parameter[self, job_ids]]:
constant[Returns a list of jobs that are stored in the database.]
if <ast.BoolOp object at 0x7da18f723b50> begin[:]
return[list[[]]]
variable[q] assign[=] call[name[self].session.query, parameter[name[Job]]]
if compare[name[job_ids] is_not constant[None]] begin[:]
variable[q] assign[=] call[name[q].filter, parameter[call[name[Job].unique.in_, parameter[name[job_ids]]]]]
return[call[name[sorted], parameter[call[name[list], parameter[name[q]]]]]]
|
keyword[def] identifier[get_jobs] ( identifier[self] , identifier[job_ids] = keyword[None] ):
literal[string]
keyword[if] identifier[job_ids] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[job_ids] )== literal[int] :
keyword[return] []
identifier[q] = identifier[self] . identifier[session] . identifier[query] ( identifier[Job] )
keyword[if] identifier[job_ids] keyword[is] keyword[not] keyword[None] :
identifier[q] = identifier[q] . identifier[filter] ( identifier[Job] . identifier[unique] . identifier[in_] ( identifier[job_ids] ))
keyword[return] identifier[sorted] ( identifier[list] ( identifier[q] ), identifier[key] = keyword[lambda] identifier[job] : identifier[job] . identifier[unique] )
|
def get_jobs(self, job_ids=None):
    """Returns a list of jobs that are stored in the database."""
    # An explicitly empty id list means "no jobs"; avoid a needless query.
    if job_ids is not None and len(job_ids) == 0:
        return []
    q = self.session.query(Job)
    if job_ids is not None:
        # Restrict the query to the requested unique job ids.
        q = q.filter(Job.unique.in_(job_ids))
    # Jobs come back ordered by their unique id.
    return sorted(list(q), key=lambda job: job.unique)
|
def contacts(self, uid=0, **kwargs):
    """
    Fetch user contacts by given group id.
    A useful synonym for "contacts/search" command with provided `groupId` parameter.
    :Example:
    lists = client.lists.contacts(1901010)
    :param int uid: The unique id of the List. Required.
    :param int page: Fetch specified results page. Default=1
    :param int limit: How many results on page. Default=10
    """
    # Delegate to the generic sub-resource fetcher with a Contacts instance.
    instance = Contacts(self.base_uri, self.auth)
    return self.get_subresource_instances(
        uid, instance=instance, resource="contacts", params=kwargs)
|
def function[contacts, parameter[self, uid]]:
constant[
Fetch user contacts by given group id.
A useful synonym for "contacts/search" command with provided `groupId` parameter.
:Example:
lists = client.lists.contacts(1901010)
:param int uid: The unique id of the List. Required.
:param int page: Fetch specified results page. Default=1
:param int limit: How many results on page. Default=10
]
variable[contacts] assign[=] call[name[Contacts], parameter[name[self].base_uri, name[self].auth]]
return[call[name[self].get_subresource_instances, parameter[name[uid]]]]
|
keyword[def] identifier[contacts] ( identifier[self] , identifier[uid] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[contacts] = identifier[Contacts] ( identifier[self] . identifier[base_uri] , identifier[self] . identifier[auth] )
keyword[return] identifier[self] . identifier[get_subresource_instances] ( identifier[uid] , identifier[instance] = identifier[contacts] ,
identifier[resource] = literal[string] , identifier[params] = identifier[kwargs] )
|
def contacts(self, uid=0, **kwargs):
    """
    Fetch user contacts by given group id.
    A useful synonym for "contacts/search" command with provided `groupId` parameter.
    :Example:
    lists = client.lists.contacts(1901010)
    :param int uid: The unique id of the List. Required.
    :param int page: Fetch specified results page. Default=1
    :param int limit: How many results on page. Default=10
    """
    # Delegate to the generic sub-resource fetcher with a Contacts instance;
    # remaining keyword args are forwarded as query parameters.
    contacts = Contacts(self.base_uri, self.auth)
    return self.get_subresource_instances(uid, instance=contacts, resource='contacts', params=kwargs)
|
def do_set(self, args: argparse.Namespace) -> None:
    """Set a settable parameter or show current settings of parameters"""
    # No parameter given: show every settable.
    if not args.param:
        return self.show(args)
    param = utils.norm_fold(args.param.strip())

    # No value given: show the requested parameter(s).
    if not args.value:
        return self.show(args, param)
    value = args.value

    # Resolve a unique prefix to its full settable name; ambiguous or
    # unknown prefixes fall back to displaying the candidates.
    if param not in self.settable:
        matches = [name for name in self.settable if name.startswith(param)]
        if len(matches) != 1:
            return self.show(args, param)
        param = matches[0]

    # Cast the new value to the current value's type and store it.
    old_value = getattr(self, param)
    value = utils.cast(old_value, value)
    setattr(self, param, value)
    self.poutput('{} - was: {}\nnow: {}\n'.format(param, old_value, value))

    # Fire the optional change hook only when the value actually changed.
    if old_value != value:
        hook = getattr(self, '_onchange_{}'.format(param), None)
        if hook is not None:
            hook(old=old_value, new=value)
|
def function[do_set, parameter[self, args]]:
constant[Set a settable parameter or show current settings of parameters]
if <ast.UnaryOp object at 0x7da1b26af4f0> begin[:]
return[call[name[self].show, parameter[name[args]]]]
variable[param] assign[=] call[name[utils].norm_fold, parameter[call[name[args].param.strip, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b26ad3c0> begin[:]
return[call[name[self].show, parameter[name[args], name[param]]]]
variable[value] assign[=] name[args].value
if compare[name[param] <ast.NotIn object at 0x7da2590d7190> name[self].settable] begin[:]
variable[hits] assign[=] <ast.ListComp object at 0x7da1b26acfd0>
if compare[call[name[len], parameter[name[hits]]] equal[==] constant[1]] begin[:]
variable[param] assign[=] call[name[hits]][constant[0]]
variable[current_value] assign[=] call[name[getattr], parameter[name[self], name[param]]]
variable[value] assign[=] call[name[utils].cast, parameter[name[current_value], name[value]]]
call[name[setattr], parameter[name[self], name[param], name[value]]]
call[name[self].poutput, parameter[call[constant[{} - was: {}
now: {}
].format, parameter[name[param], name[current_value], name[value]]]]]
if compare[name[current_value] not_equal[!=] name[value]] begin[:]
variable[onchange_hook] assign[=] call[name[getattr], parameter[name[self], call[constant[_onchange_{}].format, parameter[name[param]]], constant[None]]]
if compare[name[onchange_hook] is_not constant[None]] begin[:]
call[name[onchange_hook], parameter[]]
|
keyword[def] identifier[do_set] ( identifier[self] , identifier[args] : identifier[argparse] . identifier[Namespace] )-> keyword[None] :
literal[string]
keyword[if] keyword[not] identifier[args] . identifier[param] :
keyword[return] identifier[self] . identifier[show] ( identifier[args] )
identifier[param] = identifier[utils] . identifier[norm_fold] ( identifier[args] . identifier[param] . identifier[strip] ())
keyword[if] keyword[not] identifier[args] . identifier[value] :
keyword[return] identifier[self] . identifier[show] ( identifier[args] , identifier[param] )
identifier[value] = identifier[args] . identifier[value]
keyword[if] identifier[param] keyword[not] keyword[in] identifier[self] . identifier[settable] :
identifier[hits] =[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[settable] keyword[if] identifier[p] . identifier[startswith] ( identifier[param] )]
keyword[if] identifier[len] ( identifier[hits] )== literal[int] :
identifier[param] = identifier[hits] [ literal[int] ]
keyword[else] :
keyword[return] identifier[self] . identifier[show] ( identifier[args] , identifier[param] )
identifier[current_value] = identifier[getattr] ( identifier[self] , identifier[param] )
identifier[value] = identifier[utils] . identifier[cast] ( identifier[current_value] , identifier[value] )
identifier[setattr] ( identifier[self] , identifier[param] , identifier[value] )
identifier[self] . identifier[poutput] ( literal[string] . identifier[format] ( identifier[param] , identifier[current_value] , identifier[value] ))
keyword[if] identifier[current_value] != identifier[value] :
identifier[onchange_hook] = identifier[getattr] ( identifier[self] , literal[string] . identifier[format] ( identifier[param] ), keyword[None] )
keyword[if] identifier[onchange_hook] keyword[is] keyword[not] keyword[None] :
identifier[onchange_hook] ( identifier[old] = identifier[current_value] , identifier[new] = identifier[value] )
|
def do_set(self, args: argparse.Namespace) -> None:
    """Set a settable parameter or show current settings of parameters"""
    # Check if param was passed in
    if not args.param:
        return self.show(args)
    param = utils.norm_fold(args.param.strip())
    # Check if value was passed in
    if not args.value:
        return self.show(args, param)
    value = args.value
    # Check if param points to just one settable
    if param not in self.settable:
        # Accept an unambiguous prefix of a settable name.
        hits = [p for p in self.settable if p.startswith(param)]
        if len(hits) == 1:
            param = hits[0]
        else:
            # Zero or several matches: fall back to showing candidates.
            return self.show(args, param)
    # Update the settable's value, coercing it to the current value's type.
    current_value = getattr(self, param)
    value = utils.cast(current_value, value)
    setattr(self, param, value)
    self.poutput('{} - was: {}\nnow: {}\n'.format(param, current_value, value))
    # See if we need to call a change hook for this settable
    if current_value != value:
        onchange_hook = getattr(self, '_onchange_{}'.format(param), None)
        if onchange_hook is not None:
            onchange_hook(old=current_value, new=value)
|
def _get_axes(dim, subplots_kwargs=None):
    """Create a row of matplotlib axes, one panel per pair of dimensions.

    Parameters
    ----------
    dim : int
        Dimensionality of the orbit.
    subplots_kwargs : dict (optional)
        Dictionary of kwargs passed to :func:`~matplotlib.pyplot.subplots`.

    Returns
    -------
    list or numpy.flatiter
        A flat iterable of the created axes.
    """
    import matplotlib.pyplot as plt

    # Fix: the original used a mutable default (``dict()``) and popped
    # 'figsize' from it, mutating both the shared default and any dict the
    # caller passed in.  Copy locally so the caller's dict is untouched.
    kwargs = dict(subplots_kwargs) if subplots_kwargs else {}

    # One panel per unordered pair of dimensions.
    if dim > 1:
        n_panels = int(dim * (dim - 1) / 2)
    else:
        n_panels = 1

    # Default to 4x4 inches per panel unless the caller overrides figsize;
    # pop so 'figsize' is not passed to subplots() twice.
    figsize = kwargs.pop('figsize', (4 * n_panels, 4))
    fig, axes = plt.subplots(1, n_panels, figsize=figsize, **kwargs)

    # Normalize to a flat iterable of axes for a uniform return type.
    if n_panels == 1:
        axes = [axes]
    else:
        axes = axes.flat
    return axes
|
def function[_get_axes, parameter[dim, subplots_kwargs]]:
constant[
Parameters
----------
dim : int
Dimensionality of the orbit.
subplots_kwargs : dict (optional)
Dictionary of kwargs passed to :func:`~matplotlib.pyplot.subplots`.
]
import module[matplotlib.pyplot] as alias[plt]
if compare[name[dim] greater[>] constant[1]] begin[:]
variable[n_panels] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[dim] * binary_operation[name[dim] - constant[1]]] / constant[2]]]]
variable[figsize] assign[=] call[name[subplots_kwargs].pop, parameter[constant[figsize], tuple[[<ast.BinOp object at 0x7da1b0e39420>, <ast.Constant object at 0x7da1b0e39f60>]]]]
<ast.Tuple object at 0x7da1b0e38310> assign[=] call[name[plt].subplots, parameter[constant[1], name[n_panels]]]
if compare[name[n_panels] equal[==] constant[1]] begin[:]
variable[axes] assign[=] list[[<ast.Name object at 0x7da1b0e39ae0>]]
return[name[axes]]
|
keyword[def] identifier[_get_axes] ( identifier[dim] , identifier[subplots_kwargs] = identifier[dict] ()):
literal[string]
keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt]
keyword[if] identifier[dim] > literal[int] :
identifier[n_panels] = identifier[int] ( identifier[dim] *( identifier[dim] - literal[int] )/ literal[int] )
keyword[else] :
identifier[n_panels] = literal[int]
identifier[figsize] = identifier[subplots_kwargs] . identifier[pop] ( literal[string] ,( literal[int] * identifier[n_panels] , literal[int] ))
identifier[fig] , identifier[axes] = identifier[plt] . identifier[subplots] ( literal[int] , identifier[n_panels] , identifier[figsize] = identifier[figsize] ,
** identifier[subplots_kwargs] )
keyword[if] identifier[n_panels] == literal[int] :
identifier[axes] =[ identifier[axes] ]
keyword[else] :
identifier[axes] = identifier[axes] . identifier[flat]
keyword[return] identifier[axes]
|
def _get_axes(dim, subplots_kwargs=dict()):
    """
    Parameters
    ----------
    dim : int
        Dimensionality of the orbit.
    subplots_kwargs : dict (optional)
        Dictionary of kwargs passed to :func:`~matplotlib.pyplot.subplots`.
    """
    # NOTE(review): the mutable default ``dict()`` is shared across calls,
    # and ``pop('figsize')`` below mutates the caller's dict -- consider a
    # ``None`` sentinel plus a local copy.
    import matplotlib.pyplot as plt
    # One panel per unordered pair of dimensions.
    if dim > 1:
        n_panels = int(dim * (dim - 1) / 2)
    else:
        n_panels = 1
    # Default to 4x4 inches per panel unless the caller overrides figsize.
    figsize = subplots_kwargs.pop('figsize', (4 * n_panels, 4))
    (fig, axes) = plt.subplots(1, n_panels, figsize=figsize, **subplots_kwargs)
    # Normalize to a flat iterable of axes for a uniform return type.
    if n_panels == 1:
        axes = [axes]
    else:
        axes = axes.flat
    return axes
|
def create_load_string(upload_dir, groups=None, organism=None):
    """
    create the code necessary to load the bcbioRNAseq object
    """
    library_line = 'library(bcbioRNASeq)'
    flat_line = 'flat <- flatFiles(bcb)'
    save_line = 'saveData(bcb, flat, dir="data")'

    # Default interesting group is the sample name when none are supplied.
    groups = _list2Rlist(groups) if groups else _quotestring("sampleName")

    if organism:
        template = Template(
            'bcb <- bcbioRNASeq(uploadDir="$upload_dir",'
            'interestingGroups=$groups,'
            'organism="$organism")')
        load_line = template.substitute(
            upload_dir=upload_dir, groups=groups, organism=organism)
    else:
        # Without an organism the R call passes organism=NULL.
        template = Template(
            'bcb <- bcbioRNASeq(uploadDir="$upload_dir",'
            'interestingGroups=$groups,'
            'organism=NULL)')
        load_line = template.substitute(upload_dir=upload_dir, groups=groups)

    return ";\n".join([library_line, load_line, flat_line, save_line])
|
def function[create_load_string, parameter[upload_dir, groups, organism]]:
constant[
create the code necessary to load the bcbioRNAseq object
]
variable[libraryline] assign[=] constant[library(bcbioRNASeq)]
variable[load_template] assign[=] call[name[Template], parameter[constant[bcb <- bcbioRNASeq(uploadDir="$upload_dir",interestingGroups=$groups,organism="$organism")]]]
variable[load_noorganism_template] assign[=] call[name[Template], parameter[constant[bcb <- bcbioRNASeq(uploadDir="$upload_dir",interestingGroups=$groups,organism=NULL)]]]
variable[flatline] assign[=] constant[flat <- flatFiles(bcb)]
variable[saveline] assign[=] constant[saveData(bcb, flat, dir="data")]
if name[groups] begin[:]
variable[groups] assign[=] call[name[_list2Rlist], parameter[name[groups]]]
if name[organism] begin[:]
variable[load_bcbio] assign[=] call[name[load_template].substitute, parameter[]]
return[call[constant[;
].join, parameter[list[[<ast.Name object at 0x7da1b18a85b0>, <ast.Name object at 0x7da1b18a9540>, <ast.Name object at 0x7da1b18a92d0>, <ast.Name object at 0x7da1b18a8760>]]]]]
|
keyword[def] identifier[create_load_string] ( identifier[upload_dir] , identifier[groups] = keyword[None] , identifier[organism] = keyword[None] ):
literal[string]
identifier[libraryline] = literal[string]
identifier[load_template] = identifier[Template] (
( literal[string]
literal[string]
literal[string] ))
identifier[load_noorganism_template] = identifier[Template] (
( literal[string]
literal[string]
literal[string] ))
identifier[flatline] = literal[string]
identifier[saveline] = literal[string]
keyword[if] identifier[groups] :
identifier[groups] = identifier[_list2Rlist] ( identifier[groups] )
keyword[else] :
identifier[groups] = identifier[_quotestring] ( literal[string] )
keyword[if] identifier[organism] :
identifier[load_bcbio] = identifier[load_template] . identifier[substitute] (
identifier[upload_dir] = identifier[upload_dir] , identifier[groups] = identifier[groups] , identifier[organism] = identifier[organism] )
keyword[else] :
identifier[load_bcbio] = identifier[load_noorganism_template] . identifier[substitute] ( identifier[upload_dir] = identifier[upload_dir] ,
identifier[groups] = identifier[groups] )
keyword[return] literal[string] . identifier[join] ([ identifier[libraryline] , identifier[load_bcbio] , identifier[flatline] , identifier[saveline] ])
|
def create_load_string(upload_dir, groups=None, organism=None):
    """
    create the code necessary to load the bcbioRNAseq object
    """
    libraryline = 'library(bcbioRNASeq)'
    load_template = Template('bcb <- bcbioRNASeq(uploadDir="$upload_dir",interestingGroups=$groups,organism="$organism")')
    load_noorganism_template = Template('bcb <- bcbioRNASeq(uploadDir="$upload_dir",interestingGroups=$groups,organism=NULL)')
    flatline = 'flat <- flatFiles(bcb)'
    saveline = 'saveData(bcb, flat, dir="data")'
    if groups:
        # Render the interesting groups as an R vector literal.
        groups = _list2Rlist(groups)
    else:
        # Default to grouping by sample name.
        groups = _quotestring('sampleName')
    if organism:
        load_bcbio = load_template.substitute(upload_dir=upload_dir, groups=groups, organism=organism)
    else:
        # Without an organism the R call passes organism=NULL.
        load_bcbio = load_noorganism_template.substitute(upload_dir=upload_dir, groups=groups)
    return ';\n'.join([libraryline, load_bcbio, flatline, saveline])
|
def _extract(cls, compressed_file, videofile, exts):
    """Extract subtitle files from an archive.

    If ``compressed_file`` is not actually an archive it is returned
    unchanged.  ``exts`` filters out non-subtitle members: only members
    whose file extension appears in ``exts`` are extracted.
    """
    if not CompressedFile.is_compressed_file(compressed_file):
        return [compressed_file]

    root = os.path.dirname(compressed_file)
    extracted = []
    archive = CompressedFile(compressed_file)
    for member in archive.namelist():
        if archive.isdir(member):
            continue
        # Member names may be byte strings; normalize to unicode first.
        decoded = CompressedFile.decode_file_name(member)
        ext = os.path.splitext(decoded)[1][1:]
        if ext not in exts:
            continue
        # Name the extracted file after the video so players pick it up.
        subname = cls._gen_subname(videofile, '', '', orig_name=decoded)
        target = os.path.join(root, subname)
        archive.extract(member, target)
        extracted.append(target)
    archive.close()
    return extracted
|
def function[_extract, parameter[cls, compressed_file, videofile, exts]]:
constant[ 解压字幕文件,如果无法解压,则直接返回 compressed_file。
exts 参数用于过滤掉非字幕文件,只有文件的扩展名在 exts 中,才解压该文件。
]
if <ast.UnaryOp object at 0x7da1b2272e60> begin[:]
return[list[[<ast.Name object at 0x7da1b2271f60>]]]
variable[root] assign[=] call[name[os].path.dirname, parameter[name[compressed_file]]]
variable[subs] assign[=] list[[]]
variable[cf] assign[=] call[name[CompressedFile], parameter[name[compressed_file]]]
for taget[name[name]] in starred[call[name[cf].namelist, parameter[]]] begin[:]
if call[name[cf].isdir, parameter[name[name]]] begin[:]
continue
variable[orig_name] assign[=] call[name[CompressedFile].decode_file_name, parameter[name[name]]]
<ast.Tuple object at 0x7da1b2272350> assign[=] call[name[os].path.splitext, parameter[name[orig_name]]]
variable[ext] assign[=] call[name[ext]][<ast.Slice object at 0x7da1b2270370>]
if compare[name[ext] <ast.NotIn object at 0x7da2590d7190> name[exts]] begin[:]
continue
variable[subname] assign[=] call[name[cls]._gen_subname, parameter[name[videofile], constant[], constant[]]]
variable[subpath] assign[=] call[name[os].path.join, parameter[name[root], name[subname]]]
call[name[cf].extract, parameter[name[name], name[subpath]]]
call[name[subs].append, parameter[name[subpath]]]
call[name[cf].close, parameter[]]
return[name[subs]]
|
keyword[def] identifier[_extract] ( identifier[cls] , identifier[compressed_file] , identifier[videofile] , identifier[exts] ):
literal[string]
keyword[if] keyword[not] identifier[CompressedFile] . identifier[is_compressed_file] ( identifier[compressed_file] ):
keyword[return] [ identifier[compressed_file] ]
identifier[root] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[compressed_file] )
identifier[subs] =[]
identifier[cf] = identifier[CompressedFile] ( identifier[compressed_file] )
keyword[for] identifier[name] keyword[in] identifier[cf] . identifier[namelist] ():
keyword[if] identifier[cf] . identifier[isdir] ( identifier[name] ):
keyword[continue]
identifier[orig_name] = identifier[CompressedFile] . identifier[decode_file_name] ( identifier[name] )
identifier[_] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[orig_name] )
identifier[ext] = identifier[ext] [ literal[int] :]
keyword[if] identifier[ext] keyword[not] keyword[in] identifier[exts] :
keyword[continue]
identifier[subname] = identifier[cls] . identifier[_gen_subname] ( identifier[videofile] , literal[string] , literal[string] , identifier[orig_name] = identifier[orig_name] )
identifier[subpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[subname] )
identifier[cf] . identifier[extract] ( identifier[name] , identifier[subpath] )
identifier[subs] . identifier[append] ( identifier[subpath] )
identifier[cf] . identifier[close] ()
keyword[return] identifier[subs]
|
def _extract(cls, compressed_file, videofile, exts):
    """Extract subtitle files from an archive; if ``compressed_file`` is
    not an archive it is returned unchanged.  ``exts`` filters out
    non-subtitle members: only files whose extension appears in ``exts``
    are extracted.
    """
    if not CompressedFile.is_compressed_file(compressed_file):
        return [compressed_file]
    root = os.path.dirname(compressed_file)
    subs = []
    cf = CompressedFile(compressed_file)
    for name in cf.namelist():
        if cf.isdir(name):
            continue
        # make `name` to unicode string
        orig_name = CompressedFile.decode_file_name(name)
        (_, ext) = os.path.splitext(orig_name)
        ext = ext[1:]
        if ext not in exts:
            continue
        # Name the extracted file after the video so players pick it up.
        subname = cls._gen_subname(videofile, '', '', orig_name=orig_name)
        subpath = os.path.join(root, subname)
        cf.extract(name, subpath)
        subs.append(subpath)
    cf.close()
    return subs
|
def spit_config(self, conf_file, firstwordonly=False):
    """conf_file a file opened for writing."""
    cfg = ConfigParser.RawConfigParser()
    for section in _CONFIG_SECS:
        cfg.add_section(section)
    # Map each channel number to its (optionally shortened) name.
    for chan in sorted(self.pack.D):
        cfg.set('channels', str(chan),
                self.pack.name(chan, firstwordonly=firstwordonly))
    # Conditions are written in sorted key order for stable output.
    for key in self.sorted_conkeys():
        cfg.set('conditions', key, self.conditions[key])
    cfg.write(conf_file)
|
def function[spit_config, parameter[self, conf_file, firstwordonly]]:
constant[conf_file a file opened for writing.]
variable[cfg] assign[=] call[name[ConfigParser].RawConfigParser, parameter[]]
for taget[name[sec]] in starred[name[_CONFIG_SECS]] begin[:]
call[name[cfg].add_section, parameter[name[sec]]]
variable[sec] assign[=] constant[channels]
for taget[name[i]] in starred[call[name[sorted], parameter[name[self].pack.D]]] begin[:]
call[name[cfg].set, parameter[name[sec], call[name[str], parameter[name[i]]], call[name[self].pack.name, parameter[name[i]]]]]
variable[sec] assign[=] constant[conditions]
for taget[name[k]] in starred[call[name[self].sorted_conkeys, parameter[]]] begin[:]
call[name[cfg].set, parameter[name[sec], name[k], call[name[self].conditions][name[k]]]]
call[name[cfg].write, parameter[name[conf_file]]]
|
keyword[def] identifier[spit_config] ( identifier[self] , identifier[conf_file] , identifier[firstwordonly] = keyword[False] ):
literal[string]
identifier[cfg] = identifier[ConfigParser] . identifier[RawConfigParser] ()
keyword[for] identifier[sec] keyword[in] identifier[_CONFIG_SECS] :
identifier[cfg] . identifier[add_section] ( identifier[sec] )
identifier[sec] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[sorted] ( identifier[self] . identifier[pack] . identifier[D] ):
identifier[cfg] . identifier[set] ( identifier[sec] , identifier[str] ( identifier[i] ),
identifier[self] . identifier[pack] . identifier[name] ( identifier[i] , identifier[firstwordonly] = identifier[firstwordonly] ))
identifier[sec] = literal[string]
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[sorted_conkeys] ():
identifier[cfg] . identifier[set] ( identifier[sec] , identifier[k] , identifier[self] . identifier[conditions] [ identifier[k] ])
identifier[cfg] . identifier[write] ( identifier[conf_file] )
|
def spit_config(self, conf_file, firstwordonly=False):
    """conf_file a file opened for writing."""
    cfg = ConfigParser.RawConfigParser()
    for sec in _CONFIG_SECS:
        cfg.add_section(sec)
    sec = 'channels'
    # Map each channel number to its (optionally shortened) name.
    for i in sorted(self.pack.D):
        cfg.set(sec, str(i), self.pack.name(i, firstwordonly=firstwordonly))
    sec = 'conditions'
    # Conditions are written in sorted key order for stable output.
    for k in self.sorted_conkeys():
        cfg.set(sec, k, self.conditions[k])
    cfg.write(conf_file)
|
def sync_model(self, comment='', compact_central=False,
               release_borrowed=True, release_workset=True,
               save_local=False):
    """Append a sync model entry to the journal.
    This instructs Revit to sync the currently open workshared model.
    Args:
        comment (str): comment to be provided for the sync step
        compact_central (bool): if True compacts the central file
        release_borrowed (bool): if True releases the borrowed elements
        release_workset (bool): if True releases the borrowed worksets
        save_local (bool): if True saves the local file as well
    """
    self._add_entry(templates.FILE_SYNC_START)
    # Optional sync steps, in the order Revit expects them; template
    # entries are looked up only for the steps that are enabled.
    for template_name, enabled in (
            ('FILE_SYNC_COMPACT', compact_central),
            ('FILE_SYNC_RELEASE_BORROWED', release_borrowed),
            ('FILE_SYNC_RELEASE_USERWORKSETS', release_workset),
            ('FILE_SYNC_RELEASE_SAVELOCAL', save_local)):
        if enabled:
            self._add_entry(getattr(templates, template_name))
    self._add_entry(
        templates.FILE_SYNC_COMMENT_OK.format(sync_comment=comment))
|
def function[sync_model, parameter[self, comment, compact_central, release_borrowed, release_workset, save_local]]:
constant[Append a sync model entry to the journal.
This instructs Revit to sync the currently open workshared model.
Args:
comment (str): comment to be provided for the sync step
compact_central (bool): if True compacts the central file
release_borrowed (bool): if True releases the borrowed elements
release_workset (bool): if True releases the borrowed worksets
save_local (bool): if True saves the local file as well
]
call[name[self]._add_entry, parameter[name[templates].FILE_SYNC_START]]
if name[compact_central] begin[:]
call[name[self]._add_entry, parameter[name[templates].FILE_SYNC_COMPACT]]
if name[release_borrowed] begin[:]
call[name[self]._add_entry, parameter[name[templates].FILE_SYNC_RELEASE_BORROWED]]
if name[release_workset] begin[:]
call[name[self]._add_entry, parameter[name[templates].FILE_SYNC_RELEASE_USERWORKSETS]]
if name[save_local] begin[:]
call[name[self]._add_entry, parameter[name[templates].FILE_SYNC_RELEASE_SAVELOCAL]]
call[name[self]._add_entry, parameter[call[name[templates].FILE_SYNC_COMMENT_OK.format, parameter[]]]]
|
keyword[def] identifier[sync_model] ( identifier[self] , identifier[comment] = literal[string] , identifier[compact_central] = keyword[False] ,
identifier[release_borrowed] = keyword[True] , identifier[release_workset] = keyword[True] ,
identifier[save_local] = keyword[False] ):
literal[string]
identifier[self] . identifier[_add_entry] ( identifier[templates] . identifier[FILE_SYNC_START] )
keyword[if] identifier[compact_central] :
identifier[self] . identifier[_add_entry] ( identifier[templates] . identifier[FILE_SYNC_COMPACT] )
keyword[if] identifier[release_borrowed] :
identifier[self] . identifier[_add_entry] ( identifier[templates] . identifier[FILE_SYNC_RELEASE_BORROWED] )
keyword[if] identifier[release_workset] :
identifier[self] . identifier[_add_entry] ( identifier[templates] . identifier[FILE_SYNC_RELEASE_USERWORKSETS] )
keyword[if] identifier[save_local] :
identifier[self] . identifier[_add_entry] ( identifier[templates] . identifier[FILE_SYNC_RELEASE_SAVELOCAL] )
identifier[self] . identifier[_add_entry] ( identifier[templates] . identifier[FILE_SYNC_COMMENT_OK]
. identifier[format] ( identifier[sync_comment] = identifier[comment] ))
|
def sync_model(self, comment='', compact_central=False, release_borrowed=True, release_workset=True, save_local=False):
    """Append a sync-model entry to the journal.

    Instructs Revit to synchronize the currently open workshared model.

    Args:
        comment (str): comment recorded with the sync step
        compact_central (bool): if True compacts the central file
        release_borrowed (bool): if True releases the borrowed elements
        release_workset (bool): if True releases the borrowed worksets
        save_local (bool): if True saves the local file as well
    """
    self._add_entry(templates.FILE_SYNC_START)
    # Emit one journal entry per enabled option, preserving the order
    # in which Revit expects them.
    optional_entries = (
        (compact_central, templates.FILE_SYNC_COMPACT),
        (release_borrowed, templates.FILE_SYNC_RELEASE_BORROWED),
        (release_workset, templates.FILE_SYNC_RELEASE_USERWORKSETS),
        (save_local, templates.FILE_SYNC_RELEASE_SAVELOCAL),
    )
    for enabled, entry_template in optional_entries:
        if enabled:
            self._add_entry(entry_template)
    # Close the sync block with the comment confirmation entry.
    self._add_entry(templates.FILE_SYNC_COMMENT_OK.format(sync_comment=comment))
|
def is_descendant_of_vault(self, id_, vault_id):
    """Tests if an ``Id`` is a descendant of a vault.

    arg:    id (osid.id.Id): an ``Id``
    arg:    vault_id (osid.id.Id): the ``Id`` of a vault
    return: (boolean) - ``true`` if the ``id`` is a descendant of
            the ``vault_id,`` ``false`` otherwise
    raise:  NotFound - ``vault_id`` not found
    raise:  NullArgument - ``vault_id`` or ``id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` is not found return ``false``.
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_descendant_of_bin
    # A configured catalog session takes precedence; otherwise fall back
    # to the underlying hierarchy session.
    catalog_session = self._catalog_session
    if catalog_session is None:
        return self._hierarchy_session.is_descendant(id_=id_, descendant_id=vault_id)
    return catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=vault_id)
|
def function[is_descendant_of_vault, parameter[self, id_, vault_id]]:
constant[Tests if an ``Id`` is a descendant of a vault.
arg: id (osid.id.Id): an ``Id``
arg: vault_id (osid.id.Id): the ``Id`` of a vault
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``vault_id,`` ``false`` otherwise
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.is_descendant_of_catalog, parameter[]]]
return[call[name[self]._hierarchy_session.is_descendant, parameter[]]]
|
keyword[def] identifier[is_descendant_of_vault] ( identifier[self] , identifier[id_] , identifier[vault_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[is_descendant_of_catalog] ( identifier[id_] = identifier[id_] , identifier[catalog_id] = identifier[vault_id] )
keyword[return] identifier[self] . identifier[_hierarchy_session] . identifier[is_descendant] ( identifier[id_] = identifier[id_] , identifier[descendant_id] = identifier[vault_id] )
|
# Auto-generated normalized copy; the trailing "# depends on" markers are
# machine-produced control-flow annotations tied to these exact statements
# and are intentionally preserved.
def is_descendant_of_vault(self, id_, vault_id):
    """Tests if an ``Id`` is a descendant of a vault.
    arg:    id (osid.id.Id): an ``Id``
    arg:    vault_id (osid.id.Id): the ``Id`` of a vault
    return: (boolean) - ``true`` if the ``id`` is a descendant of
            the ``vault_id,`` ``false`` otherwise
    raise:  NotFound - ``vault_id`` not found
    raise:  NullArgument - ``vault_id`` or ``id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` is not found return ``false``.
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_descendant_of_bin
    # A configured catalog session takes precedence over the hierarchy session.
    if self._catalog_session is not None:
        return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=vault_id) # depends on [control=['if'], data=[]]
    return self._hierarchy_session.is_descendant(id_=id_, descendant_id=vault_id)
|
def threadpooled(  # noqa: F811
    func: typing.Optional[typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]]] = None,
    *,
    loop_getter: typing.Union[None, typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop] = None,
    loop_getter_need_context: bool = False,
) -> typing.Union[
    ThreadPooled,
    typing.Callable[..., "typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]"],
]:
    """Post function to ThreadPoolExecutor.

    :param func: function to wrap
    :type func: typing.Optional[typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]]]
    :param loop_getter: Method to get event loop, if wrap in asyncio task
    :type loop_getter: typing.Union[
                           None,
                           typing.Callable[..., asyncio.AbstractEventLoop],
                           asyncio.AbstractEventLoop
                       ]
    :param loop_getter_need_context: Loop getter requires function context
    :type loop_getter_need_context: bool
    :return: ThreadPooled instance, if called as function or argumented decorator, else callable wrapper
    :rtype: typing.Union[ThreadPooled, typing.Callable[..., typing.Union[concurrent.futures.Future, typing.Awaitable]]]
    """
    if func is not None:
        # Bare decoration (@threadpooled): build the decorator and apply
        # it to the target immediately.
        return ThreadPooled(  # type: ignore
            func=None, loop_getter=loop_getter, loop_getter_need_context=loop_getter_need_context
        )(func)
    # Argumented usage (@threadpooled(...)): hand back the decorator itself.
    return ThreadPooled(func=None, loop_getter=loop_getter, loop_getter_need_context=loop_getter_need_context)
|
def function[threadpooled, parameter[func]]:
constant[Post function to ThreadPoolExecutor.
:param func: function to wrap
:type func: typing.Optional[typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]]]
:param loop_getter: Method to get event loop, if wrap in asyncio task
:type loop_getter: typing.Union[
None,
typing.Callable[..., asyncio.AbstractEventLoop],
asyncio.AbstractEventLoop
]
:param loop_getter_need_context: Loop getter requires function context
:type loop_getter_need_context: bool
:return: ThreadPooled instance, if called as function or argumented decorator, else callable wrapper
:rtype: typing.Union[ThreadPooled, typing.Callable[..., typing.Union[concurrent.futures.Future, typing.Awaitable]]]
]
if compare[name[func] is constant[None]] begin[:]
return[call[name[ThreadPooled], parameter[]]]
return[call[call[name[ThreadPooled], parameter[]], parameter[name[func]]]]
|
keyword[def] identifier[threadpooled] (
identifier[func] : identifier[typing] . identifier[Optional] [ identifier[typing] . identifier[Callable] [..., identifier[typing] . identifier[Union] [ literal[string] , identifier[typing] . identifier[Any] ]]]= keyword[None] ,
*,
identifier[loop_getter] : identifier[typing] . identifier[Union] [ keyword[None] , identifier[typing] . identifier[Callable] [..., identifier[asyncio] . identifier[AbstractEventLoop] ], identifier[asyncio] . identifier[AbstractEventLoop] ]= keyword[None] ,
identifier[loop_getter_need_context] : identifier[bool] = keyword[False] ,
)-> identifier[typing] . identifier[Union] [
identifier[ThreadPooled] ,
identifier[typing] . identifier[Callable] [..., literal[string] ],
]:
literal[string]
keyword[if] identifier[func] keyword[is] keyword[None] :
keyword[return] identifier[ThreadPooled] ( identifier[func] = identifier[func] , identifier[loop_getter] = identifier[loop_getter] , identifier[loop_getter_need_context] = identifier[loop_getter_need_context] )
keyword[return] identifier[ThreadPooled] (
identifier[func] = keyword[None] , identifier[loop_getter] = identifier[loop_getter] , identifier[loop_getter_need_context] = identifier[loop_getter_need_context]
)( identifier[func] )
|
# Auto-generated normalized copy; the trailing "# depends on" marker is a
# machine-produced control-flow annotation and is intentionally preserved.
def threadpooled(func: typing.Optional[typing.Callable[..., typing.Union['typing.Awaitable[typing.Any]', typing.Any]]]=None, *, loop_getter: typing.Union[None, typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop]=None, loop_getter_need_context: bool=False) -> typing.Union[ThreadPooled, typing.Callable[..., 'typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]']]: # noqa: F811
    """Post function to ThreadPoolExecutor.

    :param func: function to wrap
    :type func: typing.Optional[typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]]]
    :param loop_getter: Method to get event loop, if wrap in asyncio task
    :type loop_getter: typing.Union[
                           None,
                           typing.Callable[..., asyncio.AbstractEventLoop],
                           asyncio.AbstractEventLoop
                       ]
    :param loop_getter_need_context: Loop getter requires function context
    :type loop_getter_need_context: bool
    :return: ThreadPooled instance, if called as function or argumented decorator, else callable wrapper
    :rtype: typing.Union[ThreadPooled, typing.Callable[..., typing.Union[concurrent.futures.Future, typing.Awaitable]]]
    """
    # Argumented usage (@threadpooled(...)): return the decorator itself.
    if func is None:
        return ThreadPooled(func=func, loop_getter=loop_getter, loop_getter_need_context=loop_getter_need_context) # depends on [control=['if'], data=['func']] # type: ignore
    # Bare decoration (@threadpooled): build the decorator and apply it now.
    return ThreadPooled(func=None, loop_getter=loop_getter, loop_getter_need_context=loop_getter_need_context)(func)
|
def get_service_uid_from(self, analysis):
    """Return the UID of the analysis service assigned to the analysis.
    """
    # ``analysis`` may be a UID/brain; resolve it to the full object first.
    analysis_object = api.get_object(analysis)
    service = analysis_object.getAnalysisService()
    return api.get_uid(service)
|
def function[get_service_uid_from, parameter[self, analysis]]:
constant[Return the service from the analysis
]
variable[analysis] assign[=] call[name[api].get_object, parameter[name[analysis]]]
return[call[name[api].get_uid, parameter[call[name[analysis].getAnalysisService, parameter[]]]]]
|
keyword[def] identifier[get_service_uid_from] ( identifier[self] , identifier[analysis] ):
literal[string]
identifier[analysis] = identifier[api] . identifier[get_object] ( identifier[analysis] )
keyword[return] identifier[api] . identifier[get_uid] ( identifier[analysis] . identifier[getAnalysisService] ())
|
# Auto-generated normalized copy produced by the code-dump tool.
def get_service_uid_from(self, analysis):
    """Return the service from the analysis
    """
    # ``analysis`` may be a UID/brain; resolve it to the full object first.
    analysis = api.get_object(analysis)
    return api.get_uid(analysis.getAnalysisService())
|
def view(self, *args, **kwargs):
    """Decorator to automatically apply as_view decorator and register it.
    """
    def register_view(func):
        # Fall back to this instance's configured view class unless the
        # caller supplied one explicitly.
        kwargs.setdefault("view_class", self.view_class)
        wrapped = as_view(*args, **kwargs)(func)
        return self.add_view(wrapped)
    return register_view
|
def function[view, parameter[self]]:
constant[Decorator to automatically apply as_view decorator and register it.
]
def function[decorator, parameter[f]]:
call[name[kwargs].setdefault, parameter[constant[view_class], name[self].view_class]]
return[call[name[self].add_view, parameter[call[call[name[as_view], parameter[<ast.Starred object at 0x7da20c993910>]], parameter[name[f]]]]]]
return[name[decorator]]
|
keyword[def] identifier[view] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[f] ):
identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[self] . identifier[view_class] )
keyword[return] identifier[self] . identifier[add_view] ( identifier[as_view] (* identifier[args] ,** identifier[kwargs] )( identifier[f] ))
keyword[return] identifier[decorator]
|
# Auto-generated normalized copy produced by the code-dump tool.
def view(self, *args, **kwargs):
    """Decorator to automatically apply as_view decorator and register it.
    """
    # ``args``/``kwargs`` are forwarded to ``as_view`` when decoration fires.
    def decorator(f):
        # Default the view class to this instance's configured one.
        kwargs.setdefault('view_class', self.view_class)
        return self.add_view(as_view(*args, **kwargs)(f))
    return decorator
|
def _parse_array(self, tensor_proto):
    """Grab data in TensorProto and convert to numpy array."""
    try:
        from onnx.numpy_helper import to_array
    except ImportError:
        raise ImportError(
            "Onnx and protobuf need to be installed. "
            "Instructions to install - https://github.com/onnx/onnx"
        )
    dims = tuple(tensor_proto.dims)
    if dims:
        # Tensor with explicit dimensions: restore its shape after decoding.
        np_array = to_array(tensor_proto).reshape(dims)
    else:
        # If onnx's params are scalar values without dims mentioned.
        np_array = np.array([to_array(tensor_proto)])
    return nd.array(np_array)
|
def function[_parse_array, parameter[self, tensor_proto]]:
constant[Grab data in TensorProto and convert to numpy array.]
<ast.Try object at 0x7da1b1ef2470>
if compare[call[name[len], parameter[call[name[tuple], parameter[name[tensor_proto].dims]]]] greater[>] constant[0]] begin[:]
variable[np_array] assign[=] call[call[name[to_array], parameter[name[tensor_proto]]].reshape, parameter[call[name[tuple], parameter[name[tensor_proto].dims]]]]
return[call[name[nd].array, parameter[name[np_array]]]]
|
keyword[def] identifier[_parse_array] ( identifier[self] , identifier[tensor_proto] ):
literal[string]
keyword[try] :
keyword[from] identifier[onnx] . identifier[numpy_helper] keyword[import] identifier[to_array]
keyword[except] identifier[ImportError] :
keyword[raise] identifier[ImportError] ( literal[string]
+ literal[string] )
keyword[if] identifier[len] ( identifier[tuple] ( identifier[tensor_proto] . identifier[dims] ))> literal[int] :
identifier[np_array] = identifier[to_array] ( identifier[tensor_proto] ). identifier[reshape] ( identifier[tuple] ( identifier[tensor_proto] . identifier[dims] ))
keyword[else] :
identifier[np_array] = identifier[np] . identifier[array] ([ identifier[to_array] ( identifier[tensor_proto] )])
keyword[return] identifier[nd] . identifier[array] ( identifier[np_array] )
|
# Auto-generated normalized copy; the trailing "# depends on" markers are
# machine-produced control-flow annotations and are intentionally preserved.
def _parse_array(self, tensor_proto):
    """Grab data in TensorProto and convert to numpy array."""
    try:
        from onnx.numpy_helper import to_array # depends on [control=['try'], data=[]]
    except ImportError:
        raise ImportError('Onnx and protobuf need to be installed. ' + 'Instructions to install - https://github.com/onnx/onnx') # depends on [control=['except'], data=[]]
    # Non-empty dims: restore the decoded array's original shape.
    if len(tuple(tensor_proto.dims)) > 0:
        np_array = to_array(tensor_proto).reshape(tuple(tensor_proto.dims)) # depends on [control=['if'], data=[]]
    else:
        # If onnx's params are scalar values without dims mentioned.
        np_array = np.array([to_array(tensor_proto)])
    return nd.array(np_array)
|
def reset_snapshot_attribute(self, snapshot_id,
                             attribute='createVolumePermission'):
    """
    Resets an attribute of a snapshot to its default value.

    :type snapshot_id: string
    :param snapshot_id: ID of the snapshot

    :type attribute: string
    :param attribute: The attribute to reset

    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    # Delegate to the generic EC2 status call; this action uses POST.
    return self.get_status(
        'ResetSnapshotAttribute',
        {'SnapshotId': snapshot_id, 'Attribute': attribute},
        verb='POST')
|
def function[reset_snapshot_attribute, parameter[self, snapshot_id, attribute]]:
constant[
Resets an attribute of a snapshot to its default value.
:type snapshot_id: string
:param snapshot_id: ID of the snapshot
:type attribute: string
:param attribute: The attribute to reset
:rtype: bool
:return: Whether the operation succeeded or not
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2616410>, <ast.Constant object at 0x7da1b2617940>], [<ast.Name object at 0x7da1b2614880>, <ast.Name object at 0x7da1b26143a0>]]
return[call[name[self].get_status, parameter[constant[ResetSnapshotAttribute], name[params]]]]
|
keyword[def] identifier[reset_snapshot_attribute] ( identifier[self] , identifier[snapshot_id] ,
identifier[attribute] = literal[string] ):
literal[string]
identifier[params] ={ literal[string] : identifier[snapshot_id] ,
literal[string] : identifier[attribute] }
keyword[return] identifier[self] . identifier[get_status] ( literal[string] , identifier[params] , identifier[verb] = literal[string] )
|
# Auto-generated normalized copy produced by the code-dump tool.
def reset_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission'):
    """
    Resets an attribute of a snapshot to its default value.
    :type snapshot_id: string
    :param snapshot_id: ID of the snapshot
    :type attribute: string
    :param attribute: The attribute to reset
    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    # Delegate to the generic EC2 status call; this action uses POST.
    params = {'SnapshotId': snapshot_id, 'Attribute': attribute}
    return self.get_status('ResetSnapshotAttribute', params, verb='POST')
|
def get_all_integration_statuses(self, **kwargs):  # noqa: E501
    """Gets the status of all Wavefront integrations  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_integration_statuses(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: ResponseContainerMapStringIntegrationStatus
             If the method is called asynchronously,
             returns the request thread.
    """
    # Only hand back the payload (or the async request thread), never the
    # full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return the helper's result directly: a
    # thread when async_req is truthy, the deserialized data otherwise.
    return self.get_all_integration_statuses_with_http_info(**kwargs)  # noqa: E501
|
def function[get_all_integration_statuses, parameter[self]]:
constant[Gets the status of all Wavefront integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_integration_statuses(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: ResponseContainerMapStringIntegrationStatus
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].get_all_integration_statuses_with_http_info, parameter[]]]
|
keyword[def] identifier[get_all_integration_statuses] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[get_all_integration_statuses_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[get_all_integration_statuses_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data]
|
# Auto-generated normalized copy; the trailing "# depends on" marker is a
# machine-produced control-flow annotation and is intentionally preserved.
def get_all_integration_statuses(self, **kwargs): # noqa: E501
    """Gets the status of all Wavefront integrations  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_integration_statuses(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: ResponseContainerMapStringIntegrationStatus
             If the method is called asynchronously,
             returns the request thread.
    """
    # Only hand back the payload (or the async request thread), never the
    # full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_all_integration_statuses_with_http_info(**kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
    else:
        data = self.get_all_integration_statuses_with_http_info(**kwargs) # noqa: E501
        return data
|
def intersect(self, other):
    """Calculate the intersection of this rectangle and another rectangle.

    Args:
        other (Rect): The other rectangle.

    Returns:
        Rect: The intersection of this rectangle and the given other rectangle, or None if there is no such
            intersection.
    """
    intersection = Rect()
    # Bug fix: the second argument must be the other rectangle's pointer.
    # The previous code passed self._ptr twice, which intersected this
    # rectangle with itself and ignored ``other`` entirely.
    if lib.SDL_IntersectRect(self._ptr, other._ptr, intersection._ptr):
        return intersection
    return None
|
def function[intersect, parameter[self, other]]:
constant[Calculate the intersection of this rectangle and another rectangle.
Args:
other (Rect): The other rectangle.
Returns:
Rect: The intersection of this rectangle and the given other rectangle, or None if there is no such
intersection.
]
variable[intersection] assign[=] call[name[Rect], parameter[]]
if call[name[lib].SDL_IntersectRect, parameter[name[self]._ptr, name[self]._ptr, name[intersection]._ptr]] begin[:]
return[name[intersection]]
|
keyword[def] identifier[intersect] ( identifier[self] , identifier[other] ):
literal[string]
identifier[intersection] = identifier[Rect] ()
keyword[if] identifier[lib] . identifier[SDL_IntersectRect] ( identifier[self] . identifier[_ptr] , identifier[self] . identifier[_ptr] , identifier[intersection] . identifier[_ptr] ):
keyword[return] identifier[intersection]
keyword[else] :
keyword[return] keyword[None]
|
# Auto-generated normalized copy; the trailing "# depends on" markers are
# machine-produced control-flow annotations and are intentionally preserved.
def intersect(self, other):
    """Calculate the intersection of this rectangle and another rectangle.

    Args:
        other (Rect): The other rectangle.

    Returns:
        Rect: The intersection of this rectangle and the given other rectangle, or None if there is no such
            intersection.
    """
    intersection = Rect()
    # NOTE(review): both source arguments here are self._ptr, so ``other``
    # is never consulted — this likely should pass other._ptr as the second
    # argument; confirm against SDL_IntersectRect's signature.
    if lib.SDL_IntersectRect(self._ptr, self._ptr, intersection._ptr):
        return intersection # depends on [control=['if'], data=[]]
    else:
        return None
|
def sort_by_modified(files_or_folders: list) -> list:
    """
    Sort files or folders by modified time, most recently modified first.

    Args:
        files_or_folders: list of files or folders

    Returns:
        list
    """
    # Negating the mtime key sorts descending while keeping the sort stable
    # for entries with identical timestamps.
    return sorted(files_or_folders, key=lambda path: -os.path.getmtime(path))
|
def function[sort_by_modified, parameter[files_or_folders]]:
constant[
Sort files or folders by modified time
Args:
files_or_folders: list of files or folders
Returns:
list
]
return[call[name[sorted], parameter[name[files_or_folders]]]]
|
keyword[def] identifier[sort_by_modified] ( identifier[files_or_folders] : identifier[list] )-> identifier[list] :
literal[string]
keyword[return] identifier[sorted] ( identifier[files_or_folders] , identifier[key] = identifier[os] . identifier[path] . identifier[getmtime] , identifier[reverse] = keyword[True] )
|
# Auto-generated normalized copy produced by the code-dump tool.
def sort_by_modified(files_or_folders: list) -> list:
    """
    Sort files or folders by modified time
    Args:
        files_or_folders: list of files or folders
    Returns:
        list
    """
    # reverse=True puts the most recently modified entries first.
    return sorted(files_or_folders, key=os.path.getmtime, reverse=True)
|
def add(self, value):
    """Append *value* to the rolling window and return the current average."""
    window = self._data
    window.append(value)
    # Evict the oldest sample once the window exceeds its capacity; at most
    # one eviction is ever needed per call.
    while len(window) > self._max_count:
        window.popleft()
    return sum(window) / len(window)
|
def function[add, parameter[self, value]]:
constant[Add a value, and return current average.]
call[name[self]._data.append, parameter[name[value]]]
if compare[call[name[len], parameter[name[self]._data]] greater[>] name[self]._max_count] begin[:]
call[name[self]._data.popleft, parameter[]]
return[binary_operation[call[name[sum], parameter[name[self]._data]] / call[name[len], parameter[name[self]._data]]]]
|
keyword[def] identifier[add] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[_data] . identifier[append] ( identifier[value] )
keyword[if] identifier[len] ( identifier[self] . identifier[_data] )> identifier[self] . identifier[_max_count] :
identifier[self] . identifier[_data] . identifier[popleft] ()
keyword[return] identifier[sum] ( identifier[self] . identifier[_data] )/ identifier[len] ( identifier[self] . identifier[_data] )
|
# Auto-generated normalized copy; the trailing "# depends on" marker is a
# machine-produced control-flow annotation and is intentionally preserved.
# NOTE(review): assumes self._data is a deque (popleft) bounded to
# self._max_count entries — confirm against the class initializer.
def add(self, value):
    """Add a value, and return current average."""
    self._data.append(value)
    # Evict the oldest sample once the window exceeds its capacity.
    if len(self._data) > self._max_count:
        self._data.popleft() # depends on [control=['if'], data=[]]
    return sum(self._data) / len(self._data)
|
def _get_options_dic(self, options: List[str]) -> Dict[str, str]:
"""
Convert the option list to a dictionary where the key is the option and the value is the related option.
Is called in the init.
:param options: options given to the plugin.
:type options: List[str]
:return: dictionary which contains the option key as str related to the option string
:rtype Dict[str, str]
"""
options_dic = {}
for option in options:
cur_option = option.split("=")
if len(cur_option) != 2:
self.log.warning(f"'{option}' is not valid and will be ignored.")
options_dic[cur_option[0]] = cur_option[1]
return options_dic
|
def function[_get_options_dic, parameter[self, options]]:
constant[
Convert the option list to a dictionary where the key is the option and the value is the related option.
Is called in the init.
:param options: options given to the plugin.
:type options: List[str]
:return: dictionary which contains the option key as str related to the option string
:rtype Dict[str, str]
]
variable[options_dic] assign[=] dictionary[[], []]
for taget[name[option]] in starred[name[options]] begin[:]
variable[cur_option] assign[=] call[name[option].split, parameter[constant[=]]]
if compare[call[name[len], parameter[name[cur_option]]] not_equal[!=] constant[2]] begin[:]
call[name[self].log.warning, parameter[<ast.JoinedStr object at 0x7da20e956140>]]
call[name[options_dic]][call[name[cur_option]][constant[0]]] assign[=] call[name[cur_option]][constant[1]]
return[name[options_dic]]
|
keyword[def] identifier[_get_options_dic] ( identifier[self] , identifier[options] : identifier[List] [ identifier[str] ])-> identifier[Dict] [ identifier[str] , identifier[str] ]:
literal[string]
identifier[options_dic] ={}
keyword[for] identifier[option] keyword[in] identifier[options] :
identifier[cur_option] = identifier[option] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[cur_option] )!= literal[int] :
identifier[self] . identifier[log] . identifier[warning] ( literal[string] )
identifier[options_dic] [ identifier[cur_option] [ literal[int] ]]= identifier[cur_option] [ literal[int] ]
keyword[return] identifier[options_dic]
|
# Auto-generated normalized copy; the trailing "# depends on" markers are
# machine-produced control-flow annotations and are intentionally preserved.
def _get_options_dic(self, options: List[str]) -> Dict[str, str]:
    """
    Convert the option list to a dictionary where the key is the option and the value is the related option.
    Is called in the init.
    :param options: options given to the plugin.
    :type options: List[str]
    :return: dictionary which contains the option key as str related to the option string
    :rtype Dict[str, str]
    """
    options_dic = {}
    for option in options:
        cur_option = option.split('=')
        # NOTE(review): despite the warning, the invalid entry is still
        # indexed below — an option without '=' raises IndexError; a
        # ``continue`` inside this branch is probably missing.
        if len(cur_option) != 2:
            self.log.warning(f"'{option}' is not valid and will be ignored.") # depends on [control=['if'], data=[]]
        options_dic[cur_option[0]] = cur_option[1] # depends on [control=['for'], data=['option']]
    return options_dic
|
def nnz_obs_names(self):
    """ wrapper around pyemu.Pst.nnz_obs_names for listing non-zero
    observation names

    Returns
    -------
    nnz_obs_names : list
        pyemu.Pst.nnz_obs_names
    """
    # Without a control file, fall back to the Jacobian's observation names.
    if self.__pst is None:
        return self.jco.obs_names
    return self.pst.nnz_obs_names
|
def function[nnz_obs_names, parameter[self]]:
constant[ wrapper around pyemu.Pst.nnz_obs_names for listing non-zero
observation names
Returns
-------
nnz_obs_names : list
pyemu.Pst.nnz_obs_names
]
if compare[name[self].__pst is_not constant[None]] begin[:]
return[name[self].pst.nnz_obs_names]
|
keyword[def] identifier[nnz_obs_names] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__pst] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[pst] . identifier[nnz_obs_names]
keyword[else] :
keyword[return] identifier[self] . identifier[jco] . identifier[obs_names]
|
# Auto-generated normalized copy produced by the code-dump tool.
def nnz_obs_names(self):
    """ wrapper around pyemu.Pst.nnz_obs_names for listing non-zero
    observation names
    Returns
    -------
    nnz_obs_names : list
        pyemu.Pst.nnz_obs_names
    """
    # Prefer the control file's non-zero-weight names when one is loaded;
    # otherwise fall back to the Jacobian's observation names.
    if self.__pst is not None:
        return self.pst.nnz_obs_names # depends on [control=['if'], data=[]]
    else:
        return self.jco.obs_names
|
def register(self, request, **kwargs):
    """
    Create and immediately log in a new user.

    Only an email is required to register; the username is generated
    automatically and a random password is generated and emailed
    to the user.

    Activation is still required for account use after the specified number
    of days.

    :param request: the current HTTP request.
    :param kwargs: must contain ``email`` (raises ``KeyError`` otherwise).
    :return: the newly created, logged-in ``User``.
    """
    if Site._meta.installed:
        site = Site.objects.get_current()
    else:
        # Sites framework not installed: derive the site from the request.
        site = RequestSite(request)
    email = kwargs['email']
    # Generate random password
    password = User.objects.make_random_password()
    # Generate username based off of the email supplied.
    # NOTE(review): sha_constructor presumably produces hex digests whose
    # first 30 chars fit the username length limit — confirm.
    username = sha_constructor(str(email)).hexdigest()[:30]
    incr = 0
    # Ensure the generated username is in fact unique by salting the email
    # with an increasing counter until no collision remains.
    while User.objects.filter(username=username).count() > 0:
        incr += 1
        username = sha_constructor(str(email + str(incr))).hexdigest()[:30]
    # Create the active user
    new_user = User.objects.create_user(username, email, password)
    new_user.save()
    # Create the registration profile; this is still needed because
    # the user still needs to activate their account for further use
    # after 3 days
    registration_profile = RegistrationProfile.objects.create_profile(
        new_user)
    # Authenticate and login the new user automatically
    auth_user = authenticate(username=username, password=password)
    login(request, auth_user)
    # Set the expiration to when the user's browser closes so the user
    # is forced to log in upon next visit; this should force the user
    # to check their email for their generated password.
    request.session.set_expiry(0)
    # Create a profile instance for the new user if
    # AUTH_PROFILE_MODULE is specified in settings
    if hasattr(settings, 'AUTH_PROFILE_MODULE') and getattr(settings, 'AUTH_PROFILE_MODULE'):
        app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
        model = models.get_model(app_label, model_name)
        try:
            profile = new_user.get_profile()
        except model.DoesNotExist:
            # No profile yet: create and persist an empty one.
            profile = model(user=new_user)
            profile.save()
    # Custom send activation email
    self.send_activation_email(
        new_user, registration_profile, password, site)
    # Send user_registered signal
    signals.user_registered.send(sender=self.__class__,
                                 user=new_user,
                                 request=request)
    return new_user
|
def function[register, parameter[self, request]]:
constant[
Create and immediately log in a new user.
Only require a email to register, username is generated
automatically and a password is random generated and emailed
to the user.
Activation is still required for account uses after specified number
of days.
]
if name[Site]._meta.installed begin[:]
variable[site] assign[=] call[name[Site].objects.get_current, parameter[]]
variable[email] assign[=] call[name[kwargs]][constant[email]]
variable[password] assign[=] call[name[User].objects.make_random_password, parameter[]]
variable[username] assign[=] call[call[call[name[sha_constructor], parameter[call[name[str], parameter[name[email]]]]].hexdigest, parameter[]]][<ast.Slice object at 0x7da1b28d6560>]
variable[incr] assign[=] constant[0]
while compare[call[call[name[User].objects.filter, parameter[]].count, parameter[]] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b28d6dd0>
variable[username] assign[=] call[call[call[name[sha_constructor], parameter[call[name[str], parameter[binary_operation[name[email] + call[name[str], parameter[name[incr]]]]]]]].hexdigest, parameter[]]][<ast.Slice object at 0x7da1b28d6c80>]
variable[new_user] assign[=] call[name[User].objects.create_user, parameter[name[username], name[email], name[password]]]
call[name[new_user].save, parameter[]]
variable[registration_profile] assign[=] call[name[RegistrationProfile].objects.create_profile, parameter[name[new_user]]]
variable[auth_user] assign[=] call[name[authenticate], parameter[]]
call[name[login], parameter[name[request], name[auth_user]]]
call[name[request].session.set_expiry, parameter[constant[0]]]
if <ast.BoolOp object at 0x7da2041d8e50> begin[:]
<ast.Tuple object at 0x7da2041daa10> assign[=] call[name[settings].AUTH_PROFILE_MODULE.split, parameter[constant[.]]]
variable[model] assign[=] call[name[models].get_model, parameter[name[app_label], name[model_name]]]
<ast.Try object at 0x7da1b2865210>
call[name[self].send_activation_email, parameter[name[new_user], name[registration_profile], name[password], name[site]]]
call[name[signals].user_registered.send, parameter[]]
return[name[new_user]]
|
keyword[def] identifier[register] ( identifier[self] , identifier[request] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[Site] . identifier[_meta] . identifier[installed] :
identifier[site] = identifier[Site] . identifier[objects] . identifier[get_current] ()
keyword[else] :
identifier[site] = identifier[RequestSite] ( identifier[request] )
identifier[email] = identifier[kwargs] [ literal[string] ]
identifier[password] = identifier[User] . identifier[objects] . identifier[make_random_password] ()
identifier[username] = identifier[sha_constructor] ( identifier[str] ( identifier[email] )). identifier[hexdigest] ()[: literal[int] ]
identifier[incr] = literal[int]
keyword[while] identifier[User] . identifier[objects] . identifier[filter] ( identifier[username] = identifier[username] ). identifier[count] ()> literal[int] :
identifier[incr] += literal[int]
identifier[username] = identifier[sha_constructor] ( identifier[str] ( identifier[email] + identifier[str] ( identifier[incr] ))). identifier[hexdigest] ()[: literal[int] ]
identifier[new_user] = identifier[User] . identifier[objects] . identifier[create_user] ( identifier[username] , identifier[email] , identifier[password] )
identifier[new_user] . identifier[save] ()
identifier[registration_profile] = identifier[RegistrationProfile] . identifier[objects] . identifier[create_profile] (
identifier[new_user] )
identifier[auth_user] = identifier[authenticate] ( identifier[username] = identifier[username] , identifier[password] = identifier[password] )
identifier[login] ( identifier[request] , identifier[auth_user] )
identifier[request] . identifier[session] . identifier[set_expiry] ( literal[int] )
keyword[if] identifier[hasattr] ( identifier[settings] , literal[string] ) keyword[and] identifier[getattr] ( identifier[settings] , literal[string] ):
identifier[app_label] , identifier[model_name] = identifier[settings] . identifier[AUTH_PROFILE_MODULE] . identifier[split] ( literal[string] )
identifier[model] = identifier[models] . identifier[get_model] ( identifier[app_label] , identifier[model_name] )
keyword[try] :
identifier[profile] = identifier[new_user] . identifier[get_profile] ()
keyword[except] identifier[model] . identifier[DoesNotExist] :
identifier[profile] = identifier[model] ( identifier[user] = identifier[new_user] )
identifier[profile] . identifier[save] ()
identifier[self] . identifier[send_activation_email] (
identifier[new_user] , identifier[registration_profile] , identifier[password] , identifier[site] )
identifier[signals] . identifier[user_registered] . identifier[send] ( identifier[sender] = identifier[self] . identifier[__class__] ,
identifier[user] = identifier[new_user] ,
identifier[request] = identifier[request] )
keyword[return] identifier[new_user]
|
def register(self, request, **kwargs):
"""
Create and immediately log in a new user.
Only require a email to register, username is generated
automatically and a password is random generated and emailed
to the user.
Activation is still required for account uses after specified number
of days.
"""
if Site._meta.installed:
site = Site.objects.get_current() # depends on [control=['if'], data=[]]
else:
site = RequestSite(request)
email = kwargs['email']
# Generate random password
password = User.objects.make_random_password()
# Generate username based off of the email supplied
username = sha_constructor(str(email)).hexdigest()[:30]
incr = 0
# Ensure the generated username is in fact unqiue
while User.objects.filter(username=username).count() > 0:
incr += 1
username = sha_constructor(str(email + str(incr))).hexdigest()[:30] # depends on [control=['while'], data=[]]
# Create the active user
new_user = User.objects.create_user(username, email, password)
new_user.save()
# Create the registration profile, this is still needed because
# the user still needs to activate there account for further users
# after 3 days
registration_profile = RegistrationProfile.objects.create_profile(new_user)
# Authenticate and login the new user automatically
auth_user = authenticate(username=username, password=password)
login(request, auth_user)
# Set the expiration to when the users browser closes so user
# is forced to log in upon next visit, this should force the user
# to check there email for there generated password.
request.session.set_expiry(0) # Create a profile instance for the new user if
# AUTH_PROFILE_MODULE is specified in settings
if hasattr(settings, 'AUTH_PROFILE_MODULE') and getattr(settings, 'AUTH_PROFILE_MODULE'):
(app_label, model_name) = settings.AUTH_PROFILE_MODULE.split('.')
model = models.get_model(app_label, model_name)
try:
profile = new_user.get_profile() # depends on [control=['try'], data=[]]
except model.DoesNotExist:
profile = model(user=new_user)
profile.save() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# Custom send activation email
self.send_activation_email(new_user, registration_profile, password, site)
# Send user_registered signal
signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
return new_user
|
def is_valid_for(self, entry_point, protocol):
"""Check if the current function can be executed from a request to the given entry point
and with the given protocol"""
return self.available_for_entry_point(entry_point) and self.available_for_protocol(protocol)
|
def function[is_valid_for, parameter[self, entry_point, protocol]]:
constant[Check if the current function can be executed from a request to the given entry point
and with the given protocol]
return[<ast.BoolOp object at 0x7da1b04f5cf0>]
|
keyword[def] identifier[is_valid_for] ( identifier[self] , identifier[entry_point] , identifier[protocol] ):
literal[string]
keyword[return] identifier[self] . identifier[available_for_entry_point] ( identifier[entry_point] ) keyword[and] identifier[self] . identifier[available_for_protocol] ( identifier[protocol] )
|
def is_valid_for(self, entry_point, protocol):
"""Check if the current function can be executed from a request to the given entry point
and with the given protocol"""
return self.available_for_entry_point(entry_point) and self.available_for_protocol(protocol)
|
def parse_xml(self, node):
""" Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
def read_points(text):
"""parse a text string of float tuples and return [(x,...),...]
"""
return tuple(tuple(map(float, i.split(','))) for i in text.split())
self._set_properties(node)
# correctly handle "tile objects" (object with gid set)
if self.gid:
self.gid = self.parent.register_gid(self.gid)
points = None
polygon = node.find('polygon')
if polygon is not None:
points = read_points(polygon.get('points'))
self.closed = True
polyline = node.find('polyline')
if polyline is not None:
points = read_points(polyline.get('points'))
self.closed = False
if points:
x1 = x2 = y1 = y2 = 0
for x, y in points:
if x < x1: x1 = x
if x > x2: x2 = x
if y < y1: y1 = y
if y > y2: y2 = y
self.width = abs(x1) + abs(x2)
self.height = abs(y1) + abs(y2)
self.points = tuple(
[(i[0] + self.x, i[1] + self.y) for i in points])
return self
|
def function[parse_xml, parameter[self, node]]:
constant[ Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self
]
def function[read_points, parameter[text]]:
constant[parse a text string of float tuples and return [(x,...),...]
]
return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b08133a0>]]]
call[name[self]._set_properties, parameter[name[node]]]
if name[self].gid begin[:]
name[self].gid assign[=] call[name[self].parent.register_gid, parameter[name[self].gid]]
variable[points] assign[=] constant[None]
variable[polygon] assign[=] call[name[node].find, parameter[constant[polygon]]]
if compare[name[polygon] is_not constant[None]] begin[:]
variable[points] assign[=] call[name[read_points], parameter[call[name[polygon].get, parameter[constant[points]]]]]
name[self].closed assign[=] constant[True]
variable[polyline] assign[=] call[name[node].find, parameter[constant[polyline]]]
if compare[name[polyline] is_not constant[None]] begin[:]
variable[points] assign[=] call[name[read_points], parameter[call[name[polyline].get, parameter[constant[points]]]]]
name[self].closed assign[=] constant[False]
if name[points] begin[:]
variable[x1] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da2054a5fc0>, <ast.Name object at 0x7da2054a7310>]]] in starred[name[points]] begin[:]
if compare[name[x] less[<] name[x1]] begin[:]
variable[x1] assign[=] name[x]
if compare[name[x] greater[>] name[x2]] begin[:]
variable[x2] assign[=] name[x]
if compare[name[y] less[<] name[y1]] begin[:]
variable[y1] assign[=] name[y]
if compare[name[y] greater[>] name[y2]] begin[:]
variable[y2] assign[=] name[y]
name[self].width assign[=] binary_operation[call[name[abs], parameter[name[x1]]] + call[name[abs], parameter[name[x2]]]]
name[self].height assign[=] binary_operation[call[name[abs], parameter[name[y1]]] + call[name[abs], parameter[name[y2]]]]
name[self].points assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da2054a4700>]]
return[name[self]]
|
keyword[def] identifier[parse_xml] ( identifier[self] , identifier[node] ):
literal[string]
keyword[def] identifier[read_points] ( identifier[text] ):
literal[string]
keyword[return] identifier[tuple] ( identifier[tuple] ( identifier[map] ( identifier[float] , identifier[i] . identifier[split] ( literal[string] ))) keyword[for] identifier[i] keyword[in] identifier[text] . identifier[split] ())
identifier[self] . identifier[_set_properties] ( identifier[node] )
keyword[if] identifier[self] . identifier[gid] :
identifier[self] . identifier[gid] = identifier[self] . identifier[parent] . identifier[register_gid] ( identifier[self] . identifier[gid] )
identifier[points] = keyword[None]
identifier[polygon] = identifier[node] . identifier[find] ( literal[string] )
keyword[if] identifier[polygon] keyword[is] keyword[not] keyword[None] :
identifier[points] = identifier[read_points] ( identifier[polygon] . identifier[get] ( literal[string] ))
identifier[self] . identifier[closed] = keyword[True]
identifier[polyline] = identifier[node] . identifier[find] ( literal[string] )
keyword[if] identifier[polyline] keyword[is] keyword[not] keyword[None] :
identifier[points] = identifier[read_points] ( identifier[polyline] . identifier[get] ( literal[string] ))
identifier[self] . identifier[closed] = keyword[False]
keyword[if] identifier[points] :
identifier[x1] = identifier[x2] = identifier[y1] = identifier[y2] = literal[int]
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[points] :
keyword[if] identifier[x] < identifier[x1] : identifier[x1] = identifier[x]
keyword[if] identifier[x] > identifier[x2] : identifier[x2] = identifier[x]
keyword[if] identifier[y] < identifier[y1] : identifier[y1] = identifier[y]
keyword[if] identifier[y] > identifier[y2] : identifier[y2] = identifier[y]
identifier[self] . identifier[width] = identifier[abs] ( identifier[x1] )+ identifier[abs] ( identifier[x2] )
identifier[self] . identifier[height] = identifier[abs] ( identifier[y1] )+ identifier[abs] ( identifier[y2] )
identifier[self] . identifier[points] = identifier[tuple] (
[( identifier[i] [ literal[int] ]+ identifier[self] . identifier[x] , identifier[i] [ literal[int] ]+ identifier[self] . identifier[y] ) keyword[for] identifier[i] keyword[in] identifier[points] ])
keyword[return] identifier[self]
|
def parse_xml(self, node):
""" Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
def read_points(text):
"""parse a text string of float tuples and return [(x,...),...]
"""
return tuple((tuple(map(float, i.split(','))) for i in text.split()))
self._set_properties(node)
# correctly handle "tile objects" (object with gid set)
if self.gid:
self.gid = self.parent.register_gid(self.gid) # depends on [control=['if'], data=[]]
points = None
polygon = node.find('polygon')
if polygon is not None:
points = read_points(polygon.get('points'))
self.closed = True # depends on [control=['if'], data=['polygon']]
polyline = node.find('polyline')
if polyline is not None:
points = read_points(polyline.get('points'))
self.closed = False # depends on [control=['if'], data=['polyline']]
if points:
x1 = x2 = y1 = y2 = 0
for (x, y) in points:
if x < x1:
x1 = x # depends on [control=['if'], data=['x', 'x1']]
if x > x2:
x2 = x # depends on [control=['if'], data=['x', 'x2']]
if y < y1:
y1 = y # depends on [control=['if'], data=['y', 'y1']]
if y > y2:
y2 = y # depends on [control=['if'], data=['y', 'y2']] # depends on [control=['for'], data=[]]
self.width = abs(x1) + abs(x2)
self.height = abs(y1) + abs(y2)
self.points = tuple([(i[0] + self.x, i[1] + self.y) for i in points]) # depends on [control=['if'], data=[]]
return self
|
def from_variant_sequence_and_reference_context(
cls,
variant_sequence,
reference_context,
min_transcript_prefix_length,
max_transcript_mismatches,
include_mismatches_after_variant,
protein_sequence_length=None):
"""
Attempt to translate a single VariantSequence using the reading frame
from a single ReferenceContext.
Parameters
----------
variant_sequence : VariantSequence
reference_context : ReferenceContext
min_transcript_prefix_length : int
Minimum number of nucleotides before the variant to test whether
our variant sequence can use the reading frame from a reference
transcript.
max_transcript_mismatches : int
Don't use the reading frame from a context where the cDNA variant
sequences disagrees at more than this number of positions before the
variant nucleotides.
include_mismatches_after_variant : bool
If true, mismatches after the variant nucleotides will also count
against max_transcript_mismatches filtering.
protein_sequence_length : int, optional
Truncate protein to be at most this long
Returns either a ProteinSequence object or None if the number of
mismatches between the RNA and reference transcript sequences exceeds
given threshold.
"""
variant_sequence_in_reading_frame = match_variant_sequence_to_reference_context(
variant_sequence,
reference_context,
min_transcript_prefix_length=min_transcript_prefix_length,
max_transcript_mismatches=max_transcript_mismatches,
include_mismatches_after_variant=include_mismatches_after_variant)
if variant_sequence_in_reading_frame is None:
logger.info("Unable to determine reading frame for %s", variant_sequence)
return None
cdna_sequence = variant_sequence_in_reading_frame.cdna_sequence
cdna_codon_offset = variant_sequence_in_reading_frame.offset_to_first_complete_codon
# get the offsets into the cDNA sequence which pick out the variant nucleotides
cdna_variant_start_offset = variant_sequence_in_reading_frame.variant_cdna_interval_start
cdna_variant_end_offset = variant_sequence_in_reading_frame.variant_cdna_interval_end
# TODO: determine if the first codon is the start codon of a
# transcript, for now any of the unusual start codons like CTG
# will translate to leucine instead of methionine.
variant_amino_acids, ends_with_stop_codon = translate_cdna(
cdna_sequence[cdna_codon_offset:],
first_codon_is_start=False,
mitochondrial=reference_context.mitochondrial)
variant_aa_interval_start, variant_aa_interval_end, frameshift = \
find_mutant_amino_acid_interval(
cdna_sequence=cdna_sequence,
cdna_first_codon_offset=cdna_codon_offset,
cdna_variant_start_offset=cdna_variant_start_offset,
cdna_variant_end_offset=cdna_variant_end_offset,
n_ref=len(reference_context.sequence_at_variant_locus),
n_amino_acids=len(variant_amino_acids))
if protein_sequence_length and len(variant_amino_acids) > protein_sequence_length:
if protein_sequence_length <= variant_aa_interval_start:
logger.warn(
("Truncating amino acid sequence %s "
"to only %d elements loses all variant residues"),
variant_amino_acids,
protein_sequence_length)
return None
# if the protein is too long then shorten it, which implies
# we're no longer stopping due to a stop codon and that the variant
# amino acids might need a new stop index
variant_amino_acids = variant_amino_acids[:protein_sequence_length]
variant_aa_interval_end = min(variant_aa_interval_end, protein_sequence_length)
ends_with_stop_codon = False
return Translation(
amino_acids=variant_amino_acids,
frameshift=frameshift,
ends_with_stop_codon=ends_with_stop_codon,
variant_aa_interval_start=variant_aa_interval_start,
variant_aa_interval_end=variant_aa_interval_end,
untrimmed_variant_sequence=variant_sequence,
reference_context=reference_context,
variant_sequence_in_reading_frame=variant_sequence_in_reading_frame)
|
def function[from_variant_sequence_and_reference_context, parameter[cls, variant_sequence, reference_context, min_transcript_prefix_length, max_transcript_mismatches, include_mismatches_after_variant, protein_sequence_length]]:
constant[
Attempt to translate a single VariantSequence using the reading frame
from a single ReferenceContext.
Parameters
----------
variant_sequence : VariantSequence
reference_context : ReferenceContext
min_transcript_prefix_length : int
Minimum number of nucleotides before the variant to test whether
our variant sequence can use the reading frame from a reference
transcript.
max_transcript_mismatches : int
Don't use the reading frame from a context where the cDNA variant
sequences disagrees at more than this number of positions before the
variant nucleotides.
include_mismatches_after_variant : bool
If true, mismatches after the variant nucleotides will also count
against max_transcript_mismatches filtering.
protein_sequence_length : int, optional
Truncate protein to be at most this long
Returns either a ProteinSequence object or None if the number of
mismatches between the RNA and reference transcript sequences exceeds
given threshold.
]
variable[variant_sequence_in_reading_frame] assign[=] call[name[match_variant_sequence_to_reference_context], parameter[name[variant_sequence], name[reference_context]]]
if compare[name[variant_sequence_in_reading_frame] is constant[None]] begin[:]
call[name[logger].info, parameter[constant[Unable to determine reading frame for %s], name[variant_sequence]]]
return[constant[None]]
variable[cdna_sequence] assign[=] name[variant_sequence_in_reading_frame].cdna_sequence
variable[cdna_codon_offset] assign[=] name[variant_sequence_in_reading_frame].offset_to_first_complete_codon
variable[cdna_variant_start_offset] assign[=] name[variant_sequence_in_reading_frame].variant_cdna_interval_start
variable[cdna_variant_end_offset] assign[=] name[variant_sequence_in_reading_frame].variant_cdna_interval_end
<ast.Tuple object at 0x7da1b2519000> assign[=] call[name[translate_cdna], parameter[call[name[cdna_sequence]][<ast.Slice object at 0x7da1b25190f0>]]]
<ast.Tuple object at 0x7da1b2518250> assign[=] call[name[find_mutant_amino_acid_interval], parameter[]]
if <ast.BoolOp object at 0x7da1b2519480> begin[:]
if compare[name[protein_sequence_length] less_or_equal[<=] name[variant_aa_interval_start]] begin[:]
call[name[logger].warn, parameter[constant[Truncating amino acid sequence %s to only %d elements loses all variant residues], name[variant_amino_acids], name[protein_sequence_length]]]
return[constant[None]]
variable[variant_amino_acids] assign[=] call[name[variant_amino_acids]][<ast.Slice object at 0x7da1b251b0a0>]
variable[variant_aa_interval_end] assign[=] call[name[min], parameter[name[variant_aa_interval_end], name[protein_sequence_length]]]
variable[ends_with_stop_codon] assign[=] constant[False]
return[call[name[Translation], parameter[]]]
|
keyword[def] identifier[from_variant_sequence_and_reference_context] (
identifier[cls] ,
identifier[variant_sequence] ,
identifier[reference_context] ,
identifier[min_transcript_prefix_length] ,
identifier[max_transcript_mismatches] ,
identifier[include_mismatches_after_variant] ,
identifier[protein_sequence_length] = keyword[None] ):
literal[string]
identifier[variant_sequence_in_reading_frame] = identifier[match_variant_sequence_to_reference_context] (
identifier[variant_sequence] ,
identifier[reference_context] ,
identifier[min_transcript_prefix_length] = identifier[min_transcript_prefix_length] ,
identifier[max_transcript_mismatches] = identifier[max_transcript_mismatches] ,
identifier[include_mismatches_after_variant] = identifier[include_mismatches_after_variant] )
keyword[if] identifier[variant_sequence_in_reading_frame] keyword[is] keyword[None] :
identifier[logger] . identifier[info] ( literal[string] , identifier[variant_sequence] )
keyword[return] keyword[None]
identifier[cdna_sequence] = identifier[variant_sequence_in_reading_frame] . identifier[cdna_sequence]
identifier[cdna_codon_offset] = identifier[variant_sequence_in_reading_frame] . identifier[offset_to_first_complete_codon]
identifier[cdna_variant_start_offset] = identifier[variant_sequence_in_reading_frame] . identifier[variant_cdna_interval_start]
identifier[cdna_variant_end_offset] = identifier[variant_sequence_in_reading_frame] . identifier[variant_cdna_interval_end]
identifier[variant_amino_acids] , identifier[ends_with_stop_codon] = identifier[translate_cdna] (
identifier[cdna_sequence] [ identifier[cdna_codon_offset] :],
identifier[first_codon_is_start] = keyword[False] ,
identifier[mitochondrial] = identifier[reference_context] . identifier[mitochondrial] )
identifier[variant_aa_interval_start] , identifier[variant_aa_interval_end] , identifier[frameshift] = identifier[find_mutant_amino_acid_interval] (
identifier[cdna_sequence] = identifier[cdna_sequence] ,
identifier[cdna_first_codon_offset] = identifier[cdna_codon_offset] ,
identifier[cdna_variant_start_offset] = identifier[cdna_variant_start_offset] ,
identifier[cdna_variant_end_offset] = identifier[cdna_variant_end_offset] ,
identifier[n_ref] = identifier[len] ( identifier[reference_context] . identifier[sequence_at_variant_locus] ),
identifier[n_amino_acids] = identifier[len] ( identifier[variant_amino_acids] ))
keyword[if] identifier[protein_sequence_length] keyword[and] identifier[len] ( identifier[variant_amino_acids] )> identifier[protein_sequence_length] :
keyword[if] identifier[protein_sequence_length] <= identifier[variant_aa_interval_start] :
identifier[logger] . identifier[warn] (
( literal[string]
literal[string] ),
identifier[variant_amino_acids] ,
identifier[protein_sequence_length] )
keyword[return] keyword[None]
identifier[variant_amino_acids] = identifier[variant_amino_acids] [: identifier[protein_sequence_length] ]
identifier[variant_aa_interval_end] = identifier[min] ( identifier[variant_aa_interval_end] , identifier[protein_sequence_length] )
identifier[ends_with_stop_codon] = keyword[False]
keyword[return] identifier[Translation] (
identifier[amino_acids] = identifier[variant_amino_acids] ,
identifier[frameshift] = identifier[frameshift] ,
identifier[ends_with_stop_codon] = identifier[ends_with_stop_codon] ,
identifier[variant_aa_interval_start] = identifier[variant_aa_interval_start] ,
identifier[variant_aa_interval_end] = identifier[variant_aa_interval_end] ,
identifier[untrimmed_variant_sequence] = identifier[variant_sequence] ,
identifier[reference_context] = identifier[reference_context] ,
identifier[variant_sequence_in_reading_frame] = identifier[variant_sequence_in_reading_frame] )
|
def from_variant_sequence_and_reference_context(cls, variant_sequence, reference_context, min_transcript_prefix_length, max_transcript_mismatches, include_mismatches_after_variant, protein_sequence_length=None):
"""
Attempt to translate a single VariantSequence using the reading frame
from a single ReferenceContext.
Parameters
----------
variant_sequence : VariantSequence
reference_context : ReferenceContext
min_transcript_prefix_length : int
Minimum number of nucleotides before the variant to test whether
our variant sequence can use the reading frame from a reference
transcript.
max_transcript_mismatches : int
Don't use the reading frame from a context where the cDNA variant
sequences disagrees at more than this number of positions before the
variant nucleotides.
include_mismatches_after_variant : bool
If true, mismatches after the variant nucleotides will also count
against max_transcript_mismatches filtering.
protein_sequence_length : int, optional
Truncate protein to be at most this long
Returns either a ProteinSequence object or None if the number of
mismatches between the RNA and reference transcript sequences exceeds
given threshold.
"""
variant_sequence_in_reading_frame = match_variant_sequence_to_reference_context(variant_sequence, reference_context, min_transcript_prefix_length=min_transcript_prefix_length, max_transcript_mismatches=max_transcript_mismatches, include_mismatches_after_variant=include_mismatches_after_variant)
if variant_sequence_in_reading_frame is None:
logger.info('Unable to determine reading frame for %s', variant_sequence)
return None # depends on [control=['if'], data=[]]
cdna_sequence = variant_sequence_in_reading_frame.cdna_sequence
cdna_codon_offset = variant_sequence_in_reading_frame.offset_to_first_complete_codon
# get the offsets into the cDNA sequence which pick out the variant nucleotides
cdna_variant_start_offset = variant_sequence_in_reading_frame.variant_cdna_interval_start
cdna_variant_end_offset = variant_sequence_in_reading_frame.variant_cdna_interval_end
# TODO: determine if the first codon is the start codon of a
# transcript, for now any of the unusual start codons like CTG
# will translate to leucine instead of methionine.
(variant_amino_acids, ends_with_stop_codon) = translate_cdna(cdna_sequence[cdna_codon_offset:], first_codon_is_start=False, mitochondrial=reference_context.mitochondrial)
(variant_aa_interval_start, variant_aa_interval_end, frameshift) = find_mutant_amino_acid_interval(cdna_sequence=cdna_sequence, cdna_first_codon_offset=cdna_codon_offset, cdna_variant_start_offset=cdna_variant_start_offset, cdna_variant_end_offset=cdna_variant_end_offset, n_ref=len(reference_context.sequence_at_variant_locus), n_amino_acids=len(variant_amino_acids))
if protein_sequence_length and len(variant_amino_acids) > protein_sequence_length:
if protein_sequence_length <= variant_aa_interval_start:
logger.warn('Truncating amino acid sequence %s to only %d elements loses all variant residues', variant_amino_acids, protein_sequence_length)
return None # depends on [control=['if'], data=['protein_sequence_length']]
# if the protein is too long then shorten it, which implies
# we're no longer stopping due to a stop codon and that the variant
# amino acids might need a new stop index
variant_amino_acids = variant_amino_acids[:protein_sequence_length]
variant_aa_interval_end = min(variant_aa_interval_end, protein_sequence_length)
ends_with_stop_codon = False # depends on [control=['if'], data=[]]
return Translation(amino_acids=variant_amino_acids, frameshift=frameshift, ends_with_stop_codon=ends_with_stop_codon, variant_aa_interval_start=variant_aa_interval_start, variant_aa_interval_end=variant_aa_interval_end, untrimmed_variant_sequence=variant_sequence, reference_context=reference_context, variant_sequence_in_reading_frame=variant_sequence_in_reading_frame)
|
def _iq_handler(iq_type, payload_class, payload_key, usage_restriction):
"""Method decorator generator for decorating <iq type='get'/> stanza
handler methods in `XMPPFeatureHandler` subclasses.
:Parameters:
- `payload_class`: payload class expected
- `payload_key`: payload class specific filtering key
- `usage_restriction`: optional usage restriction: "pre-auth" or
"post-auth"
:Types:
- `payload_class`: subclass of `StanzaPayload`
- `usage_restriction`: `unicode`
"""
def decorator(func):
"""The decorator"""
func._pyxmpp_stanza_handled = ("iq", iq_type)
func._pyxmpp_payload_class_handled = payload_class
func._pyxmpp_payload_key = payload_key
func._pyxmpp_usage_restriction = usage_restriction
return func
return decorator
|
def function[_iq_handler, parameter[iq_type, payload_class, payload_key, usage_restriction]]:
constant[Method decorator generator for decorating <iq type='get'/> stanza
handler methods in `XMPPFeatureHandler` subclasses.
:Parameters:
- `payload_class`: payload class expected
- `payload_key`: payload class specific filtering key
- `usage_restriction`: optional usage restriction: "pre-auth" or
"post-auth"
:Types:
- `payload_class`: subclass of `StanzaPayload`
- `usage_restriction`: `unicode`
]
def function[decorator, parameter[func]]:
constant[The decorator]
name[func]._pyxmpp_stanza_handled assign[=] tuple[[<ast.Constant object at 0x7da20c6c4190>, <ast.Name object at 0x7da20c6c4f10>]]
name[func]._pyxmpp_payload_class_handled assign[=] name[payload_class]
name[func]._pyxmpp_payload_key assign[=] name[payload_key]
name[func]._pyxmpp_usage_restriction assign[=] name[usage_restriction]
return[name[func]]
return[name[decorator]]
|
keyword[def] identifier[_iq_handler] ( identifier[iq_type] , identifier[payload_class] , identifier[payload_key] , identifier[usage_restriction] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[func] ):
literal[string]
identifier[func] . identifier[_pyxmpp_stanza_handled] =( literal[string] , identifier[iq_type] )
identifier[func] . identifier[_pyxmpp_payload_class_handled] = identifier[payload_class]
identifier[func] . identifier[_pyxmpp_payload_key] = identifier[payload_key]
identifier[func] . identifier[_pyxmpp_usage_restriction] = identifier[usage_restriction]
keyword[return] identifier[func]
keyword[return] identifier[decorator]
|
def _iq_handler(iq_type, payload_class, payload_key, usage_restriction):
"""Method decorator generator for decorating <iq type='get'/> stanza
handler methods in `XMPPFeatureHandler` subclasses.
:Parameters:
- `payload_class`: payload class expected
- `payload_key`: payload class specific filtering key
- `usage_restriction`: optional usage restriction: "pre-auth" or
"post-auth"
:Types:
- `payload_class`: subclass of `StanzaPayload`
- `usage_restriction`: `unicode`
"""
def decorator(func):
"""The decorator"""
func._pyxmpp_stanza_handled = ('iq', iq_type)
func._pyxmpp_payload_class_handled = payload_class
func._pyxmpp_payload_key = payload_key
func._pyxmpp_usage_restriction = usage_restriction
return func
return decorator
|
def lookup(self, h):
'''Get stream IDs for a single hash.
This yields strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`,
or fed back into :mod:`coordinate` or other job queue systems.
Note that for common terms this can return a large number of
stream IDs! This is a scan over a dense region of a
:mod:`kvlayer` table so it should be reasonably efficient,
but be prepared for it to return many documents in a large
corpus. Blindly storing the results in a :class:`list`
may be inadvisable.
This will return nothing unless the index was written with
:attr:`hash_docs` set. No document will correspond to
:data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int h: Murmur hash to look up
'''
for (_, k1, k2) in self.client.scan_keys(HASH_TF_INDEX_TABLE,
((h,), (h,))):
yield kvlayer_key_to_stream_id((k1, k2))
|
def function[lookup, parameter[self, h]]:
constant[Get stream IDs for a single hash.
This yields strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`,
or fed back into :mod:`coordinate` or other job queue systems.
Note that for common terms this can return a large number of
stream IDs! This is a scan over a dense region of a
:mod:`kvlayer` table so it should be reasonably efficient,
but be prepared for it to return many documents in a large
corpus. Blindly storing the results in a :class:`list`
may be inadvisable.
This will return nothing unless the index was written with
:attr:`hash_docs` set. No document will correspond to
:data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int h: Murmur hash to look up
]
for taget[tuple[[<ast.Name object at 0x7da1b0291e40>, <ast.Name object at 0x7da1b0291ae0>, <ast.Name object at 0x7da1b0290bb0>]]] in starred[call[name[self].client.scan_keys, parameter[name[HASH_TF_INDEX_TABLE], tuple[[<ast.Tuple object at 0x7da1b0291540>, <ast.Tuple object at 0x7da1b02900d0>]]]]] begin[:]
<ast.Yield object at 0x7da1b0290100>
|
keyword[def] identifier[lookup] ( identifier[self] , identifier[h] ):
literal[string]
keyword[for] ( identifier[_] , identifier[k1] , identifier[k2] ) keyword[in] identifier[self] . identifier[client] . identifier[scan_keys] ( identifier[HASH_TF_INDEX_TABLE] ,
(( identifier[h] ,),( identifier[h] ,))):
keyword[yield] identifier[kvlayer_key_to_stream_id] (( identifier[k1] , identifier[k2] ))
|
def lookup(self, h):
"""Get stream IDs for a single hash.
This yields strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`,
or fed back into :mod:`coordinate` or other job queue systems.
Note that for common terms this can return a large number of
stream IDs! This is a scan over a dense region of a
:mod:`kvlayer` table so it should be reasonably efficient,
but be prepared for it to return many documents in a large
corpus. Blindly storing the results in a :class:`list`
may be inadvisable.
This will return nothing unless the index was written with
:attr:`hash_docs` set. No document will correspond to
:data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int h: Murmur hash to look up
"""
for (_, k1, k2) in self.client.scan_keys(HASH_TF_INDEX_TABLE, ((h,), (h,))):
yield kvlayer_key_to_stream_id((k1, k2)) # depends on [control=['for'], data=[]]
|
def generate_signature(payload, secret):
'''use an endpoint specific payload and client secret to generate
a signature for the request'''
payload = _encode(payload)
secret = _encode(secret)
return hmac.new(secret, digestmod=hashlib.sha256,
msg=payload).hexdigest()
|
def function[generate_signature, parameter[payload, secret]]:
constant[use an endpoint specific payload and client secret to generate
a signature for the request]
variable[payload] assign[=] call[name[_encode], parameter[name[payload]]]
variable[secret] assign[=] call[name[_encode], parameter[name[secret]]]
return[call[call[name[hmac].new, parameter[name[secret]]].hexdigest, parameter[]]]
|
keyword[def] identifier[generate_signature] ( identifier[payload] , identifier[secret] ):
literal[string]
identifier[payload] = identifier[_encode] ( identifier[payload] )
identifier[secret] = identifier[_encode] ( identifier[secret] )
keyword[return] identifier[hmac] . identifier[new] ( identifier[secret] , identifier[digestmod] = identifier[hashlib] . identifier[sha256] ,
identifier[msg] = identifier[payload] ). identifier[hexdigest] ()
|
def generate_signature(payload, secret):
"""use an endpoint specific payload and client secret to generate
a signature for the request"""
payload = _encode(payload)
secret = _encode(secret)
return hmac.new(secret, digestmod=hashlib.sha256, msg=payload).hexdigest()
|
def var_set(self, session, **kwargs):
    """
    Sets the given variables or prints the current ones. "set answer=42"
    """
    if kwargs:
        # Store each assignment in the session and echo it back.
        for raw_name, value in kwargs.items():
            cleaned = raw_name.strip()
            session.set(cleaned, value)
            session.write_line("{0}={1}", cleaned, value)
    else:
        # Nothing to assign: dump current variables as a Name/Value table.
        table = self._utils.make_table(
            ("Name", "Value"), session.variables.items()
        )
        session.write_line(table)
|
def function[var_set, parameter[self, session]]:
constant[
Sets the given variables or prints the current ones. "set answer=42"
]
if <ast.UnaryOp object at 0x7da18f09cbe0> begin[:]
call[name[session].write_line, parameter[call[name[self]._utils.make_table, parameter[tuple[[<ast.Constant object at 0x7da18f09d2d0>, <ast.Constant object at 0x7da18f09c580>]], call[name[session].variables.items, parameter[]]]]]]
|
keyword[def] identifier[var_set] ( identifier[self] , identifier[session] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[kwargs] :
identifier[session] . identifier[write_line] (
identifier[self] . identifier[_utils] . identifier[make_table] (
( literal[string] , literal[string] ), identifier[session] . identifier[variables] . identifier[items] ()
)
)
keyword[else] :
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[kwargs] . identifier[items] ():
identifier[name] = identifier[name] . identifier[strip] ()
identifier[session] . identifier[set] ( identifier[name] , identifier[value] )
identifier[session] . identifier[write_line] ( literal[string] , identifier[name] , identifier[value] )
|
def var_set(self, session, **kwargs):
    """
    Sets the given variables or prints the current ones. "set answer=42"
    """
    # No assignments given: render the current variables as a Name/Value table.
    if not kwargs:
        session.write_line(self._utils.make_table(('Name', 'Value'), session.variables.items())) # depends on [control=['if'], data=[]]
    else:
        # Store each name=value pair in the session and echo it back.
        for (name, value) in kwargs.items():
            name = name.strip()
            session.set(name, value)
            session.write_line('{0}={1}', name, value) # depends on [control=['for'], data=[]]
|
def login(self, user, passwd, bank):
    """Authenticate against the named bank and select a profile.

    Sets ``self.useragent``, ``self.bankid`` and ``self.profile`` as
    side effects.

    :param user: user id (personal code)
    :param passwd: password
    :param bank: key into ``self.BANKS`` identifying the bank endpoint
    :returns: True on success; False on any failure (unknown bank,
        HTTP error from the API, or no bank info in the profile).
        API error messages are logged.
    """
    logger.info("login...")
    if bank not in self.BANKS:
        logger.error("Can't find that bank.")
        return False
    self.useragent = self.BANKS[bank]["u-a"]
    self.bankid = self.BANKS[bank]["id"]

    def request_ok(path, error_field, **request_kwargs):
        # Issue one API request; on HTTPError, log the first message
        # found under `error_field` in the API's error payload.
        try:
            self.request(path, **request_kwargs)
            return True
        except HTTPError as e:
            error = json.loads(e.read().decode("utf8"))
            logger.error(error["errorMessages"][error_field][0]["message"])
            return False

    credentials = json.dumps(
        {"userId": user, "password": passwd, "useEasyLogin": False,
         "generateEasyLoginId": False})
    if not request_ok("identification/personalcode", "fields",
                      post=credentials, method="POST"):
        return False
    if not request_ok("profile/", "general"):
        return False
    profile = json.loads(self.getdata())
    if not profile["banks"]:
        logger.error("Using wrong bank? Can't find any bank info.")
        return False
    # Prefer the private profile; fall back to the first corporate one.
    try:
        self.profile = profile["banks"][0]["privateProfile"]["id"]
    except KeyError:
        self.profile = profile["banks"][0]["corporateProfiles"][0]["id"]
    return request_ok("profile/%s" % self.profile, "general", method="POST")
|
def function[login, parameter[self, user, passwd, bank]]:
constant[ Login ]
call[name[logger].info, parameter[constant[login...]]]
if compare[name[bank] <ast.NotIn object at 0x7da2590d7190> name[self].BANKS] begin[:]
call[name[logger].error, parameter[constant[Can't find that bank.]]]
return[constant[False]]
name[self].useragent assign[=] call[call[name[self].BANKS][name[bank]]][constant[u-a]]
name[self].bankid assign[=] call[call[name[self].BANKS][name[bank]]][constant[id]]
variable[login] assign[=] call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da1b0af5540>, <ast.Constant object at 0x7da1b0af5570>, <ast.Constant object at 0x7da1b0af55a0>, <ast.Constant object at 0x7da1b0af5660>], [<ast.Name object at 0x7da1b0af56c0>, <ast.Name object at 0x7da1b0af5840>, <ast.Constant object at 0x7da1b0af5870>, <ast.Constant object at 0x7da1b0af5720>]]]]
<ast.Try object at 0x7da1b0af5750>
<ast.Try object at 0x7da1b0af7b20>
variable[profile] assign[=] call[name[json].loads, parameter[call[name[self].getdata, parameter[]]]]
if compare[call[name[len], parameter[call[name[profile]][constant[banks]]]] equal[==] constant[0]] begin[:]
call[name[logger].error, parameter[constant[Using wrong bank? Can't find any bank info.]]]
return[constant[False]]
<ast.Try object at 0x7da1b0af66e0>
<ast.Try object at 0x7da1b0b190c0>
return[constant[True]]
|
keyword[def] identifier[login] ( identifier[self] , identifier[user] , identifier[passwd] , identifier[bank] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] )
keyword[if] identifier[bank] keyword[not] keyword[in] identifier[self] . identifier[BANKS] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] keyword[False]
identifier[self] . identifier[useragent] = identifier[self] . identifier[BANKS] [ identifier[bank] ][ literal[string] ]
identifier[self] . identifier[bankid] = identifier[self] . identifier[BANKS] [ identifier[bank] ][ literal[string] ]
identifier[login] = identifier[json] . identifier[dumps] (
{ literal[string] : identifier[user] , literal[string] : identifier[passwd] , literal[string] : keyword[False] ,
literal[string] : keyword[False] })
keyword[try] :
identifier[self] . identifier[request] ( literal[string] , identifier[post] = identifier[login] ,
identifier[method] = literal[string] )
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
identifier[error] = identifier[json] . identifier[loads] ( identifier[e] . identifier[read] (). identifier[decode] ( literal[string] ))
identifier[logger] . identifier[error] ( identifier[error] [ literal[string] ][ literal[string] ][ literal[int] ][ literal[string] ])
keyword[return] keyword[False]
keyword[try] :
identifier[self] . identifier[request] ( literal[string] )
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
identifier[error] = identifier[json] . identifier[loads] ( identifier[e] . identifier[read] (). identifier[decode] ( literal[string] ))
identifier[logger] . identifier[error] ( identifier[error] [ literal[string] ][ literal[string] ][ literal[int] ][ literal[string] ])
keyword[return] keyword[False]
identifier[profile] = identifier[json] . identifier[loads] ( identifier[self] . identifier[getdata] ())
keyword[if] identifier[len] ( identifier[profile] [ literal[string] ])== literal[int] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] keyword[False]
keyword[try] :
identifier[self] . identifier[profile] = identifier[profile] [ literal[string] ][ literal[int] ][ literal[string] ][ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[self] . identifier[profile] = identifier[profile] [ literal[string] ][ literal[int] ][ literal[string] ][ literal[int] ][ literal[string] ]
keyword[try] :
identifier[self] . identifier[request] ( literal[string] % identifier[self] . identifier[profile] , identifier[method] = literal[string] )
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
identifier[error] = identifier[json] . identifier[loads] ( identifier[e] . identifier[read] (). identifier[decode] ( literal[string] ))
identifier[logger] . identifier[error] ( identifier[error] [ literal[string] ][ literal[string] ][ literal[int] ][ literal[string] ])
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def login(self, user, passwd, bank):
    """Authenticate `user` against `bank` and select a profile.

    Returns True on success, False on any failure (unknown bank, HTTP
    error from the API, or no bank info in the profile).  API error
    messages are logged.  Sets self.useragent, self.bankid and
    self.profile as side effects.
    """
    logger.info('login...')
    if bank not in self.BANKS:
        logger.error("Can't find that bank.")
        return False # depends on [control=['if'], data=[]]
    # Per-bank request settings: user-agent string and bank id.
    self.useragent = self.BANKS[bank]['u-a']
    self.bankid = self.BANKS[bank]['id']
    login = json.dumps({'userId': user, 'password': passwd, 'useEasyLogin': False, 'generateEasyLoginId': False})
    # Step 1: submit credentials.
    try:
        self.request('identification/personalcode', post=login, method='POST') # depends on [control=['try'], data=[]]
    except HTTPError as e:
        # Field-level validation errors come back under 'fields'.
        error = json.loads(e.read().decode('utf8'))
        logger.error(error['errorMessages']['fields'][0]['message'])
        return False # depends on [control=['except'], data=['e']]
    # Step 2: fetch the profile listing.
    try:
        self.request('profile/') # depends on [control=['try'], data=[]]
    except HTTPError as e:
        error = json.loads(e.read().decode('utf8'))
        logger.error(error['errorMessages']['general'][0]['message'])
        return False # depends on [control=['except'], data=['e']]
    profile = json.loads(self.getdata())
    if len(profile['banks']) == 0:
        logger.error("Using wrong bank? Can't find any bank info.")
        return False # depends on [control=['if'], data=[]]
    # Prefer the private profile; fall back to the first corporate one.
    try:
        self.profile = profile['banks'][0]['privateProfile']['id'] # depends on [control=['try'], data=[]]
    except KeyError:
        self.profile = profile['banks'][0]['corporateProfiles'][0]['id'] # depends on [control=['except'], data=[]]
    # Step 3: activate the chosen profile.
    try:
        self.request('profile/%s' % self.profile, method='POST') # depends on [control=['try'], data=[]]
    except HTTPError as e:
        error = json.loads(e.read().decode('utf8'))
        logger.error(error['errorMessages']['general'][0]['message'])
        return False # depends on [control=['except'], data=['e']]
    return True
|
def get_tasks_changed_since(self, since):
    """ Returns a list of tasks that were changed recently."""
    # Keep pending tasks whose 'modified' timestamp — falling back to
    # 'entry', and then to a year-2000 UTC sentinel — is at or after
    # `since`.  The fallback expression stays inside the condition so it
    # is evaluated per task, exactly as before.
    return [
        task
        for task in self.client.filter_tasks({'status': 'pending'})
        if task.get(
            'modified',
            task.get(
                'entry',
                datetime.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
            ),
        ) >= since
    ]
|
def function[get_tasks_changed_since, parameter[self, since]]:
constant[ Returns a list of tasks that were changed recently.]
variable[changed_tasks] assign[=] list[[]]
for taget[name[task]] in starred[call[name[self].client.filter_tasks, parameter[dictionary[[<ast.Constant object at 0x7da1b0b0e710>], [<ast.Constant object at 0x7da1b0b0e770>]]]]] begin[:]
if compare[call[name[task].get, parameter[constant[modified], call[name[task].get, parameter[constant[entry], call[call[name[datetime].datetime, parameter[constant[2000], constant[1], constant[1]]].replace, parameter[]]]]]] greater_or_equal[>=] name[since]] begin[:]
call[name[changed_tasks].append, parameter[name[task]]]
return[name[changed_tasks]]
|
keyword[def] identifier[get_tasks_changed_since] ( identifier[self] , identifier[since] ):
literal[string]
identifier[changed_tasks] =[]
keyword[for] identifier[task] keyword[in] identifier[self] . identifier[client] . identifier[filter_tasks] ({ literal[string] : literal[string] }):
keyword[if] identifier[task] . identifier[get] (
literal[string] ,
identifier[task] . identifier[get] (
literal[string] ,
identifier[datetime] . identifier[datetime] ( literal[int] , literal[int] , literal[int] ). identifier[replace] ( identifier[tzinfo] = identifier[pytz] . identifier[utc] )
)
)>= identifier[since] :
identifier[changed_tasks] . identifier[append] ( identifier[task] )
keyword[return] identifier[changed_tasks]
|
def get_tasks_changed_since(self, since):
    """ Returns a list of tasks that were changed recently."""
    changed_tasks = []
    # Scan pending tasks; keep those whose 'modified' timestamp (or
    # 'entry' timestamp, or a year-2000 UTC sentinel when both are
    # absent) is at or after `since`.
    for task in self.client.filter_tasks({'status': 'pending'}):
        if task.get('modified', task.get('entry', datetime.datetime(2000, 1, 1).replace(tzinfo=pytz.utc))) >= since:
            changed_tasks.append(task) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['task']]
    return changed_tasks
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.