code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _bbvi_fit(self, optimizer='RMSProp', iterations=1000, print_progress=True,
start_diffuse=False, **kwargs):
""" Performs Black Box Variational Inference
Parameters
----------
posterior : method
Hands bbvi_fit a posterior object
optimizer : string
Stochastic optimizer: either RMSProp or ADAM.
iterations: int
How many iterations to run
print_progress : bool
Whether to print the ELBO progress or not
start_diffuse : bool
Whether to start from diffuse values (if not: use approx Gaussian)
Optional kwargs: animate (bool, default False), batch_size (int, default 24),
learning_rate (float, default depends on model family), record_elbo (bool, default False)
Returns
----------
BBVIResults object
"""
# Heavy-tailed observation families get a smaller default step size.
if self.model_name2 in ["t", "Skewt"]:
default_learning_rate = 0.0001
else:
default_learning_rate = 0.001
animate = kwargs.get('animate', False)
batch_size = kwargs.get('batch_size', 24)
learning_rate = kwargs.get('learning_rate', default_learning_rate)
record_elbo = kwargs.get('record_elbo', False)
# Starting values
gaussian_latents = self._preoptimize_model() # find parameters for Gaussian model
phi = self.latent_variables.get_z_starting_values()
q_list = self.initialize_approx_dist(phi, start_diffuse, gaussian_latents)
# PERFORM BBVI
bbvi_obj = ifr.CBBVI(self.neg_logposterior, self.log_p_blanket, q_list, batch_size,
optimizer, iterations, learning_rate, record_elbo)
if print_progress is False:
bbvi_obj.printer = False
# NOTE(review): in the animate branch, elbo_records is never assigned, so the
# final res.BBVISSResults(..., elbo_records=elbo_records) call would raise
# NameError when animate=True -- confirm with the CBBVI API.
if animate is True:
q, q_params, q_ses, stored_z, stored_predictive_likelihood = bbvi_obj.run_and_store()
self._animate_bbvi(stored_z,stored_predictive_likelihood)
else:
q, q_params, q_ses, elbo_records = bbvi_obj.run()
# NOTE(review): q_ses appear to be log-scale spreads (they are exponentiated
# before use here and below) -- confirm against the CBBVI return contract.
self.latent_variables.set_z_values(q_params[:self.z_no],'BBVI',np.exp(q_ses[:self.z_no]),None)
# STORE RESULTS
for k in range(len(self.latent_variables.z_list)):
self.latent_variables.z_list[k].q = q[k]
# The first z_no entries of q_params/q_ses are latent variables; the remainder
# describe the state sequence.
theta = q_params[self.z_no:]
Y = self.data
scores = None
states = q_params[self.z_no:]
X_names = None
states_ses = np.exp(q_ses[self.z_no:])
self.states = states
self.states_ses = states_ses
return res.BBVISSResults(data_name=self.data_name,X_names=X_names,model_name=self.model_name,
model_type=self.model_type, latent_variables=self.latent_variables,data=Y,index=self.index,
multivariate_model=self.multivariate_model,objective=self.neg_logposterior(q_params),
method='BBVI',ses=q_ses[:self.z_no],signal=theta,scores=scores,elbo_records=elbo_records,
z_hide=self._z_hide,max_lag=self.max_lag,states=states,states_var=np.power(states_ses,2)) | def function[_bbvi_fit, parameter[self, optimizer, iterations, print_progress, start_diffuse]]:
constant[ Performs Black Box Variational Inference
Parameters
----------
posterior : method
Hands bbvi_fit a posterior object
optimizer : string
Stochastic optimizer: either RMSProp or ADAM.
iterations: int
How many iterations to run
print_progress : bool
Whether tp print the ELBO progress or not
start_diffuse : bool
Whether to start from diffuse values (if not: use approx Gaussian)
Returns
----------
BBVIResults object
]
if compare[name[self].model_name2 in list[[<ast.Constant object at 0x7da1b194faf0>, <ast.Constant object at 0x7da1b194e080>]]] begin[:]
variable[default_learning_rate] assign[=] constant[0.0001]
variable[animate] assign[=] call[name[kwargs].get, parameter[constant[animate], constant[False]]]
variable[batch_size] assign[=] call[name[kwargs].get, parameter[constant[batch_size], constant[24]]]
variable[learning_rate] assign[=] call[name[kwargs].get, parameter[constant[learning_rate], name[default_learning_rate]]]
variable[record_elbo] assign[=] call[name[kwargs].get, parameter[constant[record_elbo], constant[False]]]
variable[gaussian_latents] assign[=] call[name[self]._preoptimize_model, parameter[]]
variable[phi] assign[=] call[name[self].latent_variables.get_z_starting_values, parameter[]]
variable[q_list] assign[=] call[name[self].initialize_approx_dist, parameter[name[phi], name[start_diffuse], name[gaussian_latents]]]
variable[bbvi_obj] assign[=] call[name[ifr].CBBVI, parameter[name[self].neg_logposterior, name[self].log_p_blanket, name[q_list], name[batch_size], name[optimizer], name[iterations], name[learning_rate], name[record_elbo]]]
if compare[name[print_progress] is constant[False]] begin[:]
name[bbvi_obj].printer assign[=] constant[False]
if compare[name[animate] is constant[True]] begin[:]
<ast.Tuple object at 0x7da20e963e20> assign[=] call[name[bbvi_obj].run_and_store, parameter[]]
call[name[self]._animate_bbvi, parameter[name[stored_z], name[stored_predictive_likelihood]]]
call[name[self].latent_variables.set_z_values, parameter[call[name[q_params]][<ast.Slice object at 0x7da20e962ce0>], constant[BBVI], call[name[np].exp, parameter[call[name[q_ses]][<ast.Slice object at 0x7da20e960070>]]], constant[None]]]
for taget[name[k]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].latent_variables.z_list]]]]] begin[:]
call[name[self].latent_variables.z_list][name[k]].q assign[=] call[name[q]][name[k]]
variable[theta] assign[=] call[name[q_params]][<ast.Slice object at 0x7da20e962920>]
variable[Y] assign[=] name[self].data
variable[scores] assign[=] constant[None]
variable[states] assign[=] call[name[q_params]][<ast.Slice object at 0x7da20e9603a0>]
variable[X_names] assign[=] constant[None]
variable[states_ses] assign[=] call[name[np].exp, parameter[call[name[q_ses]][<ast.Slice object at 0x7da20e963e80>]]]
name[self].states assign[=] name[states]
name[self].states_ses assign[=] name[states_ses]
return[call[name[res].BBVISSResults, parameter[]]] | keyword[def] identifier[_bbvi_fit] ( identifier[self] , identifier[optimizer] = literal[string] , identifier[iterations] = literal[int] , identifier[print_progress] = keyword[True] ,
identifier[start_diffuse] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[model_name2] keyword[in] [ literal[string] , literal[string] ]:
identifier[default_learning_rate] = literal[int]
keyword[else] :
identifier[default_learning_rate] = literal[int]
identifier[animate] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )
identifier[batch_size] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[learning_rate] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[default_learning_rate] )
identifier[record_elbo] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )
identifier[gaussian_latents] = identifier[self] . identifier[_preoptimize_model] ()
identifier[phi] = identifier[self] . identifier[latent_variables] . identifier[get_z_starting_values] ()
identifier[q_list] = identifier[self] . identifier[initialize_approx_dist] ( identifier[phi] , identifier[start_diffuse] , identifier[gaussian_latents] )
identifier[bbvi_obj] = identifier[ifr] . identifier[CBBVI] ( identifier[self] . identifier[neg_logposterior] , identifier[self] . identifier[log_p_blanket] , identifier[q_list] , identifier[batch_size] ,
identifier[optimizer] , identifier[iterations] , identifier[learning_rate] , identifier[record_elbo] )
keyword[if] identifier[print_progress] keyword[is] keyword[False] :
identifier[bbvi_obj] . identifier[printer] = keyword[False]
keyword[if] identifier[animate] keyword[is] keyword[True] :
identifier[q] , identifier[q_params] , identifier[q_ses] , identifier[stored_z] , identifier[stored_predictive_likelihood] = identifier[bbvi_obj] . identifier[run_and_store] ()
identifier[self] . identifier[_animate_bbvi] ( identifier[stored_z] , identifier[stored_predictive_likelihood] )
keyword[else] :
identifier[q] , identifier[q_params] , identifier[q_ses] , identifier[elbo_records] = identifier[bbvi_obj] . identifier[run] ()
identifier[self] . identifier[latent_variables] . identifier[set_z_values] ( identifier[q_params] [: identifier[self] . identifier[z_no] ], literal[string] , identifier[np] . identifier[exp] ( identifier[q_ses] [: identifier[self] . identifier[z_no] ]), keyword[None] )
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[latent_variables] . identifier[z_list] )):
identifier[self] . identifier[latent_variables] . identifier[z_list] [ identifier[k] ]. identifier[q] = identifier[q] [ identifier[k] ]
identifier[theta] = identifier[q_params] [ identifier[self] . identifier[z_no] :]
identifier[Y] = identifier[self] . identifier[data]
identifier[scores] = keyword[None]
identifier[states] = identifier[q_params] [ identifier[self] . identifier[z_no] :]
identifier[X_names] = keyword[None]
identifier[states_ses] = identifier[np] . identifier[exp] ( identifier[q_ses] [ identifier[self] . identifier[z_no] :])
identifier[self] . identifier[states] = identifier[states]
identifier[self] . identifier[states_ses] = identifier[states_ses]
keyword[return] identifier[res] . identifier[BBVISSResults] ( identifier[data_name] = identifier[self] . identifier[data_name] , identifier[X_names] = identifier[X_names] , identifier[model_name] = identifier[self] . identifier[model_name] ,
identifier[model_type] = identifier[self] . identifier[model_type] , identifier[latent_variables] = identifier[self] . identifier[latent_variables] , identifier[data] = identifier[Y] , identifier[index] = identifier[self] . identifier[index] ,
identifier[multivariate_model] = identifier[self] . identifier[multivariate_model] , identifier[objective] = identifier[self] . identifier[neg_logposterior] ( identifier[q_params] ),
identifier[method] = literal[string] , identifier[ses] = identifier[q_ses] [: identifier[self] . identifier[z_no] ], identifier[signal] = identifier[theta] , identifier[scores] = identifier[scores] , identifier[elbo_records] = identifier[elbo_records] ,
identifier[z_hide] = identifier[self] . identifier[_z_hide] , identifier[max_lag] = identifier[self] . identifier[max_lag] , identifier[states] = identifier[states] , identifier[states_var] = identifier[np] . identifier[power] ( identifier[states_ses] , literal[int] )) | def _bbvi_fit(self, optimizer='RMSProp', iterations=1000, print_progress=True, start_diffuse=False, **kwargs):
""" Performs Black Box Variational Inference
Parameters
----------
posterior : method
Hands bbvi_fit a posterior object
optimizer : string
Stochastic optimizer: either RMSProp or ADAM.
iterations: int
How many iterations to run
print_progress : bool
Whether tp print the ELBO progress or not
start_diffuse : bool
Whether to start from diffuse values (if not: use approx Gaussian)
Returns
----------
BBVIResults object
"""
if self.model_name2 in ['t', 'Skewt']:
default_learning_rate = 0.0001 # depends on [control=['if'], data=[]]
else:
default_learning_rate = 0.001
animate = kwargs.get('animate', False)
batch_size = kwargs.get('batch_size', 24)
learning_rate = kwargs.get('learning_rate', default_learning_rate)
record_elbo = kwargs.get('record_elbo', False)
# Starting values
gaussian_latents = self._preoptimize_model() # find parameters for Gaussian model
phi = self.latent_variables.get_z_starting_values()
q_list = self.initialize_approx_dist(phi, start_diffuse, gaussian_latents)
# PERFORM BBVI
bbvi_obj = ifr.CBBVI(self.neg_logposterior, self.log_p_blanket, q_list, batch_size, optimizer, iterations, learning_rate, record_elbo)
if print_progress is False:
bbvi_obj.printer = False # depends on [control=['if'], data=[]]
if animate is True:
(q, q_params, q_ses, stored_z, stored_predictive_likelihood) = bbvi_obj.run_and_store()
self._animate_bbvi(stored_z, stored_predictive_likelihood) # depends on [control=['if'], data=[]]
else:
(q, q_params, q_ses, elbo_records) = bbvi_obj.run()
self.latent_variables.set_z_values(q_params[:self.z_no], 'BBVI', np.exp(q_ses[:self.z_no]), None)
# STORE RESULTS
for k in range(len(self.latent_variables.z_list)):
self.latent_variables.z_list[k].q = q[k] # depends on [control=['for'], data=['k']]
theta = q_params[self.z_no:]
Y = self.data
scores = None
states = q_params[self.z_no:]
X_names = None
states_ses = np.exp(q_ses[self.z_no:])
self.states = states
self.states_ses = states_ses
return res.BBVISSResults(data_name=self.data_name, X_names=X_names, model_name=self.model_name, model_type=self.model_type, latent_variables=self.latent_variables, data=Y, index=self.index, multivariate_model=self.multivariate_model, objective=self.neg_logposterior(q_params), method='BBVI', ses=q_ses[:self.z_no], signal=theta, scores=scores, elbo_records=elbo_records, z_hide=self._z_hide, max_lag=self.max_lag, states=states, states_var=np.power(states_ses, 2)) |
def repr_setup(self, name=None, col_names=None, col_types=None):
        """
        Configure the display name used when representing this object.
        This wasn't safe to pass into init because of the inheritance
        :param name: name of the api return type (ex. CAMERA_DATA_LIST)
        :param col_names: accepted but not used in this body
        :param col_types: accepted but not used in this body
        :return None:
        """
        # Keep the existing name when no replacement is supplied.
        self._name = name or self._name | def function[repr_setup, parameter[self, name, col_names, col_types]]:
constant[
This wasn't safe to pass into init because of the inheritance
:param name: name of the api return type (ex. CAMERA_DATA_LIST)
:param col_names:
:param col_types:
:return None:
]
name[self]._name assign[=] <ast.BoolOp object at 0x7da1b142a3b0> | keyword[def] identifier[repr_setup] ( identifier[self] , identifier[name] = keyword[None] , identifier[col_names] = keyword[None] , identifier[col_types] = keyword[None] ):
literal[string]
identifier[self] . identifier[_name] = identifier[name] keyword[or] identifier[self] . identifier[_name] | def repr_setup(self, name=None, col_names=None, col_types=None):
"""
This wasn't safe to pass into init because of the inheritance
:param name: name of the api return type (ex. CAMERA_DATA_LIST)
:param col_names:
:param col_types:
:return None:
"""
self._name = name or self._name |
def follow_info(self, index=None, params=None):
        """
        Retrieve cross-cluster replication follower info for the given indices.
        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html>`_
        :arg index: A comma-separated list of index patterns; use `_all` to
            perform the operation on all indices
        """
        # Issues GET /<index>/_ccr/info through the shared transport layer.
        return self.transport.perform_request(
            "GET", _make_path(index, "_ccr", "info"), params=params
        ) | def function[follow_info, parameter[self, index, params]]:
constant[
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html>`_
:arg index: A comma-separated list of index patterns; use `_all` to
perform the operation on all indices
]
return[call[name[self].transport.perform_request, parameter[constant[GET], call[name[_make_path], parameter[name[index], constant[_ccr], constant[info]]]]]] | keyword[def] identifier[follow_info] ( identifier[self] , identifier[index] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] (
literal[string] , identifier[_make_path] ( identifier[index] , literal[string] , literal[string] ), identifier[params] = identifier[params]
) | def follow_info(self, index=None, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html>`_
:arg index: A comma-separated list of index patterns; use `_all` to
perform the operation on all indices
"""
return self.transport.perform_request('GET', _make_path(index, '_ccr', 'info'), params=params) |
def extras_msg(extras):
    """
    Create an error message for extra items or properties.

    Returns a tuple: (comma-joined reprs of the extras, "was" or "were").
    """
    # Choose the verb that agrees in number with the extras.
    if len(extras) == 1:
        verb = "was"
    else:
        verb = "were"
    return ", ".join(repr(extra) for extra in extras), verb | def function[extras_msg, parameter[extras]]:
constant[
Create an error message for extra items or properties.
]
if compare[call[name[len], parameter[name[extras]]] equal[==] constant[1]] begin[:]
variable[verb] assign[=] constant[was]
return[tuple[[<ast.Call object at 0x7da20e9b3be0>, <ast.Name object at 0x7da1b2345e10>]]] | keyword[def] identifier[extras_msg] ( identifier[extras] ):
literal[string]
keyword[if] identifier[len] ( identifier[extras] )== literal[int] :
identifier[verb] = literal[string]
keyword[else] :
identifier[verb] = literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[repr] ( identifier[extra] ) keyword[for] identifier[extra] keyword[in] identifier[extras] ), identifier[verb] | def extras_msg(extras):
"""
Create an error message for extra items or properties.
"""
if len(extras) == 1:
verb = 'was' # depends on [control=['if'], data=[]]
else:
verb = 'were'
return (', '.join((repr(extra) for extra in extras)), verb) |
def do_clustering(types, max_clust):
    """
    Helper method for clustering that takes a list of all of the things being
    clustered (which are assumed to be binary numbers represented as strings),
    and an int representing the maximum number of clusters that are allowed.
    Returns: A dictionary mapping cluster ids to lists of numbers that are part
    of that cluster.
    """
    # Fill in leading zeros to make all numbers same length.
    # Drop everything up to and including the "b" marker (e.g. "0b1010" -> "1010").
    ls = [list(t[t.find("b")+1:]) for t in types]
    prepend_zeros_to_lists(ls)
    # Pairwise distances under the weighted Hamming metric.
    dist_matrix = pdist(ls, weighted_hamming)
    # Complete-linkage hierarchical clustering, then cut into at most
    # max_clust flat clusters.
    clusters = hierarchicalcluster.complete(dist_matrix)
    clusters = hierarchicalcluster.fcluster(clusters, max_clust,
                                            criterion="maxclust")
    # Group members of each cluster together
    cluster_dict = dict((c, []) for c in set(clusters))
    for i in range(len(types)):
        cluster_dict[clusters[i]].append(types[i])
    return cluster_dict | def function[do_clustering, parameter[types, max_clust]]:
constant[
Helper method for clustering that takes a list of all of the things being
clustered (which are assumed to be binary numbers represented as strings),
and an int representing the maximum number of clusters that are allowed.
Returns: A dictionary mapping cluster ids to lists of numbers that are part
of that cluster.
]
variable[ls] assign[=] <ast.ListComp object at 0x7da1b1494610>
call[name[prepend_zeros_to_lists], parameter[name[ls]]]
variable[dist_matrix] assign[=] call[name[pdist], parameter[name[ls], name[weighted_hamming]]]
variable[clusters] assign[=] call[name[hierarchicalcluster].complete, parameter[name[dist_matrix]]]
variable[clusters] assign[=] call[name[hierarchicalcluster].fcluster, parameter[name[clusters], name[max_clust]]]
variable[cluster_dict] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b1454820>]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[types]]]]]] begin[:]
call[call[name[cluster_dict]][call[name[clusters]][name[i]]].append, parameter[call[name[types]][name[i]]]]
return[name[cluster_dict]] | keyword[def] identifier[do_clustering] ( identifier[types] , identifier[max_clust] ):
literal[string]
identifier[ls] =[ identifier[list] ( identifier[t] [ identifier[t] . identifier[find] ( literal[string] )+ literal[int] :]) keyword[for] identifier[t] keyword[in] identifier[types] ]
identifier[prepend_zeros_to_lists] ( identifier[ls] )
identifier[dist_matrix] = identifier[pdist] ( identifier[ls] , identifier[weighted_hamming] )
identifier[clusters] = identifier[hierarchicalcluster] . identifier[complete] ( identifier[dist_matrix] )
identifier[clusters] = identifier[hierarchicalcluster] . identifier[fcluster] ( identifier[clusters] , identifier[max_clust] ,
identifier[criterion] = literal[string] )
identifier[cluster_dict] = identifier[dict] (( identifier[c] ,[]) keyword[for] identifier[c] keyword[in] identifier[set] ( identifier[clusters] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[types] )):
identifier[cluster_dict] [ identifier[clusters] [ identifier[i] ]]. identifier[append] ( identifier[types] [ identifier[i] ])
keyword[return] identifier[cluster_dict] | def do_clustering(types, max_clust):
"""
Helper method for clustering that takes a list of all of the things being
clustered (which are assumed to be binary numbers represented as strings),
and an int representing the maximum number of clusters that are allowed.
Returns: A dictionary mapping cluster ids to lists of numbers that are part
of that cluster.
"""
# Fill in leading zeros to make all numbers same length.
ls = [list(t[t.find('b') + 1:]) for t in types]
prepend_zeros_to_lists(ls)
dist_matrix = pdist(ls, weighted_hamming)
clusters = hierarchicalcluster.complete(dist_matrix)
clusters = hierarchicalcluster.fcluster(clusters, max_clust, criterion='maxclust')
# Group members of each cluster together
cluster_dict = dict(((c, []) for c in set(clusters)))
for i in range(len(types)):
cluster_dict[clusters[i]].append(types[i]) # depends on [control=['for'], data=['i']]
return cluster_dict |
def has_mixed_eol_chars(text):
    """Detect if text has mixed EOL characters"""
    eol_chars = get_eol_chars(text)
    if eol_chars is None:
        # No line endings found at all, so nothing can be mixed.
        return False
    # Rebuild the text using only the detected EOL sequence; any difference
    # from the original means more than one EOL style is present.
    correct_text = eol_chars.join((text+eol_chars).splitlines())
    return repr(correct_text) != repr(text) | def function[has_mixed_eol_chars, parameter[text]]:
constant[Detect if text has mixed EOL characters]
variable[eol_chars] assign[=] call[name[get_eol_chars], parameter[name[text]]]
if compare[name[eol_chars] is constant[None]] begin[:]
return[constant[False]]
variable[correct_text] assign[=] call[name[eol_chars].join, parameter[call[binary_operation[name[text] + name[eol_chars]].splitlines, parameter[]]]]
return[compare[call[name[repr], parameter[name[correct_text]]] not_equal[!=] call[name[repr], parameter[name[text]]]]] | keyword[def] identifier[has_mixed_eol_chars] ( identifier[text] ):
literal[string]
identifier[eol_chars] = identifier[get_eol_chars] ( identifier[text] )
keyword[if] identifier[eol_chars] keyword[is] keyword[None] :
keyword[return] keyword[False]
identifier[correct_text] = identifier[eol_chars] . identifier[join] (( identifier[text] + identifier[eol_chars] ). identifier[splitlines] ())
keyword[return] identifier[repr] ( identifier[correct_text] )!= identifier[repr] ( identifier[text] ) | def has_mixed_eol_chars(text):
"""Detect if text has mixed EOL characters"""
eol_chars = get_eol_chars(text)
if eol_chars is None:
return False # depends on [control=['if'], data=[]]
correct_text = eol_chars.join((text + eol_chars).splitlines())
return repr(correct_text) != repr(text) |
def gitlab(self):
        """Generate gitlab details.

        Returns a dict with 'config', 'main' and 'qe' repository names,
        each built by filling self.format templates with self.data.
        """
        main_name = self.format['git_repo'].format(**self.data)
        qe_name = self.format['git_repo_qe'].format(**self.data)
        config_name = self.format['git_repo_configs'].format(**self.data)
        git = {
            'config': config_name,
            'main': main_name,
            'qe': qe_name,
        }
        return git | def function[gitlab, parameter[self]]:
constant[Generate gitlab details.]
variable[main_name] assign[=] call[call[name[self].format][constant[git_repo]].format, parameter[]]
variable[qe_name] assign[=] call[call[name[self].format][constant[git_repo_qe]].format, parameter[]]
variable[config_name] assign[=] call[call[name[self].format][constant[git_repo_configs]].format, parameter[]]
variable[git] assign[=] dictionary[[<ast.Constant object at 0x7da18fe91ba0>, <ast.Constant object at 0x7da18fe90610>, <ast.Constant object at 0x7da18fe928f0>], [<ast.Name object at 0x7da18fe90a90>, <ast.Name object at 0x7da18fe921d0>, <ast.Name object at 0x7da18fe92350>]]
return[name[git]] | keyword[def] identifier[gitlab] ( identifier[self] ):
literal[string]
identifier[main_name] = identifier[self] . identifier[format] [ literal[string] ]. identifier[format] (** identifier[self] . identifier[data] )
identifier[qe_name] = identifier[self] . identifier[format] [ literal[string] ]. identifier[format] (** identifier[self] . identifier[data] )
identifier[config_name] = identifier[self] . identifier[format] [ literal[string] ]. identifier[format] (** identifier[self] . identifier[data] )
identifier[git] ={
literal[string] : identifier[config_name] ,
literal[string] : identifier[main_name] ,
literal[string] : identifier[qe_name] ,
}
keyword[return] identifier[git] | def gitlab(self):
"""Generate gitlab details."""
main_name = self.format['git_repo'].format(**self.data)
qe_name = self.format['git_repo_qe'].format(**self.data)
config_name = self.format['git_repo_configs'].format(**self.data)
git = {'config': config_name, 'main': main_name, 'qe': qe_name}
return git |
def reorder_resolved_levels(storage, debug):
    """L1 and L2 rules

    Applies the bidi reordering rules L1 (reset separator/trailing-whitespace
    levels to the paragraph level) and L2 (reverse contiguous level runs,
    per line) to storage['chars'] in place. When debug is truthy, dumps the
    storage afterwards.
    """
    # Applies L1.
    should_reset = True
    chars = storage['chars']
    # Walk backwards so trailing whitespace runs can be detected before the
    # separator that follows them in logical order.
    for _ch in chars[::-1]:
        # L1. On each line, reset the embedding level of the following
        # characters to the paragraph embedding level:
        if _ch['orig'] in ('B', 'S'):
            # 1. Segment separators,
            # 2. Paragraph separators,
            _ch['level'] = storage['base_level']
            should_reset = True
        elif should_reset and _ch['orig'] in ('BN', 'WS'):
            # 3. Any sequence of whitespace characters preceding a segment
            # separator or paragraph separator
            # 4. Any sequence of white space characters at the end of the
            # line.
            _ch['level'] = storage['base_level']
        else:
            should_reset = False
    max_len = len(chars)
    # L2 should be per line
    # Calculates highest level and lowest odd level on the fly.
    line_start = line_end = 0
    highest_level = 0
    lowest_odd_level = EXPLICIT_LEVEL_LIMIT
    for idx in range(max_len):
        _ch = chars[idx]
        # calc the levels
        char_level = _ch['level']
        if char_level > highest_level:
            highest_level = char_level
        if char_level % 2 and char_level < lowest_odd_level:
            lowest_odd_level = char_level
        # End of line (paragraph separator) or end of text: reorder this line.
        if _ch['orig'] == 'B' or idx == max_len - 1:
            line_end = idx
            # omit line breaks
            if _ch['orig'] == 'B':
                line_end -= 1
            reverse_contiguous_sequence(chars, line_start, line_end,
                                        highest_level, lowest_odd_level)
            # reset for next line run
            line_start = idx+1
            highest_level = 0
            lowest_odd_level = EXPLICIT_LEVEL_LIMIT
    if debug:
        debug_storage(storage) | def function[reorder_resolved_levels, parameter[storage, debug]]:
constant[L1 and L2 rules]
variable[should_reset] assign[=] constant[True]
variable[chars] assign[=] call[name[storage]][constant[chars]]
for taget[name[_ch]] in starred[call[name[chars]][<ast.Slice object at 0x7da1b02145e0>]] begin[:]
if compare[call[name[_ch]][constant[orig]] in tuple[[<ast.Constant object at 0x7da1b0215210>, <ast.Constant object at 0x7da1b0214250>]]] begin[:]
call[name[_ch]][constant[level]] assign[=] call[name[storage]][constant[base_level]]
variable[should_reset] assign[=] constant[True]
variable[max_len] assign[=] call[name[len], parameter[name[chars]]]
variable[line_start] assign[=] constant[0]
variable[highest_level] assign[=] constant[0]
variable[lowest_odd_level] assign[=] name[EXPLICIT_LEVEL_LIMIT]
for taget[name[idx]] in starred[call[name[range], parameter[name[max_len]]]] begin[:]
variable[_ch] assign[=] call[name[chars]][name[idx]]
variable[char_level] assign[=] call[name[_ch]][constant[level]]
if compare[name[char_level] greater[>] name[highest_level]] begin[:]
variable[highest_level] assign[=] name[char_level]
if <ast.BoolOp object at 0x7da1b0217a30> begin[:]
variable[lowest_odd_level] assign[=] name[char_level]
if <ast.BoolOp object at 0x7da1b0214070> begin[:]
variable[line_end] assign[=] name[idx]
if compare[call[name[_ch]][constant[orig]] equal[==] constant[B]] begin[:]
<ast.AugAssign object at 0x7da1b02179a0>
call[name[reverse_contiguous_sequence], parameter[name[chars], name[line_start], name[line_end], name[highest_level], name[lowest_odd_level]]]
variable[line_start] assign[=] binary_operation[name[idx] + constant[1]]
variable[highest_level] assign[=] constant[0]
variable[lowest_odd_level] assign[=] name[EXPLICIT_LEVEL_LIMIT]
if name[debug] begin[:]
call[name[debug_storage], parameter[name[storage]]] | keyword[def] identifier[reorder_resolved_levels] ( identifier[storage] , identifier[debug] ):
literal[string]
identifier[should_reset] = keyword[True]
identifier[chars] = identifier[storage] [ literal[string] ]
keyword[for] identifier[_ch] keyword[in] identifier[chars] [::- literal[int] ]:
keyword[if] identifier[_ch] [ literal[string] ] keyword[in] ( literal[string] , literal[string] ):
identifier[_ch] [ literal[string] ]= identifier[storage] [ literal[string] ]
identifier[should_reset] = keyword[True]
keyword[elif] identifier[should_reset] keyword[and] identifier[_ch] [ literal[string] ] keyword[in] ( literal[string] , literal[string] ):
identifier[_ch] [ literal[string] ]= identifier[storage] [ literal[string] ]
keyword[else] :
identifier[should_reset] = keyword[False]
identifier[max_len] = identifier[len] ( identifier[chars] )
identifier[line_start] = identifier[line_end] = literal[int]
identifier[highest_level] = literal[int]
identifier[lowest_odd_level] = identifier[EXPLICIT_LEVEL_LIMIT]
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[max_len] ):
identifier[_ch] = identifier[chars] [ identifier[idx] ]
identifier[char_level] = identifier[_ch] [ literal[string] ]
keyword[if] identifier[char_level] > identifier[highest_level] :
identifier[highest_level] = identifier[char_level]
keyword[if] identifier[char_level] % literal[int] keyword[and] identifier[char_level] < identifier[lowest_odd_level] :
identifier[lowest_odd_level] = identifier[char_level]
keyword[if] identifier[_ch] [ literal[string] ]== literal[string] keyword[or] identifier[idx] == identifier[max_len] - literal[int] :
identifier[line_end] = identifier[idx]
keyword[if] identifier[_ch] [ literal[string] ]== literal[string] :
identifier[line_end] -= literal[int]
identifier[reverse_contiguous_sequence] ( identifier[chars] , identifier[line_start] , identifier[line_end] ,
identifier[highest_level] , identifier[lowest_odd_level] )
identifier[line_start] = identifier[idx] + literal[int]
identifier[highest_level] = literal[int]
identifier[lowest_odd_level] = identifier[EXPLICIT_LEVEL_LIMIT]
keyword[if] identifier[debug] :
identifier[debug_storage] ( identifier[storage] ) | def reorder_resolved_levels(storage, debug):
"""L1 and L2 rules"""
# Applies L1.
should_reset = True
chars = storage['chars']
for _ch in chars[::-1]:
# L1. On each line, reset the embedding level of the following
# characters to the paragraph embedding level:
if _ch['orig'] in ('B', 'S'):
# 1. Segment separators,
# 2. Paragraph separators,
_ch['level'] = storage['base_level']
should_reset = True # depends on [control=['if'], data=[]]
elif should_reset and _ch['orig'] in ('BN', 'WS'):
# 3. Any sequence of whitespace characters preceding a segment
# separator or paragraph separator
# 4. Any sequence of white space characters at the end of the
# line.
_ch['level'] = storage['base_level'] # depends on [control=['if'], data=[]]
else:
should_reset = False # depends on [control=['for'], data=['_ch']]
max_len = len(chars)
# L2 should be per line
# Calculates highest level and loweset odd level on the fly.
line_start = line_end = 0
highest_level = 0
lowest_odd_level = EXPLICIT_LEVEL_LIMIT
for idx in range(max_len):
_ch = chars[idx]
# calc the levels
char_level = _ch['level']
if char_level > highest_level:
highest_level = char_level # depends on [control=['if'], data=['char_level', 'highest_level']]
if char_level % 2 and char_level < lowest_odd_level:
lowest_odd_level = char_level # depends on [control=['if'], data=[]]
if _ch['orig'] == 'B' or idx == max_len - 1:
line_end = idx
# omit line breaks
if _ch['orig'] == 'B':
line_end -= 1 # depends on [control=['if'], data=[]]
reverse_contiguous_sequence(chars, line_start, line_end, highest_level, lowest_odd_level)
# reset for next line run
line_start = idx + 1
highest_level = 0
lowest_odd_level = EXPLICIT_LEVEL_LIMIT # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['idx']]
if debug:
debug_storage(storage) # depends on [control=['if'], data=[]] |
def delete_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, **kwargs):
"""Delete FreeShippingCoupon
Delete an instance of FreeShippingCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
else:
(data) = cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
return data | def function[delete_free_shipping_coupon_by_id, parameter[cls, free_shipping_coupon_id]]:
constant[Delete FreeShippingCoupon
Delete an instance of FreeShippingCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._delete_free_shipping_coupon_by_id_with_http_info, parameter[name[free_shipping_coupon_id]]]] | keyword[def] identifier[delete_free_shipping_coupon_by_id] ( identifier[cls] , identifier[free_shipping_coupon_id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_delete_free_shipping_coupon_by_id_with_http_info] ( identifier[free_shipping_coupon_id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_delete_free_shipping_coupon_by_id_with_http_info] ( identifier[free_shipping_coupon_id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def delete_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, **kwargs):
"""Delete FreeShippingCoupon
Delete an instance of FreeShippingCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
return data |
def find_inputs_and_params(node):
'''Walk a computation graph and extract root variables.
Parameters
----------
node : Theano expression
A symbolic Theano expression to walk.
Returns
-------
inputs : list Theano variables
A list of candidate inputs for this graph. Inputs are nodes in the graph
with no parents that are not shared and are not constants.
params : list of Theano shared variables
A list of candidate parameters for this graph. Parameters are nodes in
the graph that are shared variables.
'''
queue, seen, inputs, params = [node], set(), set(), set()
while queue:
node = queue.pop()
seen.add(node)
queue.extend(p for p in node.get_parents() if p not in seen)
if not node.get_parents():
if isinstance(node, theano.compile.SharedVariable):
params.add(node)
elif not isinstance(node, TT.Constant):
inputs.add(node)
return list(inputs), list(params) | def function[find_inputs_and_params, parameter[node]]:
constant[Walk a computation graph and extract root variables.
Parameters
----------
node : Theano expression
A symbolic Theano expression to walk.
Returns
-------
inputs : list Theano variables
A list of candidate inputs for this graph. Inputs are nodes in the graph
with no parents that are not shared and are not constants.
params : list of Theano shared variables
A list of candidate parameters for this graph. Parameters are nodes in
the graph that are shared variables.
]
<ast.Tuple object at 0x7da1b1098910> assign[=] tuple[[<ast.List object at 0x7da1b109b040>, <ast.Call object at 0x7da1b1099780>, <ast.Call object at 0x7da1b1099f60>, <ast.Call object at 0x7da1b109a8f0>]]
while name[queue] begin[:]
variable[node] assign[=] call[name[queue].pop, parameter[]]
call[name[seen].add, parameter[name[node]]]
call[name[queue].extend, parameter[<ast.GeneratorExp object at 0x7da1b1099930>]]
if <ast.UnaryOp object at 0x7da1b1098b80> begin[:]
if call[name[isinstance], parameter[name[node], name[theano].compile.SharedVariable]] begin[:]
call[name[params].add, parameter[name[node]]]
return[tuple[[<ast.Call object at 0x7da1b1098520>, <ast.Call object at 0x7da1b10984f0>]]] | keyword[def] identifier[find_inputs_and_params] ( identifier[node] ):
literal[string]
identifier[queue] , identifier[seen] , identifier[inputs] , identifier[params] =[ identifier[node] ], identifier[set] (), identifier[set] (), identifier[set] ()
keyword[while] identifier[queue] :
identifier[node] = identifier[queue] . identifier[pop] ()
identifier[seen] . identifier[add] ( identifier[node] )
identifier[queue] . identifier[extend] ( identifier[p] keyword[for] identifier[p] keyword[in] identifier[node] . identifier[get_parents] () keyword[if] identifier[p] keyword[not] keyword[in] identifier[seen] )
keyword[if] keyword[not] identifier[node] . identifier[get_parents] ():
keyword[if] identifier[isinstance] ( identifier[node] , identifier[theano] . identifier[compile] . identifier[SharedVariable] ):
identifier[params] . identifier[add] ( identifier[node] )
keyword[elif] keyword[not] identifier[isinstance] ( identifier[node] , identifier[TT] . identifier[Constant] ):
identifier[inputs] . identifier[add] ( identifier[node] )
keyword[return] identifier[list] ( identifier[inputs] ), identifier[list] ( identifier[params] ) | def find_inputs_and_params(node):
"""Walk a computation graph and extract root variables.
Parameters
----------
node : Theano expression
A symbolic Theano expression to walk.
Returns
-------
inputs : list Theano variables
A list of candidate inputs for this graph. Inputs are nodes in the graph
with no parents that are not shared and are not constants.
params : list of Theano shared variables
A list of candidate parameters for this graph. Parameters are nodes in
the graph that are shared variables.
"""
(queue, seen, inputs, params) = ([node], set(), set(), set())
while queue:
node = queue.pop()
seen.add(node)
queue.extend((p for p in node.get_parents() if p not in seen))
if not node.get_parents():
if isinstance(node, theano.compile.SharedVariable):
params.add(node) # depends on [control=['if'], data=[]]
elif not isinstance(node, TT.Constant):
inputs.add(node) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return (list(inputs), list(params)) |
def _create_lists(config, results, current, stack, inside_cartesian=None):
"""
An ugly recursive method to transform config dict
into a tree of AbstractNestedList.
"""
# Have we done it already?
try:
return results[current]
except KeyError:
pass
# Check recursion depth and detect loops
if current in stack:
raise ConfigurationError('Rule {!r} is recursive: {!r}'.format(stack[0], stack))
if len(stack) > 99:
raise ConfigurationError('Rule {!r} is too deep'.format(stack[0]))
# Track recursion depth
stack.append(current)
try:
# Check what kind of list we have
listdef = config[current]
list_type = listdef[_CONF.FIELD.TYPE]
# 1. List of words
if list_type == _CONF.TYPE.WORDS:
results[current] = WordList(listdef['words'])
# List of phrases
elif list_type == _CONF.TYPE.PHRASES:
results[current] = PhraseList(listdef['phrases'])
# 2. Simple list of lists
elif list_type == _CONF.TYPE.NESTED:
results[current] = NestedList([_create_lists(config, results, x, stack,
inside_cartesian=inside_cartesian)
for x in listdef[_CONF.FIELD.LISTS]])
# 3. Cartesian list of lists
elif list_type == _CONF.TYPE.CARTESIAN:
if inside_cartesian is not None:
raise ConfigurationError("Cartesian list {!r} contains another Cartesian list "
"{!r}. Nested Cartesian lists are not allowed."
.format(inside_cartesian, current))
results[current] = CartesianList([_create_lists(config, results, x, stack,
inside_cartesian=current)
for x in listdef[_CONF.FIELD.LISTS]])
# 4. Scalar
elif list_type == _CONF.TYPE.CONST:
results[current] = Scalar(listdef[_CONF.FIELD.VALUE])
# Unknown type
else:
raise InitializationError("Unknown list type: {!r}".format(list_type))
# Return the result
return results[current]
finally:
stack.pop() | def function[_create_lists, parameter[config, results, current, stack, inside_cartesian]]:
constant[
An ugly recursive method to transform config dict
into a tree of AbstractNestedList.
]
<ast.Try object at 0x7da20c6e4ee0>
if compare[name[current] in name[stack]] begin[:]
<ast.Raise object at 0x7da20c6e54e0>
if compare[call[name[len], parameter[name[stack]]] greater[>] constant[99]] begin[:]
<ast.Raise object at 0x7da20c6e7fd0>
call[name[stack].append, parameter[name[current]]]
<ast.Try object at 0x7da20c6e6140> | keyword[def] identifier[_create_lists] ( identifier[config] , identifier[results] , identifier[current] , identifier[stack] , identifier[inside_cartesian] = keyword[None] ):
literal[string]
keyword[try] :
keyword[return] identifier[results] [ identifier[current] ]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[if] identifier[current] keyword[in] identifier[stack] :
keyword[raise] identifier[ConfigurationError] ( literal[string] . identifier[format] ( identifier[stack] [ literal[int] ], identifier[stack] ))
keyword[if] identifier[len] ( identifier[stack] )> literal[int] :
keyword[raise] identifier[ConfigurationError] ( literal[string] . identifier[format] ( identifier[stack] [ literal[int] ]))
identifier[stack] . identifier[append] ( identifier[current] )
keyword[try] :
identifier[listdef] = identifier[config] [ identifier[current] ]
identifier[list_type] = identifier[listdef] [ identifier[_CONF] . identifier[FIELD] . identifier[TYPE] ]
keyword[if] identifier[list_type] == identifier[_CONF] . identifier[TYPE] . identifier[WORDS] :
identifier[results] [ identifier[current] ]= identifier[WordList] ( identifier[listdef] [ literal[string] ])
keyword[elif] identifier[list_type] == identifier[_CONF] . identifier[TYPE] . identifier[PHRASES] :
identifier[results] [ identifier[current] ]= identifier[PhraseList] ( identifier[listdef] [ literal[string] ])
keyword[elif] identifier[list_type] == identifier[_CONF] . identifier[TYPE] . identifier[NESTED] :
identifier[results] [ identifier[current] ]= identifier[NestedList] ([ identifier[_create_lists] ( identifier[config] , identifier[results] , identifier[x] , identifier[stack] ,
identifier[inside_cartesian] = identifier[inside_cartesian] )
keyword[for] identifier[x] keyword[in] identifier[listdef] [ identifier[_CONF] . identifier[FIELD] . identifier[LISTS] ]])
keyword[elif] identifier[list_type] == identifier[_CONF] . identifier[TYPE] . identifier[CARTESIAN] :
keyword[if] identifier[inside_cartesian] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ConfigurationError] ( literal[string]
literal[string]
. identifier[format] ( identifier[inside_cartesian] , identifier[current] ))
identifier[results] [ identifier[current] ]= identifier[CartesianList] ([ identifier[_create_lists] ( identifier[config] , identifier[results] , identifier[x] , identifier[stack] ,
identifier[inside_cartesian] = identifier[current] )
keyword[for] identifier[x] keyword[in] identifier[listdef] [ identifier[_CONF] . identifier[FIELD] . identifier[LISTS] ]])
keyword[elif] identifier[list_type] == identifier[_CONF] . identifier[TYPE] . identifier[CONST] :
identifier[results] [ identifier[current] ]= identifier[Scalar] ( identifier[listdef] [ identifier[_CONF] . identifier[FIELD] . identifier[VALUE] ])
keyword[else] :
keyword[raise] identifier[InitializationError] ( literal[string] . identifier[format] ( identifier[list_type] ))
keyword[return] identifier[results] [ identifier[current] ]
keyword[finally] :
identifier[stack] . identifier[pop] () | def _create_lists(config, results, current, stack, inside_cartesian=None):
"""
An ugly recursive method to transform config dict
into a tree of AbstractNestedList.
"""
# Have we done it already?
try:
return results[current] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
# Check recursion depth and detect loops
if current in stack:
raise ConfigurationError('Rule {!r} is recursive: {!r}'.format(stack[0], stack)) # depends on [control=['if'], data=['stack']]
if len(stack) > 99:
raise ConfigurationError('Rule {!r} is too deep'.format(stack[0])) # depends on [control=['if'], data=[]]
# Track recursion depth
stack.append(current)
try:
# Check what kind of list we have
listdef = config[current]
list_type = listdef[_CONF.FIELD.TYPE]
# 1. List of words
if list_type == _CONF.TYPE.WORDS:
results[current] = WordList(listdef['words']) # depends on [control=['if'], data=[]]
# List of phrases
elif list_type == _CONF.TYPE.PHRASES:
results[current] = PhraseList(listdef['phrases']) # depends on [control=['if'], data=[]]
# 2. Simple list of lists
elif list_type == _CONF.TYPE.NESTED:
results[current] = NestedList([_create_lists(config, results, x, stack, inside_cartesian=inside_cartesian) for x in listdef[_CONF.FIELD.LISTS]]) # depends on [control=['if'], data=[]]
# 3. Cartesian list of lists
elif list_type == _CONF.TYPE.CARTESIAN:
if inside_cartesian is not None:
raise ConfigurationError('Cartesian list {!r} contains another Cartesian list {!r}. Nested Cartesian lists are not allowed.'.format(inside_cartesian, current)) # depends on [control=['if'], data=['inside_cartesian']]
results[current] = CartesianList([_create_lists(config, results, x, stack, inside_cartesian=current) for x in listdef[_CONF.FIELD.LISTS]]) # depends on [control=['if'], data=[]]
# 4. Scalar
elif list_type == _CONF.TYPE.CONST:
results[current] = Scalar(listdef[_CONF.FIELD.VALUE]) # depends on [control=['if'], data=[]]
else:
# Unknown type
raise InitializationError('Unknown list type: {!r}'.format(list_type))
# Return the result
return results[current] # depends on [control=['try'], data=[]]
finally:
stack.pop() |
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['AR'] = 'lib'
env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}"
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib' | def function[generate, parameter[env]]:
constant[Add Builders and construction variables for lib to an Environment.]
call[name[SCons].Tool.createStaticLibBuilder, parameter[name[env]]]
call[name[msvc_setup_env_once], parameter[name[env]]]
call[name[env]][constant[AR]] assign[=] constant[lib]
call[name[env]][constant[ARFLAGS]] assign[=] call[name[SCons].Util.CLVar, parameter[constant[/nologo]]]
call[name[env]][constant[ARCOM]] assign[=] constant[${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}]
call[name[env]][constant[LIBPREFIX]] assign[=] constant[]
call[name[env]][constant[LIBSUFFIX]] assign[=] constant[.lib] | keyword[def] identifier[generate] ( identifier[env] ):
literal[string]
identifier[SCons] . identifier[Tool] . identifier[createStaticLibBuilder] ( identifier[env] )
identifier[msvc_setup_env_once] ( identifier[env] )
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= identifier[SCons] . identifier[Util] . identifier[CLVar] ( literal[string] )
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= literal[string] | def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['AR'] = 'lib'
env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}"
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib' |
def ApproximateDistanceBetweenPoints(pa, pb):
"""Finds the distance between two points on the Earth's surface.
This is an approximate distance based on assuming that the Earth is a sphere.
The points are specified by their lattitude and longitude.
Args:
pa: the first (lat, lon) point tuple
pb: the second (lat, lon) point tuple
Returns:
The distance as a float in metres.
"""
alat, alon = pa
blat, blon = pb
sa = transitfeed.Stop(lat=alat, lng=alon)
sb = transitfeed.Stop(lat=blat, lng=blon)
return transitfeed.ApproximateDistanceBetweenStops(sa, sb) | def function[ApproximateDistanceBetweenPoints, parameter[pa, pb]]:
constant[Finds the distance between two points on the Earth's surface.
This is an approximate distance based on assuming that the Earth is a sphere.
The points are specified by their lattitude and longitude.
Args:
pa: the first (lat, lon) point tuple
pb: the second (lat, lon) point tuple
Returns:
The distance as a float in metres.
]
<ast.Tuple object at 0x7da1b18308e0> assign[=] name[pa]
<ast.Tuple object at 0x7da1b1831c30> assign[=] name[pb]
variable[sa] assign[=] call[name[transitfeed].Stop, parameter[]]
variable[sb] assign[=] call[name[transitfeed].Stop, parameter[]]
return[call[name[transitfeed].ApproximateDistanceBetweenStops, parameter[name[sa], name[sb]]]] | keyword[def] identifier[ApproximateDistanceBetweenPoints] ( identifier[pa] , identifier[pb] ):
literal[string]
identifier[alat] , identifier[alon] = identifier[pa]
identifier[blat] , identifier[blon] = identifier[pb]
identifier[sa] = identifier[transitfeed] . identifier[Stop] ( identifier[lat] = identifier[alat] , identifier[lng] = identifier[alon] )
identifier[sb] = identifier[transitfeed] . identifier[Stop] ( identifier[lat] = identifier[blat] , identifier[lng] = identifier[blon] )
keyword[return] identifier[transitfeed] . identifier[ApproximateDistanceBetweenStops] ( identifier[sa] , identifier[sb] ) | def ApproximateDistanceBetweenPoints(pa, pb):
"""Finds the distance between two points on the Earth's surface.
This is an approximate distance based on assuming that the Earth is a sphere.
The points are specified by their lattitude and longitude.
Args:
pa: the first (lat, lon) point tuple
pb: the second (lat, lon) point tuple
Returns:
The distance as a float in metres.
"""
(alat, alon) = pa
(blat, blon) = pb
sa = transitfeed.Stop(lat=alat, lng=alon)
sb = transitfeed.Stop(lat=blat, lng=blon)
return transitfeed.ApproximateDistanceBetweenStops(sa, sb) |
def spread(ctx, market, side, min, max, num, total, order_expiration, account):
""" Place multiple orders
\b
:param str market: Market pair quote:base (e.g. USD:BTS)
:param str side: ``buy`` or ``sell`` quote
:param float min: minimum price to place order at
:param float max: maximum price to place order at
:param int num: Number of orders to place
:param float total: Total amount of quote to use for all orders
:param int order_expiration: Number of seconds until the order expires from the books
"""
from tqdm import tqdm
from numpy import linspace
market = Market(market)
ctx.bitshares.bundle = True
if min < max:
space = linspace(min, max, num)
else:
space = linspace(max, min, num)
func = getattr(market, side)
for p in tqdm(space):
func(p, total / float(num), account=account, expiration=order_expiration)
print_tx(ctx.bitshares.txbuffer.broadcast()) | def function[spread, parameter[ctx, market, side, min, max, num, total, order_expiration, account]]:
constant[ Place multiple orders
:param str market: Market pair quote:base (e.g. USD:BTS)
:param str side: ``buy`` or ``sell`` quote
:param float min: minimum price to place order at
:param float max: maximum price to place order at
:param int num: Number of orders to place
:param float total: Total amount of quote to use for all orders
:param int order_expiration: Number of seconds until the order expires from the books
]
from relative_module[tqdm] import module[tqdm]
from relative_module[numpy] import module[linspace]
variable[market] assign[=] call[name[Market], parameter[name[market]]]
name[ctx].bitshares.bundle assign[=] constant[True]
if compare[name[min] less[<] name[max]] begin[:]
variable[space] assign[=] call[name[linspace], parameter[name[min], name[max], name[num]]]
variable[func] assign[=] call[name[getattr], parameter[name[market], name[side]]]
for taget[name[p]] in starred[call[name[tqdm], parameter[name[space]]]] begin[:]
call[name[func], parameter[name[p], binary_operation[name[total] / call[name[float], parameter[name[num]]]]]]
call[name[print_tx], parameter[call[name[ctx].bitshares.txbuffer.broadcast, parameter[]]]] | keyword[def] identifier[spread] ( identifier[ctx] , identifier[market] , identifier[side] , identifier[min] , identifier[max] , identifier[num] , identifier[total] , identifier[order_expiration] , identifier[account] ):
literal[string]
keyword[from] identifier[tqdm] keyword[import] identifier[tqdm]
keyword[from] identifier[numpy] keyword[import] identifier[linspace]
identifier[market] = identifier[Market] ( identifier[market] )
identifier[ctx] . identifier[bitshares] . identifier[bundle] = keyword[True]
keyword[if] identifier[min] < identifier[max] :
identifier[space] = identifier[linspace] ( identifier[min] , identifier[max] , identifier[num] )
keyword[else] :
identifier[space] = identifier[linspace] ( identifier[max] , identifier[min] , identifier[num] )
identifier[func] = identifier[getattr] ( identifier[market] , identifier[side] )
keyword[for] identifier[p] keyword[in] identifier[tqdm] ( identifier[space] ):
identifier[func] ( identifier[p] , identifier[total] / identifier[float] ( identifier[num] ), identifier[account] = identifier[account] , identifier[expiration] = identifier[order_expiration] )
identifier[print_tx] ( identifier[ctx] . identifier[bitshares] . identifier[txbuffer] . identifier[broadcast] ()) | def spread(ctx, market, side, min, max, num, total, order_expiration, account):
""" Place multiple orders
\x08
:param str market: Market pair quote:base (e.g. USD:BTS)
:param str side: ``buy`` or ``sell`` quote
:param float min: minimum price to place order at
:param float max: maximum price to place order at
:param int num: Number of orders to place
:param float total: Total amount of quote to use for all orders
:param int order_expiration: Number of seconds until the order expires from the books
"""
from tqdm import tqdm
from numpy import linspace
market = Market(market)
ctx.bitshares.bundle = True
if min < max:
space = linspace(min, max, num) # depends on [control=['if'], data=['min', 'max']]
else:
space = linspace(max, min, num)
func = getattr(market, side)
for p in tqdm(space):
func(p, total / float(num), account=account, expiration=order_expiration) # depends on [control=['for'], data=['p']]
print_tx(ctx.bitshares.txbuffer.broadcast()) |
def make_payment(self, *, reference_code, description, tx_value, tx_tax, tx_tax_return_base, currency, buyer,
payer, credit_card, payment_method, payment_country, device_session_id, ip_address, cookie,
user_agent, language=None, shipping_address=None, extra_parameters=None, notify_url=None,
transaction_type=TransactionType.AUTHORIZATION_AND_CAPTURE):
"""
Authorization: used to verify if a credit card is active, if it has funds, etc.
The transaction is not complete until a transaction capture is sent (only available for accounts in Argentina,
Brazil, Peru).
Capture: terminates a previously authorized transaction.
This is when the account makes a debit to the card (only available for accounts in Argentina, Brazil, Peru).
Authorization and capture: this is the most used type of transaction.
This option sends the transaction amount to authorization and if it is approved immediately capture is performed.
Args:
reference_code: The reference code of the order. It represents the identifier of the transaction
in the shop’s system.
Alphanumeric. Min: 1 Max: 255.
description: The description of the order.
Alphanumeric. Min: 1 Max: 255.
tx_value: TX_VALUE, it is the total amount of the transaction. It can contain two decimal digits.
For example 10000.00 and 10000.
Alphanumeric. 64.
tx_tax: TX_TAX, it is the value of the VAT (Value Added Tax only valid for Colombia) of the transaction,
if no VAT is sent, the system will apply 19% automatically. It can contain two decimal digits.
Example 19000.00. In case you have no VAT you should fill out 0.
Alphanumeric. 64.
tx_tax_return_base: TX_TAX_RETURN_BASE, it is the base value on which VAT (only valid for Colombia)
is calculated. If you do not have VAT should be sent to 0.
Alphanumeric. 64.
currency: The ISO currency code associated with the amount.
http://developers.payulatam.com/en/api/variables_table.html
Alphanumeric. 3.
buyer: Buyer’s shipping address.
Example.
{
"merchantBuyerId": "1",
"fullName": "First name and second buyer name",
"emailAddress": "buyer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"shippingAddress": {
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
payer: Payer’s data.
Example.
{
"merchantPayerId": "1",
"fullName": "First name and second payer name",
"emailAddress": "payer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"billingAddress": {
"street1": "calle 93",
"street2": "125544",
"city": "Bogota",
"state": "Bogota DC",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
credit_card: Debit card’s data.
Example.
{
"number": "4097440000000004",
"securityCode": "321",
"expirationDate": "2022/12",
"name": "APPROVED"
}
payment_method: Payment method.
Alphanumeric. 32.
payment_country: Payment countries.
http://developers.payulatam.com/en/api/variables_table.html
device_session_id: The session identifier of the device where the transaction was performed from.
Alphanumeric. Max: 255.
ip_address: The IP address of the device where the transaction was performed from.
Alphanumeric. Max: 39.
cookie: The cookie stored on the device where the transaction was performed from.
Alphanumeric. Max: 255.
user_agent: The user agent of the browser from which the transaction was performed.
Alphanumeric. Max: 1024
language: The language used in the emails that are sent to the buyer and seller.
Alphanumeric. 2
shipping_address: The shipping address.
Example.
{
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "0000000",
"phone": "7563126"
}
extra_parameters: Additional parameters or data associated with a transaction. These parameters may vary
according to the payment means or shop’s preferences.
Example.
{
"INSTALLMENTS_NUMBER": 1
}
notify_url: The URL notification or order confirmation.
Alphanumeric. Max: 2048.
transaction_type:
Returns:
"""
if not isinstance(payment_country, Country):
payment_country = Country(payment_country)
if not isinstance(transaction_type, TransactionType):
transaction_type = TransactionType(transaction_type)
if not isinstance(payment_method, Franchise):
payment_method = Franchise(payment_method)
if not isinstance(currency, Currency):
currency = Currency(currency)
franchises = get_available_franchise_for_payment(payment_country, transaction_type)
if not franchises or payment_method not in franchises:
fmt = 'The credit card franchise {} with transaction type {} is not available for {}.'
raise CVVRequiredError(fmt.format(payment_method.value, transaction_type.value, payment_country.name))
payload = {
"language": self.client.language.value,
"command": PaymentCommand.SUBMIT_TRANSACTION.value,
"merchant": {
"apiKey": self.client.api_key,
"apiLogin": self.client.api_login
},
"transaction": {
"order": {
"accountId": self.client.account_id,
"referenceCode": reference_code,
"description": description,
"language": language or self.client.language.value,
"signature": self.client._get_signature(reference_code, tx_value, currency.value),
"notifyUrl": notify_url,
"additionalValues": {
"TX_VALUE": {
"value": tx_value,
"currency": currency.value
},
"TX_TAX": {
"value": tx_tax,
"currency": currency.value
},
"TX_TAX_RETURN_BASE": {
"value": tx_tax_return_base,
"currency": currency.value
}
},
"buyer": buyer,
"shippingAddress": shipping_address
},
"payer": payer,
"creditCard": credit_card,
"extraParameters": extra_parameters,
"type": transaction_type.value,
"paymentMethod": payment_method.value,
"paymentCountry": payment_country.value,
"deviceSessionId": device_session_id,
"ipAddress": ip_address,
"cookie": cookie,
"userAgent": user_agent
},
"test": self.client.is_test
}
return self.client._post(self.url, json=payload) | def function[make_payment, parameter[self]]:
constant[
Authorization: used to verify if a credit card is active, if it has funds, etc.
The transaction is not complete until a transaction capture is sent (only available for accounts in Argentina,
Brazil, Peru).
Capture: terminates a previously authorized transaction.
This is when the account makes a debit to the card (only available for accounts in Argentina, Brazil, Peru).
Authorization and capture: this is the most used type of transaction.
This option sends the transaction amount to authorization and if it is approved immediately capture is performed.
Args:
reference_code: The reference code of the order. It represents the identifier of the transaction
in the shop’s system.
Alphanumeric. Min: 1 Max: 255.
description: The description of the order.
Alphanumeric. Min: 1 Max: 255.
tx_value: TX_VALUE, it is the total amount of the transaction. It can contain two decimal digits.
For example 10000.00 and 10000.
Alphanumeric. 64.
tx_tax: TX_TAX, it is the value of the VAT (Value Added Tax only valid for Colombia) of the transaction,
if no VAT is sent, the system will apply 19% automatically. It can contain two decimal digits.
Example 19000.00. In case you have no VAT you should fill out 0.
Alphanumeric. 64.
tx_tax_return_base: TX_TAX_RETURN_BASE, it is the base value on which VAT (only valid for Colombia)
is calculated. If you do not have VAT should be sent to 0.
Alphanumeric. 64.
currency: The ISO currency code associated with the amount.
http://developers.payulatam.com/en/api/variables_table.html
Alphanumeric. 3.
buyer: Buyer’s shipping address.
Example.
{
"merchantBuyerId": "1",
"fullName": "First name and second buyer name",
"emailAddress": "buyer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"shippingAddress": {
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
payer: Payer’s data.
Example.
{
"merchantPayerId": "1",
"fullName": "First name and second payer name",
"emailAddress": "payer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"billingAddress": {
"street1": "calle 93",
"street2": "125544",
"city": "Bogota",
"state": "Bogota DC",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
credit_card: Debit card’s data.
Example.
{
"number": "4097440000000004",
"securityCode": "321",
"expirationDate": "2022/12",
"name": "APPROVED"
}
payment_method: Payment method.
Alphanumeric. 32.
payment_country: Payment countries.
http://developers.payulatam.com/en/api/variables_table.html
device_session_id: The session identifier of the device where the transaction was performed from.
Alphanumeric. Max: 255.
ip_address: The IP address of the device where the transaction was performed from.
Alphanumeric. Max: 39.
cookie: The cookie stored on the device where the transaction was performed from.
Alphanumeric. Max: 255.
user_agent: The user agent of the browser from which the transaction was performed.
Alphanumeric. Max: 1024
language: The language used in the emails that are sent to the buyer and seller.
Alphanumeric. 2
shipping_address: The shipping address.
Example.
{
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "0000000",
"phone": "7563126"
}
extra_parameters: Additional parameters or data associated with a transaction. These parameters may vary
according to the payment means or shop’s preferences.
Example.
{
"INSTALLMENTS_NUMBER": 1
}
notify_url: The URL notification or order confirmation.
Alphanumeric. Max: 2048.
transaction_type:
Returns:
]
if <ast.UnaryOp object at 0x7da18f09d2d0> begin[:]
variable[payment_country] assign[=] call[name[Country], parameter[name[payment_country]]]
if <ast.UnaryOp object at 0x7da2044c02b0> begin[:]
variable[transaction_type] assign[=] call[name[TransactionType], parameter[name[transaction_type]]]
if <ast.UnaryOp object at 0x7da2044c0d90> begin[:]
variable[payment_method] assign[=] call[name[Franchise], parameter[name[payment_method]]]
if <ast.UnaryOp object at 0x7da20eb29e70> begin[:]
variable[currency] assign[=] call[name[Currency], parameter[name[currency]]]
variable[franchises] assign[=] call[name[get_available_franchise_for_payment], parameter[name[payment_country], name[transaction_type]]]
if <ast.BoolOp object at 0x7da20c6c5c00> begin[:]
variable[fmt] assign[=] constant[The credit card franchise {} with transaction type {} is not available for {}.]
<ast.Raise object at 0x7da2044c1540>
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da2044c0b50>, <ast.Constant object at 0x7da2044c1f60>, <ast.Constant object at 0x7da2044c1810>, <ast.Constant object at 0x7da2044c0580>, <ast.Constant object at 0x7da2044c3100>], [<ast.Attribute object at 0x7da2044c1ff0>, <ast.Attribute object at 0x7da2044c3940>, <ast.Dict object at 0x7da207f99720>, <ast.Dict object at 0x7da207f9ae60>, <ast.Attribute object at 0x7da207f9af20>]]
return[call[name[self].client._post, parameter[name[self].url]]] | keyword[def] identifier[make_payment] ( identifier[self] ,*, identifier[reference_code] , identifier[description] , identifier[tx_value] , identifier[tx_tax] , identifier[tx_tax_return_base] , identifier[currency] , identifier[buyer] ,
identifier[payer] , identifier[credit_card] , identifier[payment_method] , identifier[payment_country] , identifier[device_session_id] , identifier[ip_address] , identifier[cookie] ,
identifier[user_agent] , identifier[language] = keyword[None] , identifier[shipping_address] = keyword[None] , identifier[extra_parameters] = keyword[None] , identifier[notify_url] = keyword[None] ,
identifier[transaction_type] = identifier[TransactionType] . identifier[AUTHORIZATION_AND_CAPTURE] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[payment_country] , identifier[Country] ):
identifier[payment_country] = identifier[Country] ( identifier[payment_country] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[transaction_type] , identifier[TransactionType] ):
identifier[transaction_type] = identifier[TransactionType] ( identifier[transaction_type] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[payment_method] , identifier[Franchise] ):
identifier[payment_method] = identifier[Franchise] ( identifier[payment_method] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[currency] , identifier[Currency] ):
identifier[currency] = identifier[Currency] ( identifier[currency] )
identifier[franchises] = identifier[get_available_franchise_for_payment] ( identifier[payment_country] , identifier[transaction_type] )
keyword[if] keyword[not] identifier[franchises] keyword[or] identifier[payment_method] keyword[not] keyword[in] identifier[franchises] :
identifier[fmt] = literal[string]
keyword[raise] identifier[CVVRequiredError] ( identifier[fmt] . identifier[format] ( identifier[payment_method] . identifier[value] , identifier[transaction_type] . identifier[value] , identifier[payment_country] . identifier[name] ))
identifier[payload] ={
literal[string] : identifier[self] . identifier[client] . identifier[language] . identifier[value] ,
literal[string] : identifier[PaymentCommand] . identifier[SUBMIT_TRANSACTION] . identifier[value] ,
literal[string] :{
literal[string] : identifier[self] . identifier[client] . identifier[api_key] ,
literal[string] : identifier[self] . identifier[client] . identifier[api_login]
},
literal[string] :{
literal[string] :{
literal[string] : identifier[self] . identifier[client] . identifier[account_id] ,
literal[string] : identifier[reference_code] ,
literal[string] : identifier[description] ,
literal[string] : identifier[language] keyword[or] identifier[self] . identifier[client] . identifier[language] . identifier[value] ,
literal[string] : identifier[self] . identifier[client] . identifier[_get_signature] ( identifier[reference_code] , identifier[tx_value] , identifier[currency] . identifier[value] ),
literal[string] : identifier[notify_url] ,
literal[string] :{
literal[string] :{
literal[string] : identifier[tx_value] ,
literal[string] : identifier[currency] . identifier[value]
},
literal[string] :{
literal[string] : identifier[tx_tax] ,
literal[string] : identifier[currency] . identifier[value]
},
literal[string] :{
literal[string] : identifier[tx_tax_return_base] ,
literal[string] : identifier[currency] . identifier[value]
}
},
literal[string] : identifier[buyer] ,
literal[string] : identifier[shipping_address]
},
literal[string] : identifier[payer] ,
literal[string] : identifier[credit_card] ,
literal[string] : identifier[extra_parameters] ,
literal[string] : identifier[transaction_type] . identifier[value] ,
literal[string] : identifier[payment_method] . identifier[value] ,
literal[string] : identifier[payment_country] . identifier[value] ,
literal[string] : identifier[device_session_id] ,
literal[string] : identifier[ip_address] ,
literal[string] : identifier[cookie] ,
literal[string] : identifier[user_agent]
},
literal[string] : identifier[self] . identifier[client] . identifier[is_test]
}
keyword[return] identifier[self] . identifier[client] . identifier[_post] ( identifier[self] . identifier[url] , identifier[json] = identifier[payload] ) | def make_payment(self, *, reference_code, description, tx_value, tx_tax, tx_tax_return_base, currency, buyer, payer, credit_card, payment_method, payment_country, device_session_id, ip_address, cookie, user_agent, language=None, shipping_address=None, extra_parameters=None, notify_url=None, transaction_type=TransactionType.AUTHORIZATION_AND_CAPTURE):
"""
Authorization: used to verify if a credit card is active, if it has funds, etc.
The transaction is not complete until a transaction capture is sent (only available for accounts in Argentina,
Brazil, Peru).
Capture: terminates a previously authorized transaction.
This is when the account makes a debit to the card (only available for accounts in Argentina, Brazil, Peru).
Authorization and capture: this is the most used type of transaction.
This option sends the transaction amount to authorization and if it is approved immediately capture is performed.
Args:
reference_code: The reference code of the order. It represents the identifier of the transaction
in the shop’s system.
Alphanumeric. Min: 1 Max: 255.
description: The description of the order.
Alphanumeric. Min: 1 Max: 255.
tx_value: TX_VALUE, it is the total amount of the transaction. It can contain two decimal digits.
For example 10000.00 and 10000.
Alphanumeric. 64.
tx_tax: TX_TAX, it is the value of the VAT (Value Added Tax only valid for Colombia) of the transaction,
if no VAT is sent, the system will apply 19% automatically. It can contain two decimal digits.
Example 19000.00. In case you have no VAT you should fill out 0.
Alphanumeric. 64.
tx_tax_return_base: TX_TAX_RETURN_BASE, it is the base value on which VAT (only valid for Colombia)
is calculated. If you do not have VAT should be sent to 0.
Alphanumeric. 64.
currency: The ISO currency code associated with the amount.
http://developers.payulatam.com/en/api/variables_table.html
Alphanumeric. 3.
buyer: Buyer’s shipping address.
Example.
{
"merchantBuyerId": "1",
"fullName": "First name and second buyer name",
"emailAddress": "buyer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"shippingAddress": {
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
payer: Payer’s data.
Example.
{
"merchantPayerId": "1",
"fullName": "First name and second payer name",
"emailAddress": "payer_test@test.com",
"contactPhone": "7563126",
"dniNumber": "5415668464654",
"billingAddress": {
"street1": "calle 93",
"street2": "125544",
"city": "Bogota",
"state": "Bogota DC",
"country": "CO",
"postalCode": "000000",
"phone": "7563126"
}
}
credit_card: Debit card’s data.
Example.
{
"number": "4097440000000004",
"securityCode": "321",
"expirationDate": "2022/12",
"name": "APPROVED"
}
payment_method: Payment method.
Alphanumeric. 32.
payment_country: Payment countries.
http://developers.payulatam.com/en/api/variables_table.html
device_session_id: The session identifier of the device where the transaction was performed from.
Alphanumeric. Max: 255.
ip_address: The IP address of the device where the transaction was performed from.
Alphanumeric. Max: 39.
cookie: The cookie stored on the device where the transaction was performed from.
Alphanumeric. Max: 255.
user_agent: The user agent of the browser from which the transaction was performed.
Alphanumeric. Max: 1024
language: The language used in the emails that are sent to the buyer and seller.
Alphanumeric. 2
shipping_address: The shipping address.
Example.
{
"street1": "calle 100",
"street2": "5555487",
"city": "Medellin",
"state": "Antioquia",
"country": "CO",
"postalCode": "0000000",
"phone": "7563126"
}
extra_parameters: Additional parameters or data associated with a transaction. These parameters may vary
according to the payment means or shop’s preferences.
Example.
{
"INSTALLMENTS_NUMBER": 1
}
notify_url: The URL notification or order confirmation.
Alphanumeric. Max: 2048.
transaction_type:
Returns:
"""
if not isinstance(payment_country, Country):
payment_country = Country(payment_country) # depends on [control=['if'], data=[]]
if not isinstance(transaction_type, TransactionType):
transaction_type = TransactionType(transaction_type) # depends on [control=['if'], data=[]]
if not isinstance(payment_method, Franchise):
payment_method = Franchise(payment_method) # depends on [control=['if'], data=[]]
if not isinstance(currency, Currency):
currency = Currency(currency) # depends on [control=['if'], data=[]]
franchises = get_available_franchise_for_payment(payment_country, transaction_type)
if not franchises or payment_method not in franchises:
fmt = 'The credit card franchise {} with transaction type {} is not available for {}.'
raise CVVRequiredError(fmt.format(payment_method.value, transaction_type.value, payment_country.name)) # depends on [control=['if'], data=[]]
payload = {'language': self.client.language.value, 'command': PaymentCommand.SUBMIT_TRANSACTION.value, 'merchant': {'apiKey': self.client.api_key, 'apiLogin': self.client.api_login}, 'transaction': {'order': {'accountId': self.client.account_id, 'referenceCode': reference_code, 'description': description, 'language': language or self.client.language.value, 'signature': self.client._get_signature(reference_code, tx_value, currency.value), 'notifyUrl': notify_url, 'additionalValues': {'TX_VALUE': {'value': tx_value, 'currency': currency.value}, 'TX_TAX': {'value': tx_tax, 'currency': currency.value}, 'TX_TAX_RETURN_BASE': {'value': tx_tax_return_base, 'currency': currency.value}}, 'buyer': buyer, 'shippingAddress': shipping_address}, 'payer': payer, 'creditCard': credit_card, 'extraParameters': extra_parameters, 'type': transaction_type.value, 'paymentMethod': payment_method.value, 'paymentCountry': payment_country.value, 'deviceSessionId': device_session_id, 'ipAddress': ip_address, 'cookie': cookie, 'userAgent': user_agent}, 'test': self.client.is_test}
return self.client._post(self.url, json=payload) |
def update(self, group: 'SentenceGroup', flags: Flags) -> None:
"""
This object is considered to be a "global" sentence group while the
other one is flags-specific. All data related to the specified flags
will be overwritten by the content of the specified group.
"""
to_append = []
for old, new in zip_longest(self.sentences, group.sentences):
if old is None:
old = Sentence()
to_append.append(old)
if new is None:
new = Sentence()
old.update(new, flags)
self.sentences.extend(to_append) | def function[update, parameter[self, group, flags]]:
constant[
This object is considered to be a "global" sentence group while the
other one is flags-specific. All data related to the specified flags
will be overwritten by the content of the specified group.
]
variable[to_append] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c990fd0>, <ast.Name object at 0x7da20c993760>]]] in starred[call[name[zip_longest], parameter[name[self].sentences, name[group].sentences]]] begin[:]
if compare[name[old] is constant[None]] begin[:]
variable[old] assign[=] call[name[Sentence], parameter[]]
call[name[to_append].append, parameter[name[old]]]
if compare[name[new] is constant[None]] begin[:]
variable[new] assign[=] call[name[Sentence], parameter[]]
call[name[old].update, parameter[name[new], name[flags]]]
call[name[self].sentences.extend, parameter[name[to_append]]] | keyword[def] identifier[update] ( identifier[self] , identifier[group] : literal[string] , identifier[flags] : identifier[Flags] )-> keyword[None] :
literal[string]
identifier[to_append] =[]
keyword[for] identifier[old] , identifier[new] keyword[in] identifier[zip_longest] ( identifier[self] . identifier[sentences] , identifier[group] . identifier[sentences] ):
keyword[if] identifier[old] keyword[is] keyword[None] :
identifier[old] = identifier[Sentence] ()
identifier[to_append] . identifier[append] ( identifier[old] )
keyword[if] identifier[new] keyword[is] keyword[None] :
identifier[new] = identifier[Sentence] ()
identifier[old] . identifier[update] ( identifier[new] , identifier[flags] )
identifier[self] . identifier[sentences] . identifier[extend] ( identifier[to_append] ) | def update(self, group: 'SentenceGroup', flags: Flags) -> None:
"""
This object is considered to be a "global" sentence group while the
other one is flags-specific. All data related to the specified flags
will be overwritten by the content of the specified group.
"""
to_append = []
for (old, new) in zip_longest(self.sentences, group.sentences):
if old is None:
old = Sentence()
to_append.append(old) # depends on [control=['if'], data=['old']]
if new is None:
new = Sentence() # depends on [control=['if'], data=['new']]
old.update(new, flags) # depends on [control=['for'], data=[]]
self.sentences.extend(to_append) |
def from_block(cls, block):
"""Instantiate this class given a raw block (see parse_raw)."""
rseqs = cma.realign_seqs(block)
records = (SeqRecord(Seq(rseq, extended_protein),
id=bseq['id'],
description=bseq['description'],
dbxrefs=bseq['dbxrefs'].values(), # list of strings
annotations=dict(
index=bseq['index'],
length=bseq['length'],
dbxrefs=bseq['dbxrefs'],
phylum=bseq['phylum'],
taxchar=bseq['taxchar'],
head_seq=bseq['head_seq'],
tail_seq=bseq['tail_seq'],
head_len=bseq['head_len'],
tail_len=bseq['tail_len'], # dict
),
# ENH: annotate with conservation levels
# letter_annotations=bseq['x'],
)
for bseq, rseq in zip(block['sequences'], rseqs))
return cls(records,
# CMA attributes
# block['one'],
block['level'],
block['name'],
block['params'],
block['query_length'],
block['query_chars'],
) | def function[from_block, parameter[cls, block]]:
constant[Instantiate this class given a raw block (see parse_raw).]
variable[rseqs] assign[=] call[name[cma].realign_seqs, parameter[name[block]]]
variable[records] assign[=] <ast.GeneratorExp object at 0x7da1b1ec1c30>
return[call[name[cls], parameter[name[records], call[name[block]][constant[level]], call[name[block]][constant[name]], call[name[block]][constant[params]], call[name[block]][constant[query_length]], call[name[block]][constant[query_chars]]]]] | keyword[def] identifier[from_block] ( identifier[cls] , identifier[block] ):
literal[string]
identifier[rseqs] = identifier[cma] . identifier[realign_seqs] ( identifier[block] )
identifier[records] =( identifier[SeqRecord] ( identifier[Seq] ( identifier[rseq] , identifier[extended_protein] ),
identifier[id] = identifier[bseq] [ literal[string] ],
identifier[description] = identifier[bseq] [ literal[string] ],
identifier[dbxrefs] = identifier[bseq] [ literal[string] ]. identifier[values] (),
identifier[annotations] = identifier[dict] (
identifier[index] = identifier[bseq] [ literal[string] ],
identifier[length] = identifier[bseq] [ literal[string] ],
identifier[dbxrefs] = identifier[bseq] [ literal[string] ],
identifier[phylum] = identifier[bseq] [ literal[string] ],
identifier[taxchar] = identifier[bseq] [ literal[string] ],
identifier[head_seq] = identifier[bseq] [ literal[string] ],
identifier[tail_seq] = identifier[bseq] [ literal[string] ],
identifier[head_len] = identifier[bseq] [ literal[string] ],
identifier[tail_len] = identifier[bseq] [ literal[string] ],
),
)
keyword[for] identifier[bseq] , identifier[rseq] keyword[in] identifier[zip] ( identifier[block] [ literal[string] ], identifier[rseqs] ))
keyword[return] identifier[cls] ( identifier[records] ,
identifier[block] [ literal[string] ],
identifier[block] [ literal[string] ],
identifier[block] [ literal[string] ],
identifier[block] [ literal[string] ],
identifier[block] [ literal[string] ],
) | def from_block(cls, block):
"""Instantiate this class given a raw block (see parse_raw)."""
rseqs = cma.realign_seqs(block) # list of strings
# dict
# ENH: annotate with conservation levels
# letter_annotations=bseq['x'],
records = (SeqRecord(Seq(rseq, extended_protein), id=bseq['id'], description=bseq['description'], dbxrefs=bseq['dbxrefs'].values(), annotations=dict(index=bseq['index'], length=bseq['length'], dbxrefs=bseq['dbxrefs'], phylum=bseq['phylum'], taxchar=bseq['taxchar'], head_seq=bseq['head_seq'], tail_seq=bseq['tail_seq'], head_len=bseq['head_len'], tail_len=bseq['tail_len'])) for (bseq, rseq) in zip(block['sequences'], rseqs))
# CMA attributes
# block['one'],
return cls(records, block['level'], block['name'], block['params'], block['query_length'], block['query_chars']) |
def OnStartup(self):
"""A handler that is called on client startup."""
# We read the transaction log and fail any requests that are in it. If there
# is anything in the transaction log we assume its there because we crashed
# last time and let the server know.
last_request = self.transaction_log.Get()
if last_request:
status = rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED,
error_message="Client killed during transaction")
if self.nanny_controller:
nanny_status = self.nanny_controller.GetNannyStatus()
if nanny_status:
status.nanny_status = nanny_status
self.SendReply(
status,
request_id=last_request.request_id,
response_id=1,
session_id=last_request.session_id,
message_type=rdf_flows.GrrMessage.Type.STATUS)
self.transaction_log.Clear()
# Inform the server that we started.
action = admin.SendStartupInfo(grr_worker=self)
action.Run(None, ttl=1) | def function[OnStartup, parameter[self]]:
constant[A handler that is called on client startup.]
variable[last_request] assign[=] call[name[self].transaction_log.Get, parameter[]]
if name[last_request] begin[:]
variable[status] assign[=] call[name[rdf_flows].GrrStatus, parameter[]]
if name[self].nanny_controller begin[:]
variable[nanny_status] assign[=] call[name[self].nanny_controller.GetNannyStatus, parameter[]]
if name[nanny_status] begin[:]
name[status].nanny_status assign[=] name[nanny_status]
call[name[self].SendReply, parameter[name[status]]]
call[name[self].transaction_log.Clear, parameter[]]
variable[action] assign[=] call[name[admin].SendStartupInfo, parameter[]]
call[name[action].Run, parameter[constant[None]]] | keyword[def] identifier[OnStartup] ( identifier[self] ):
literal[string]
identifier[last_request] = identifier[self] . identifier[transaction_log] . identifier[Get] ()
keyword[if] identifier[last_request] :
identifier[status] = identifier[rdf_flows] . identifier[GrrStatus] (
identifier[status] = identifier[rdf_flows] . identifier[GrrStatus] . identifier[ReturnedStatus] . identifier[CLIENT_KILLED] ,
identifier[error_message] = literal[string] )
keyword[if] identifier[self] . identifier[nanny_controller] :
identifier[nanny_status] = identifier[self] . identifier[nanny_controller] . identifier[GetNannyStatus] ()
keyword[if] identifier[nanny_status] :
identifier[status] . identifier[nanny_status] = identifier[nanny_status]
identifier[self] . identifier[SendReply] (
identifier[status] ,
identifier[request_id] = identifier[last_request] . identifier[request_id] ,
identifier[response_id] = literal[int] ,
identifier[session_id] = identifier[last_request] . identifier[session_id] ,
identifier[message_type] = identifier[rdf_flows] . identifier[GrrMessage] . identifier[Type] . identifier[STATUS] )
identifier[self] . identifier[transaction_log] . identifier[Clear] ()
identifier[action] = identifier[admin] . identifier[SendStartupInfo] ( identifier[grr_worker] = identifier[self] )
identifier[action] . identifier[Run] ( keyword[None] , identifier[ttl] = literal[int] ) | def OnStartup(self):
"""A handler that is called on client startup."""
# We read the transaction log and fail any requests that are in it. If there
# is anything in the transaction log we assume its there because we crashed
# last time and let the server know.
last_request = self.transaction_log.Get()
if last_request:
status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED, error_message='Client killed during transaction')
if self.nanny_controller:
nanny_status = self.nanny_controller.GetNannyStatus()
if nanny_status:
status.nanny_status = nanny_status # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.SendReply(status, request_id=last_request.request_id, response_id=1, session_id=last_request.session_id, message_type=rdf_flows.GrrMessage.Type.STATUS) # depends on [control=['if'], data=[]]
self.transaction_log.Clear()
# Inform the server that we started.
action = admin.SendStartupInfo(grr_worker=self)
action.Run(None, ttl=1) |
def as_dict(self, **extra):
"""
Converts all available emails to dictionaries.
:return: List of dictionaries.
"""
return [self._construct_email(email, **extra) for email in self.emails] | def function[as_dict, parameter[self]]:
constant[
Converts all available emails to dictionaries.
:return: List of dictionaries.
]
return[<ast.ListComp object at 0x7da1b0f41ab0>] | keyword[def] identifier[as_dict] ( identifier[self] ,** identifier[extra] ):
literal[string]
keyword[return] [ identifier[self] . identifier[_construct_email] ( identifier[email] ,** identifier[extra] ) keyword[for] identifier[email] keyword[in] identifier[self] . identifier[emails] ] | def as_dict(self, **extra):
"""
Converts all available emails to dictionaries.
:return: List of dictionaries.
"""
return [self._construct_email(email, **extra) for email in self.emails] |
def list_experiments(self, collection_name):
"""
List all experiments that belong to a collection.
Args:
collection_name (string): Name of the parent collection.
Returns:
(list)
Raises:
requests.HTTPError on failure.
"""
exp = ExperimentResource(
name='', collection_name=collection_name, coord_frame='foo')
return self._list_resource(exp) | def function[list_experiments, parameter[self, collection_name]]:
constant[
List all experiments that belong to a collection.
Args:
collection_name (string): Name of the parent collection.
Returns:
(list)
Raises:
requests.HTTPError on failure.
]
variable[exp] assign[=] call[name[ExperimentResource], parameter[]]
return[call[name[self]._list_resource, parameter[name[exp]]]] | keyword[def] identifier[list_experiments] ( identifier[self] , identifier[collection_name] ):
literal[string]
identifier[exp] = identifier[ExperimentResource] (
identifier[name] = literal[string] , identifier[collection_name] = identifier[collection_name] , identifier[coord_frame] = literal[string] )
keyword[return] identifier[self] . identifier[_list_resource] ( identifier[exp] ) | def list_experiments(self, collection_name):
"""
List all experiments that belong to a collection.
Args:
collection_name (string): Name of the parent collection.
Returns:
(list)
Raises:
requests.HTTPError on failure.
"""
exp = ExperimentResource(name='', collection_name=collection_name, coord_frame='foo')
return self._list_resource(exp) |
def get(self, receiver_id=None, event_id=None):
"""Handle GET request."""
event = self._get_event(receiver_id, event_id)
return make_response(event) | def function[get, parameter[self, receiver_id, event_id]]:
constant[Handle GET request.]
variable[event] assign[=] call[name[self]._get_event, parameter[name[receiver_id], name[event_id]]]
return[call[name[make_response], parameter[name[event]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[receiver_id] = keyword[None] , identifier[event_id] = keyword[None] ):
literal[string]
identifier[event] = identifier[self] . identifier[_get_event] ( identifier[receiver_id] , identifier[event_id] )
keyword[return] identifier[make_response] ( identifier[event] ) | def get(self, receiver_id=None, event_id=None):
"""Handle GET request."""
event = self._get_event(receiver_id, event_id)
return make_response(event) |
def _cromwell_debug(metadata):
"""Format Cromwell failures to make debugging easier.
"""
def get_failed_calls(cur, key=None):
if key is None: key = []
out = []
if isinstance(cur, dict) and "failures" in cur and "callRoot" in cur:
out.append((key, cur))
elif isinstance(cur, dict):
for k, v in cur.items():
out.extend(get_failed_calls(v, key + [k]))
elif isinstance(cur, (list, tuple)):
for i, v in enumerate(cur):
out.extend(get_failed_calls(v, key + [i]))
return out
print("Failed bcbio Cromwell run")
print("-------------------------")
for fail_k, fail_call in get_failed_calls(metadata["calls"]):
root_dir = os.path.join("cromwell_work", os.path.relpath(fail_call["callRoot"]))
print("Failure in step: %s" % ".".join([str(x) for x in fail_k]))
print(" bcbio log file : %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-debug.log"))
print(" bcbio commands file: %s" % os.path.join(root_dir, "execution", "log",
"bcbio-nextgen-commands.log"))
print(" Cromwell directory : %s" % root_dir)
print() | def function[_cromwell_debug, parameter[metadata]]:
constant[Format Cromwell failures to make debugging easier.
]
def function[get_failed_calls, parameter[cur, key]]:
if compare[name[key] is constant[None]] begin[:]
variable[key] assign[=] list[[]]
variable[out] assign[=] list[[]]
if <ast.BoolOp object at 0x7da20c76cee0> begin[:]
call[name[out].append, parameter[tuple[[<ast.Name object at 0x7da20c76cd90>, <ast.Name object at 0x7da20c76de10>]]]]
return[name[out]]
call[name[print], parameter[constant[Failed bcbio Cromwell run]]]
call[name[print], parameter[constant[-------------------------]]]
for taget[tuple[[<ast.Name object at 0x7da20c76cf40>, <ast.Name object at 0x7da20c76eb30>]]] in starred[call[name[get_failed_calls], parameter[call[name[metadata]][constant[calls]]]]] begin[:]
variable[root_dir] assign[=] call[name[os].path.join, parameter[constant[cromwell_work], call[name[os].path.relpath, parameter[call[name[fail_call]][constant[callRoot]]]]]]
call[name[print], parameter[binary_operation[constant[Failure in step: %s] <ast.Mod object at 0x7da2590d6920> call[constant[.].join, parameter[<ast.ListComp object at 0x7da20c76cca0>]]]]]
call[name[print], parameter[binary_operation[constant[ bcbio log file : %s] <ast.Mod object at 0x7da2590d6920> call[name[os].path.join, parameter[name[root_dir], constant[execution], constant[log], constant[bcbio-nextgen-debug.log]]]]]]
call[name[print], parameter[binary_operation[constant[ bcbio commands file: %s] <ast.Mod object at 0x7da2590d6920> call[name[os].path.join, parameter[name[root_dir], constant[execution], constant[log], constant[bcbio-nextgen-commands.log]]]]]]
call[name[print], parameter[binary_operation[constant[ Cromwell directory : %s] <ast.Mod object at 0x7da2590d6920> name[root_dir]]]]
call[name[print], parameter[]] | keyword[def] identifier[_cromwell_debug] ( identifier[metadata] ):
literal[string]
keyword[def] identifier[get_failed_calls] ( identifier[cur] , identifier[key] = keyword[None] ):
keyword[if] identifier[key] keyword[is] keyword[None] : identifier[key] =[]
identifier[out] =[]
keyword[if] identifier[isinstance] ( identifier[cur] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[cur] keyword[and] literal[string] keyword[in] identifier[cur] :
identifier[out] . identifier[append] (( identifier[key] , identifier[cur] ))
keyword[elif] identifier[isinstance] ( identifier[cur] , identifier[dict] ):
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[cur] . identifier[items] ():
identifier[out] . identifier[extend] ( identifier[get_failed_calls] ( identifier[v] , identifier[key] +[ identifier[k] ]))
keyword[elif] identifier[isinstance] ( identifier[cur] ,( identifier[list] , identifier[tuple] )):
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[cur] ):
identifier[out] . identifier[extend] ( identifier[get_failed_calls] ( identifier[v] , identifier[key] +[ identifier[i] ]))
keyword[return] identifier[out]
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[for] identifier[fail_k] , identifier[fail_call] keyword[in] identifier[get_failed_calls] ( identifier[metadata] [ literal[string] ]):
identifier[root_dir] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[os] . identifier[path] . identifier[relpath] ( identifier[fail_call] [ literal[string] ]))
identifier[print] ( literal[string] % literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[fail_k] ]))
identifier[print] ( literal[string] % identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] , literal[string] , literal[string] , literal[string] ))
identifier[print] ( literal[string] % identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] , literal[string] , literal[string] ,
literal[string] ))
identifier[print] ( literal[string] % identifier[root_dir] )
identifier[print] () | def _cromwell_debug(metadata):
"""Format Cromwell failures to make debugging easier.
"""
def get_failed_calls(cur, key=None):
if key is None:
key = [] # depends on [control=['if'], data=['key']]
out = []
if isinstance(cur, dict) and 'failures' in cur and ('callRoot' in cur):
out.append((key, cur)) # depends on [control=['if'], data=[]]
elif isinstance(cur, dict):
for (k, v) in cur.items():
out.extend(get_failed_calls(v, key + [k])) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(cur, (list, tuple)):
for (i, v) in enumerate(cur):
out.extend(get_failed_calls(v, key + [i])) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return out
print('Failed bcbio Cromwell run')
print('-------------------------')
for (fail_k, fail_call) in get_failed_calls(metadata['calls']):
root_dir = os.path.join('cromwell_work', os.path.relpath(fail_call['callRoot']))
print('Failure in step: %s' % '.'.join([str(x) for x in fail_k]))
print(' bcbio log file : %s' % os.path.join(root_dir, 'execution', 'log', 'bcbio-nextgen-debug.log'))
print(' bcbio commands file: %s' % os.path.join(root_dir, 'execution', 'log', 'bcbio-nextgen-commands.log'))
print(' Cromwell directory : %s' % root_dir)
print() # depends on [control=['for'], data=[]] |
def get_mesh_name_from_web(mesh_id):
    """Get the MESH label for the given MESH ID using the NLM REST API.

    Parameters
    ----------
    mesh_id : str
        MESH Identifier, e.g. 'D003094'.

    Returns
    -------
    str
        Label for the MESH ID, or None if the query failed or no label was
        found.
    """
    url = MESH_URL + mesh_id + '.json'
    # NOTE(review): no timeout is set, so this call can block indefinitely on
    # a stalled connection -- consider requests.get(url, timeout=...).
    resp = requests.get(url)
    if resp.status_code != 200:
        return None
    mesh_json = resp.json()
    try:
        label = mesh_json['@graph'][0]['label']['@value']
    except (KeyError, IndexError):
        # Response did not contain the expected JSON-LD structure.
        return None
    return label
constant[Get the MESH label for the given MESH ID using the NLM REST API.
Parameters
----------
mesh_id : str
MESH Identifier, e.g. 'D003094'.
Returns
-------
str
Label for the MESH ID, or None if the query failed or no label was
found.
]
variable[url] assign[=] binary_operation[binary_operation[name[MESH_URL] + name[mesh_id]] + constant[.json]]
variable[resp] assign[=] call[name[requests].get, parameter[name[url]]]
if compare[name[resp].status_code not_equal[!=] constant[200]] begin[:]
return[constant[None]]
variable[mesh_json] assign[=] call[name[resp].json, parameter[]]
<ast.Try object at 0x7da18dc9b280>
return[name[label]] | keyword[def] identifier[get_mesh_name_from_web] ( identifier[mesh_id] ):
literal[string]
identifier[url] = identifier[MESH_URL] + identifier[mesh_id] + literal[string]
identifier[resp] = identifier[requests] . identifier[get] ( identifier[url] )
keyword[if] identifier[resp] . identifier[status_code] != literal[int] :
keyword[return] keyword[None]
identifier[mesh_json] = identifier[resp] . identifier[json] ()
keyword[try] :
identifier[label] = identifier[mesh_json] [ literal[string] ][ literal[int] ][ literal[string] ][ literal[string] ]
keyword[except] ( identifier[KeyError] , identifier[IndexError] ) keyword[as] identifier[e] :
keyword[return] keyword[None]
keyword[return] identifier[label] | def get_mesh_name_from_web(mesh_id):
"""Get the MESH label for the given MESH ID using the NLM REST API.
Parameters
----------
mesh_id : str
MESH Identifier, e.g. 'D003094'.
Returns
-------
str
Label for the MESH ID, or None if the query failed or no label was
found.
"""
url = MESH_URL + mesh_id + '.json'
resp = requests.get(url)
if resp.status_code != 200:
return None # depends on [control=['if'], data=[]]
mesh_json = resp.json()
try:
label = mesh_json['@graph'][0]['label']['@value'] # depends on [control=['try'], data=[]]
except (KeyError, IndexError) as e:
return None # depends on [control=['except'], data=[]]
return label |
def rescale_image(
        data, maxsizeb=4000000, dimen=None,
        png2jpg=False, graying=True, reduceto=(600, 800)):
    '''Rescale an image and return the processed raw bytes.

    If ``png2jpg`` is True, PNG input is converted to JPEG. If ``dimen`` is
    not None, a thumbnail is produced with the constraint
    ``width = height = dimen`` (scalar) or ``width, height = dimen`` (pair).
    Otherwise, an image larger than ``reduceto`` is shrunk proportionally
    to fit within it.

    :param data: raw image bytes
    :type data: bytes or io.BytesIO
    :param int maxsizeb: intended maximum file size in bytes.
        NOTE(review): this parameter is currently never used by the body;
        it is kept for interface compatibility -- confirm intent with callers.
    :param dimen: thumbnail size constraint, width & height
    :type dimen: int or (int, int)
    :param bool png2jpg: whether to convert PNG input to JPEG
    :param bool graying: whether to convert the image to grayscale ("L" mode)
    :param reduceto: shrink proportionally when larger than this (width, height)
    :type reduceto: (int, int)
    :return: processed image bytes, can be written directly in ``wb`` mode
    :rtype: bytes
    '''
    if not isinstance(data, BytesIO):
        data = BytesIO(data)
    img = Image.open(data)
    width, height = img.size
    fmt = img.format
    if graying and img.mode != "L":
        img = img.convert("L")
    reducewidth, reduceheight = reduceto
    if dimen is not None:
        # Explicit thumbnail constraint takes precedence over reduceto.
        if hasattr(dimen, '__len__'):
            width, height = dimen
        else:
            width = height = dimen
        img.thumbnail((width, height))
    elif width > reducewidth or height > reduceheight:
        # Shrink proportionally so both dimensions fit within reduceto.
        ratio = min(
            float(reducewidth) / float(width),
            float(reduceheight) / float(height))
        # NOTE(review): Image.ANTIALIAS is deprecated in newer Pillow
        # (use Image.LANCZOS) -- kept as-is for compatibility.
        img = img.resize((
            int(width * ratio), int(height * ratio)), Image.ANTIALIAS)
    # Single save path replaces four duplicated BytesIO()/save() branches;
    # the output is identical for every combination of flags.
    if png2jpg and fmt == 'PNG':
        fmt = 'JPEG'
    data = BytesIO()
    img.save(data, fmt)
    return data.getvalue()
constant[
若 ``png2jpg`` 为 ``True`` 则将图片转换为 ``JPEG`` 格式,所有透明像素被设置为
*白色* 。确保结果图片尺寸小于 ``maxsizeb`` 的约束限制。
如果 ``dimen`` 不为空,则生成一个相应约束的缩略图。依据 ``dimen`` 的类型,设置约束为
``width=dimen, height=dimen`` 或者 ``width, height = dimen``
:param data: 原始图片字节数据
:type data: bytes or io.BytesIO
:param int maxsizeb: 文件大小约束,单位:字节
:param dimen: 缩略图尺寸约束,宽&高
:type dimen: int or (int, int)
:param bool png2jpg: 是否将图片转换为 JPG 格式
:param bool graying: 是否将图片进行灰度处理
:param reduceto: 若图片大于此约束则进行相应缩小处理,宽&高
:type reduceto: (int, int)
:return: 处理后的图片字节数据,可直接以 ``wb`` 模式输出到文件中
:rtype: bytes
]
if <ast.UnaryOp object at 0x7da20c991990> begin[:]
variable[data] assign[=] call[name[BytesIO], parameter[name[data]]]
variable[img] assign[=] call[name[Image].open, parameter[name[data]]]
<ast.Tuple object at 0x7da20c991390> assign[=] name[img].size
variable[fmt] assign[=] name[img].format
if <ast.BoolOp object at 0x7da20c9932e0> begin[:]
variable[img] assign[=] call[name[img].convert, parameter[constant[L]]]
<ast.Tuple object at 0x7da20c990e80> assign[=] name[reduceto]
if compare[name[dimen] is_not constant[None]] begin[:]
if call[name[hasattr], parameter[name[dimen], constant[__len__]]] begin[:]
<ast.Tuple object at 0x7da20c991d80> assign[=] name[dimen]
call[name[img].thumbnail, parameter[tuple[[<ast.Name object at 0x7da20e956b00>, <ast.Name object at 0x7da20e954610>]]]]
if <ast.BoolOp object at 0x7da20e957df0> begin[:]
variable[fmt] assign[=] constant[JPEG]
variable[data] assign[=] call[name[BytesIO], parameter[]]
call[name[img].save, parameter[name[data], name[fmt]]]
return[call[name[data].getvalue, parameter[]]] | keyword[def] identifier[rescale_image] (
identifier[data] , identifier[maxsizeb] = literal[int] , identifier[dimen] = keyword[None] ,
identifier[png2jpg] = keyword[False] , identifier[graying] = keyword[True] , identifier[reduceto] =( literal[int] , literal[int] )):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[BytesIO] ):
identifier[data] = identifier[BytesIO] ( identifier[data] )
identifier[img] = identifier[Image] . identifier[open] ( identifier[data] )
identifier[width] , identifier[height] = identifier[img] . identifier[size]
identifier[fmt] = identifier[img] . identifier[format]
keyword[if] identifier[graying] keyword[and] identifier[img] . identifier[mode] != literal[string] :
identifier[img] = identifier[img] . identifier[convert] ( literal[string] )
identifier[reducewidth] , identifier[reduceheight] = identifier[reduceto]
keyword[if] identifier[dimen] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[hasattr] ( identifier[dimen] , literal[string] ):
identifier[width] , identifier[height] = identifier[dimen]
keyword[else] :
identifier[width] = identifier[height] = identifier[dimen]
identifier[img] . identifier[thumbnail] (( identifier[width] , identifier[height] ))
keyword[if] identifier[png2jpg] keyword[and] identifier[fmt] == literal[string] :
identifier[fmt] = literal[string]
identifier[data] = identifier[BytesIO] ()
identifier[img] . identifier[save] ( identifier[data] , identifier[fmt] )
keyword[elif] identifier[width] > identifier[reducewidth] keyword[or] identifier[height] > identifier[reduceheight] :
identifier[ratio] = identifier[min] (
identifier[float] ( identifier[reducewidth] )/ identifier[float] ( identifier[width] ),
identifier[float] ( identifier[reduceheight] )/ identifier[float] ( identifier[height] ))
identifier[img] = identifier[img] . identifier[resize] ((
identifier[int] ( identifier[width] * identifier[ratio] ), identifier[int] ( identifier[height] * identifier[ratio] )), identifier[Image] . identifier[ANTIALIAS] )
keyword[if] identifier[png2jpg] keyword[and] identifier[fmt] == literal[string] :
identifier[fmt] = literal[string]
identifier[data] = identifier[BytesIO] ()
identifier[img] . identifier[save] ( identifier[data] , identifier[fmt] )
keyword[elif] identifier[png2jpg] keyword[and] identifier[fmt] == literal[string] :
identifier[data] = identifier[BytesIO] ()
identifier[img] . identifier[save] ( identifier[data] , literal[string] )
keyword[else] :
identifier[data] = identifier[BytesIO] ()
identifier[img] . identifier[save] ( identifier[data] , identifier[fmt] )
keyword[return] identifier[data] . identifier[getvalue] () | def rescale_image(data, maxsizeb=4000000, dimen=None, png2jpg=False, graying=True, reduceto=(600, 800)):
"""
若 ``png2jpg`` 为 ``True`` 则将图片转换为 ``JPEG`` 格式,所有透明像素被设置为
*白色* 。确保结果图片尺寸小于 ``maxsizeb`` 的约束限制。
如果 ``dimen`` 不为空,则生成一个相应约束的缩略图。依据 ``dimen`` 的类型,设置约束为
``width=dimen, height=dimen`` 或者 ``width, height = dimen``
:param data: 原始图片字节数据
:type data: bytes or io.BytesIO
:param int maxsizeb: 文件大小约束,单位:字节
:param dimen: 缩略图尺寸约束,宽&高
:type dimen: int or (int, int)
:param bool png2jpg: 是否将图片转换为 JPG 格式
:param bool graying: 是否将图片进行灰度处理
:param reduceto: 若图片大于此约束则进行相应缩小处理,宽&高
:type reduceto: (int, int)
:return: 处理后的图片字节数据,可直接以 ``wb`` 模式输出到文件中
:rtype: bytes
"""
if not isinstance(data, BytesIO):
data = BytesIO(data) # depends on [control=['if'], data=[]]
img = Image.open(data)
(width, height) = img.size
fmt = img.format
if graying and img.mode != 'L':
img = img.convert('L') # depends on [control=['if'], data=[]]
(reducewidth, reduceheight) = reduceto
if dimen is not None:
if hasattr(dimen, '__len__'):
(width, height) = dimen # depends on [control=['if'], data=[]]
else:
width = height = dimen
img.thumbnail((width, height))
if png2jpg and fmt == 'PNG':
fmt = 'JPEG' # depends on [control=['if'], data=[]]
data = BytesIO()
img.save(data, fmt) # depends on [control=['if'], data=['dimen']]
elif width > reducewidth or height > reduceheight:
ratio = min(float(reducewidth) / float(width), float(reduceheight) / float(height))
img = img.resize((int(width * ratio), int(height * ratio)), Image.ANTIALIAS)
if png2jpg and fmt == 'PNG':
fmt = 'JPEG' # depends on [control=['if'], data=[]]
data = BytesIO()
img.save(data, fmt) # depends on [control=['if'], data=[]]
elif png2jpg and fmt == 'PNG':
data = BytesIO()
img.save(data, 'JPEG') # depends on [control=['if'], data=[]]
else:
data = BytesIO()
img.save(data, fmt)
return data.getvalue() |
def update_scalingip(context, id, content):
    """Update an existing scaling IP.

    :param context: neutron api request context.
    :param id: id of the scaling ip
    :param content: dictionary with keys indicating fields to update.
        valid keys are those that have a value of True for 'allow_put'
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.
    :returns: Dictionary containing details for the new scaling IP. If values
        are declared in the fields parameter, then only those keys will be
        present.
    """
    # Lazy %-args: the message is only formatted when INFO is enabled.
    LOG.info('update_scalingip %s for tenant %s and body %s',
             id, context.tenant_id, content)
    requested_ports = content.get('ports', [])
    flip = _update_flip(context, id, ip_types.SCALING, requested_ports)
    return v._make_scaling_ip_dict(flip)
constant[Update an existing scaling IP.
:param context: neutron api request context.
:param id: id of the scaling ip
:param content: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for 'allow_put'
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py.
:returns: Dictionary containing details for the new scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
]
call[name[LOG].info, parameter[binary_operation[constant[update_scalingip %s for tenant %s and body %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b00f7220>, <ast.Attribute object at 0x7da1b00f4d00>, <ast.Name object at 0x7da1b00f64d0>]]]]]
variable[requested_ports] assign[=] call[name[content].get, parameter[constant[ports], list[[]]]]
variable[flip] assign[=] call[name[_update_flip], parameter[name[context], name[id], name[ip_types].SCALING, name[requested_ports]]]
return[call[name[v]._make_scaling_ip_dict, parameter[name[flip]]]] | keyword[def] identifier[update_scalingip] ( identifier[context] , identifier[id] , identifier[content] ):
literal[string]
identifier[LOG] . identifier[info] ( literal[string] %
( identifier[id] , identifier[context] . identifier[tenant_id] , identifier[content] ))
identifier[requested_ports] = identifier[content] . identifier[get] ( literal[string] ,[])
identifier[flip] = identifier[_update_flip] ( identifier[context] , identifier[id] , identifier[ip_types] . identifier[SCALING] , identifier[requested_ports] )
keyword[return] identifier[v] . identifier[_make_scaling_ip_dict] ( identifier[flip] ) | def update_scalingip(context, id, content):
"""Update an existing scaling IP.
:param context: neutron api request context.
:param id: id of the scaling ip
:param content: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for 'allow_put'
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py.
:returns: Dictionary containing details for the new scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('update_scalingip %s for tenant %s and body %s' % (id, context.tenant_id, content))
requested_ports = content.get('ports', [])
flip = _update_flip(context, id, ip_types.SCALING, requested_ports)
return v._make_scaling_ip_dict(flip) |
def search(self, q):
    """Return all entries of ``self.allstockno`` whose value matches ``q``.

    Parameters
    ----------
    q : str
        Regular-expression pattern matched against each stock name.

    Returns
    -------
    dict
        Mapping of stock number to stock name for every matching entry.
    """
    import re
    # Compile once, outside the loop; "%s" % q preserves the original
    # coercion of non-string patterns.
    pattern = re.compile("%s" % q)
    result = {}
    for number, name in self.allstockno.items():
        # Explicit None test replaces the original bare
        # ``try: b.group() / except: pass`` idiom, which silently
        # swallowed every exception.
        if pattern.search(name) is not None:
            result[number] = name
    return result
constant[ Search. ]
import module[re]
variable[pattern] assign[=] call[name[re].compile, parameter[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[q]]]]
variable[result] assign[=] dictionary[[], []]
for taget[name[i]] in starred[name[self].allstockno] begin[:]
variable[b] assign[=] call[name[re].search, parameter[name[pattern], call[name[self].allstockno][name[i]]]]
<ast.Try object at 0x7da18f810d00>
return[name[result]] | keyword[def] identifier[search] ( identifier[self] , identifier[q] ):
literal[string]
keyword[import] identifier[re]
identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] % identifier[q] )
identifier[result] ={}
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[allstockno] :
identifier[b] = identifier[re] . identifier[search] ( identifier[pattern] , identifier[self] . identifier[allstockno] [ identifier[i] ])
keyword[try] :
identifier[b] . identifier[group] ()
identifier[result] [ identifier[i] ]= identifier[self] . identifier[allstockno] [ identifier[i] ]
keyword[except] :
keyword[pass]
keyword[return] identifier[result] | def search(self, q):
""" Search. """
import re
pattern = re.compile('%s' % q)
result = {}
for i in self.allstockno:
b = re.search(pattern, self.allstockno[i])
try:
b.group()
result[i] = self.allstockno[i] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['i']]
return result |
def mediation_analysis(data=None, x=None, m=None, y=None, covar=None,
                       alpha=0.05, n_boot=500, seed=None, return_dist=False):
    """Mediation analysis using a bias-correct non-parametric bootstrap method.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe.
    x : str
        Column name in data containing the predictor variable.
        The predictor variable must be continuous.
    m : str or list of str
        Column name(s) in data containing the mediator variable(s).
        The mediator(s) can be continuous or binary (e.g. 0 or 1).
        This function supports multiple parallel mediators.
    y : str
        Column name in data containing the outcome variable.
        The outcome variable must be continuous.
    covar : None, str, or list
        Covariate(s). If not None, the specified covariate(s) will be included
        in all regressions.
    alpha : float
        Significance threshold. Used to determine the confidence interval,
        CI = [ alpha / 2 ; 1 - alpha / 2]
    n_boot : int
        Number of bootstrap iterations for confidence intervals and p-values
        estimation. The greater, the slower.
    seed : int or None
        Random state seed.
    return_dist : bool
        If True, the function also returns the indirect bootstrapped beta
        samples (size = n_boot). Can be plotted for instance using
        :py:func:`seaborn.distplot()` or :py:func:`seaborn.kdeplot()`
        functions.

    Returns
    -------
    stats : pd.DataFrame
        Mediation summary::

            'path' : regression model
            'coef' : regression estimates
            'se' : standard error
            'CI[2.5%]' : lower confidence interval
            'CI[97.5%]' : upper confidence interval
            'pval' : two-sided p-values
            'sig' : statistical significance

    Notes
    -----
    Mediation analysis is a "statistical procedure to test
    whether the effect of an independent variable X on a dependent variable
    Y (i.e., X → Y) is at least partly explained by a chain of effects of the
    independent variable on an intervening mediator variable M and of the
    intervening variable on the dependent variable (i.e., X → M → Y)"
    (from Fiedler et al. 2011).

    The **indirect effect** (also referred to as average causal mediation
    effect or ACME) of X on Y through mediator M quantifies the estimated
    difference in Y resulting from a one-unit change in X through a sequence of
    causal steps in which X affects M, which in turn affects Y.
    It is considered significant if the specified confidence interval does not
    include 0. The path 'X --> Y' is the sum of both the indirect and direct
    effect. It is sometimes referred to as total effect. For more details,
    please refer to Fiedler et al 2011 or Hayes and Rockwood 2017.

    A linear regression is used if the mediator variable is continuous and a
    logistic regression if the mediator variable is dichotomous (binary). Note
    that this function also supports parallel multiple mediators: "in such
    models, mediators may be and often are correlated, but nothing in the
    model allows one mediator to causally influence another."
    (Hayes and Rockwood 2017)

    This function wll only work well if the outcome variable is continuous.
    It does not support binary or ordinal outcome variable. For more
    advanced mediation models, please refer to the `lavaan` or `mediation` R
    packages, or the PROCESS macro for SPSS.

    The two-sided p-value of the indirect effect is computed using the
    bootstrap distribution, as in the mediation R package. However, the p-value
    should be interpreted with caution since it is a) not constructed
    conditioned on a true null hypothesis (see Hayes and Rockwood 2017) and b)
    varies depending on the number of bootstrap samples and the random seed.

    Note that rows with NaN are automatically removed.

    Results have been tested against the R mediation package and this tutorial
    https://data.library.virginia.edu/introduction-to-mediation-analysis/

    References
    ----------
    .. [1] Baron, R. M. & Kenny, D. A. The moderator–mediator variable
       distinction in social psychological research: Conceptual, strategic,
       and statistical considerations. J. Pers. Soc. Psychol. 51, 1173–1182
       (1986).
    .. [2] Fiedler, K., Schott, M. & Meiser, T. What mediation analysis can
       (not) do. J. Exp. Soc. Psychol. 47, 1231–1236 (2011).
    .. [3] Hayes, A. F. & Rockwood, N. J. Regression-based statistical
       mediation and moderation analysis in clinical research:
       Observations, recommendations, and implementation. Behav. Res.
       Ther. 98, 39–57 (2017).
    .. [4] https://cran.r-project.org/web/packages/mediation/mediation.pdf
    .. [5] http://lavaan.ugent.be/tutorial/mediation.html
    .. [6] https://github.com/rmill040/pymediation

    Examples
    --------
    1. Simple mediation analysis

    >>> from pingouin import mediation_analysis, read_dataset
    >>> df = read_dataset('mediation')
    >>> mediation_analysis(data=df, x='X', m='M', y='Y', alpha=0.05, seed=42)
           path    coef      se          pval  CI[2.5%]  CI[97.5%]  sig
    0     M ~ X  0.5610  0.0945  4.391362e-08    0.3735     0.7485  Yes
    1     Y ~ M  0.6542  0.0858  1.612674e-11    0.4838     0.8245  Yes
    2     Total  0.3961  0.1112  5.671128e-04    0.1755     0.6167  Yes
    3    Direct  0.0396  0.1096  7.187429e-01   -0.1780     0.2572   No
    4  Indirect  0.3565  0.0833  0.000000e+00    0.2198     0.5377  Yes

    2. Return the indirect bootstrapped beta coefficients

    >>> stats, dist = mediation_analysis(data=df, x='X', m='M', y='Y',
    ...                                  return_dist=True)
    >>> print(dist.shape)
    (500,)

    3. Mediation analysis with a binary mediator variable

    >>> mediation_analysis(data=df, x='X', m='Mbin', y='Y', seed=42)
           path    coef      se      pval  CI[2.5%]  CI[97.5%]  sig
    0  Mbin ~ X -0.0205  0.1159  0.859392   -0.2476     0.2066   No
    1  Y ~ Mbin -0.1354  0.4118  0.743076   -0.9525     0.6818   No
    2     Total  0.3961  0.1112  0.000567    0.1755     0.6167  Yes
    3    Direct  0.3956  0.1117  0.000614    0.1739     0.6173  Yes
    4  Indirect  0.0023  0.0495  0.960000   -0.0715     0.1441   No

    4. Mediation analysis with covariates

    >>> mediation_analysis(data=df, x='X', m='M', y='Y',
    ...                    covar=['Mbin', 'Ybin'], seed=42)
           path    coef      se          pval  CI[2.5%]  CI[97.5%]  sig
    0     M ~ X  0.5594  0.0968  9.394635e-08    0.3672     0.7516  Yes
    1     Y ~ M  0.6660  0.0861  1.017261e-11    0.4951     0.8368  Yes
    2     Total  0.4204  0.1129  3.324252e-04    0.1962     0.6446  Yes
    3    Direct  0.0645  0.1104  5.608583e-01   -0.1548     0.2837   No
    4  Indirect  0.3559  0.0865  0.000000e+00    0.2093     0.5530  Yes

    5. Mediation analysis with multiple parallel mediators

    >>> mediation_analysis(data=df, x='X', m=['M', 'Mbin'], y='Y', seed=42)
                path    coef      se          pval  CI[2.5%]  CI[97.5%]  sig
    0          M ~ X  0.5610  0.0945  4.391362e-08    0.3735     0.7485  Yes
    1       Mbin ~ X -0.0051  0.0290  8.592408e-01   -0.0626     0.0523   No
    2          Y ~ M  0.6537  0.0863  2.118163e-11    0.4824     0.8250  Yes
    3       Y ~ Mbin -0.0640  0.3282  8.456998e-01   -0.7154     0.5873   No
    4          Total  0.3961  0.1112  5.671128e-04    0.1755     0.6167  Yes
    5         Direct  0.0395  0.1102  7.206301e-01   -0.1792     0.2583   No
    6     Indirect M  0.3563  0.0845  0.000000e+00    0.2148     0.5385  Yes
    7  Indirect Mbin  0.0003  0.0097  9.520000e-01   -0.0172     0.0252   No
    """
    # Sanity check (first message fixed: it checked x but reported 'y').
    assert isinstance(x, str), 'x must be a string.'
    assert isinstance(y, str), 'y must be a string.'
    assert isinstance(m, (list, str)), 'Mediator(s) must be a list or string.'
    assert isinstance(covar, (type(None), str, list))
    if isinstance(m, str):
        m = [m]
    n_mediator = len(m)
    assert isinstance(data, pd.DataFrame), 'Data must be a DataFrame.'
    # Check for duplicates
    assert n_mediator == len(set(m)), 'Cannot have duplicates mediators.'
    if isinstance(covar, str):
        covar = [covar]
    if isinstance(covar, list):
        assert len(covar) == len(set(covar)), 'Cannot have duplicates covar.'
        assert set(m).isdisjoint(covar), 'Mediator cannot be in covar.'
    # Check that columns are in dataframe
    columns = _fl([x, m, y, covar])
    keys = data.columns
    assert all([c in keys for c in columns]), 'Column(s) are not in DataFrame.'
    # Check that columns are numeric
    err_msg = "Columns must be numeric or boolean."
    assert all([data[c].dtype.kind in 'bfi' for c in columns]), err_msg
    # Drop rows with NAN Values
    data = data[columns].dropna()
    n = data.shape[0]
    assert n > 5, 'DataFrame must have at least 5 samples (rows).'
    # Check if mediator is binary
    mtype = 'logistic' if all(data[m].nunique() == 2) else 'linear'
    # Name of CI
    ll_name = 'CI[%.1f%%]' % (100 * alpha / 2)
    ul_name = 'CI[%.1f%%]' % (100 * (1 - alpha / 2))
    # Compute regressions
    cols = ['names', 'coef', 'se', 'pval', ll_name, ul_name]
    # For speed, we pass np.array instead of pandas DataFrame
    X_val = data[_fl([x, covar])].values  # X + covar as predictors
    XM_val = data[_fl([x, m, covar])].values  # X + M + covar as predictors
    M_val = data[m].values  # M as target (no covariates)
    y_val = data[y].values  # y as target (no covariates)
    # M(j) ~ X + covar
    sxm = {}
    for idx, j in enumerate(m):
        if mtype == 'linear':
            sxm[j] = linear_regression(X_val, M_val[:, idx],
                                       alpha=alpha).loc[[1], cols]
        else:
            sxm[j] = logistic_regression(X_val, M_val[:, idx],
                                         alpha=alpha).loc[[1], cols]
        sxm[j].loc[1, 'names'] = '%s ~ X' % j
    sxm = pd.concat(sxm, ignore_index=True)
    # Y ~ M + covar
    smy = linear_regression(data[_fl([m, covar])], y_val,
                            alpha=alpha).loc[1:n_mediator, cols]
    # Average Total Effects (Y ~ X + covar)
    sxy = linear_regression(X_val, y_val, alpha=alpha).loc[[1], cols]
    # Average Direct Effects (Y ~ X + M + covar)
    direct = linear_regression(XM_val, y_val, alpha=alpha).loc[[1], cols]
    # Rename paths. The lambda argument is named ``name`` to avoid
    # shadowing the function parameter ``x`` (the predictor column).
    smy['names'] = smy['names'].apply(lambda name: 'Y ~ %s' % name)
    direct.loc[1, 'names'] = 'Direct'
    sxy.loc[1, 'names'] = 'Total'
    # Concatenate and create sig column
    stats = pd.concat((sxm, smy, sxy, direct), ignore_index=True)
    stats['sig'] = np.where(stats['pval'] < alpha, 'Yes', 'No')
    # Bootstrap confidence intervals
    rng = np.random.RandomState(seed)
    idx = rng.choice(np.arange(n), replace=True, size=(n_boot, n))
    ab_estimates = np.zeros(shape=(n_boot, n_mediator))
    for i in range(n_boot):
        ab_estimates[i, :] = _point_estimate(X_val, XM_val, M_val, y_val,
                                             idx[i, :], n_mediator, mtype)
    ab = _point_estimate(X_val, XM_val, M_val, y_val, np.arange(n),
                         n_mediator, mtype)
    indirect = {'names': m, 'coef': ab, 'se': ab_estimates.std(ddof=1, axis=0),
                'pval': [], ll_name: [], ul_name: [], 'sig': []}
    for j in range(n_mediator):
        ci_j = _bca(ab_estimates[:, j], indirect['coef'][j],
                    alpha=alpha, n_boot=n_boot)
        indirect[ll_name].append(min(ci_j))
        indirect[ul_name].append(max(ci_j))
        # Bootstrapped p-value of indirect effect
        # Note that this is less accurate than a permutation test because the
        # bootstrap distribution is not conditioned on a true null hypothesis.
        # For more details see Hayes and Rockwood 2017
        indirect['pval'].append(_pval_from_bootci(ab_estimates[:, j],
                                indirect['coef'][j]))
        indirect['sig'].append('Yes' if indirect['pval'][j] < alpha else 'No')
    # Create output dataframe
    indirect = pd.DataFrame.from_dict(indirect)
    if n_mediator == 1:
        indirect['names'] = 'Indirect'
    else:
        indirect['names'] = indirect['names'].apply(lambda name:
                                                    'Indirect %s' % name)
    # DataFrame.append was removed in pandas 2.0; pd.concat with
    # ignore_index=True produces the identical result.
    stats = pd.concat((stats, indirect), ignore_index=True)
    stats = stats.rename(columns={'names': 'path'})
    # Round
    col_to_round = ['coef', 'se', ll_name, ul_name]
    stats[col_to_round] = stats[col_to_round].round(4)
    if return_dist:
        return stats, np.squeeze(ab_estimates)
    else:
        return stats
constant[Mediation analysis using a bias-correct non-parametric bootstrap method.
Parameters
----------
data : pd.DataFrame
Dataframe.
x : str
Column name in data containing the predictor variable.
The predictor variable must be continuous.
m : str or list of str
Column name(s) in data containing the mediator variable(s).
The mediator(s) can be continuous or binary (e.g. 0 or 1).
This function supports multiple parallel mediators.
y : str
Column name in data containing the outcome variable.
The outcome variable must be continuous.
covar : None, str, or list
Covariate(s). If not None, the specified covariate(s) will be included
in all regressions.
alpha : float
Significance threshold. Used to determine the confidence interval,
CI = [ alpha / 2 ; 1 - alpha / 2]
n_boot : int
Number of bootstrap iterations for confidence intervals and p-values
estimation. The greater, the slower.
seed : int or None
Random state seed.
return_dist : bool
If True, the function also returns the indirect bootstrapped beta
samples (size = n_boot). Can be plotted for instance using
:py:func:`seaborn.distplot()` or :py:func:`seaborn.kdeplot()`
functions.
Returns
-------
stats : pd.DataFrame
Mediation summary::
'path' : regression model
'coef' : regression estimates
'se' : standard error
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
'pval' : two-sided p-values
'sig' : statistical significance
Notes
-----
Mediation analysis is a "statistical procedure to test
whether the effect of an independent variable X on a dependent variable
Y (i.e., X → Y) is at least partly explained by a chain of effects of the
independent variable on an intervening mediator variable M and of the
intervening variable on the dependent variable (i.e., X → M → Y)"
(from Fiedler et al. 2011).
The **indirect effect** (also referred to as average causal mediation
effect or ACME) of X on Y through mediator M quantifies the estimated
difference in Y resulting from a one-unit change in X through a sequence of
causal steps in which X affects M, which in turn affects Y.
It is considered significant if the specified confidence interval does not
include 0. The path 'X --> Y' is the sum of both the indirect and direct
effect. It is sometimes referred to as total effect. For more details,
please refer to Fiedler et al 2011 or Hayes and Rockwood 2017.
A linear regression is used if the mediator variable is continuous and a
logistic regression if the mediator variable is dichotomous (binary). Note
that this function also supports parallel multiple mediators: "in such
models, mediators may be and often are correlated, but nothing in the
model allows one mediator to causally influence another."
(Hayes and Rockwood 2017)
This function wll only work well if the outcome variable is continuous.
It does not support binary or ordinal outcome variable. For more
advanced mediation models, please refer to the `lavaan` or `mediation` R
packages, or the PROCESS macro for SPSS.
The two-sided p-value of the indirect effect is computed using the
bootstrap distribution, as in the mediation R package. However, the p-value
should be interpreted with caution since it is a) not constructed
conditioned on a true null hypothesis (see Hayes and Rockwood 2017) and b)
varies depending on the number of bootstrap samples and the random seed.
Note that rows with NaN are automatically removed.
Results have been tested against the R mediation package and this tutorial
https://data.library.virginia.edu/introduction-to-mediation-analysis/
References
----------
.. [1] Baron, R. M. & Kenny, D. A. The moderator–mediator variable
distinction in social psychological research: Conceptual, strategic,
and statistical considerations. J. Pers. Soc. Psychol. 51, 1173–1182
(1986).
.. [2] Fiedler, K., Schott, M. & Meiser, T. What mediation analysis can
(not) do. J. Exp. Soc. Psychol. 47, 1231–1236 (2011).
.. [3] Hayes, A. F. & Rockwood, N. J. Regression-based statistical
mediation and moderation analysis in clinical research:
Observations, recommendations, and implementation. Behav. Res.
Ther. 98, 39–57 (2017).
.. [4] https://cran.r-project.org/web/packages/mediation/mediation.pdf
.. [5] http://lavaan.ugent.be/tutorial/mediation.html
.. [6] https://github.com/rmill040/pymediation
Examples
--------
1. Simple mediation analysis
>>> from pingouin import mediation_analysis, read_dataset
>>> df = read_dataset('mediation')
>>> mediation_analysis(data=df, x='X', m='M', y='Y', alpha=0.05, seed=42)
path coef se pval CI[2.5%] CI[97.5%] sig
0 M ~ X 0.5610 0.0945 4.391362e-08 0.3735 0.7485 Yes
1 Y ~ M 0.6542 0.0858 1.612674e-11 0.4838 0.8245 Yes
2 Total 0.3961 0.1112 5.671128e-04 0.1755 0.6167 Yes
3 Direct 0.0396 0.1096 7.187429e-01 -0.1780 0.2572 No
4 Indirect 0.3565 0.0833 0.000000e+00 0.2198 0.5377 Yes
2. Return the indirect bootstrapped beta coefficients
>>> stats, dist = mediation_analysis(data=df, x='X', m='M', y='Y',
... return_dist=True)
>>> print(dist.shape)
(500,)
3. Mediation analysis with a binary mediator variable
>>> mediation_analysis(data=df, x='X', m='Mbin', y='Y', seed=42)
path coef se pval CI[2.5%] CI[97.5%] sig
0 Mbin ~ X -0.0205 0.1159 0.859392 -0.2476 0.2066 No
1 Y ~ Mbin -0.1354 0.4118 0.743076 -0.9525 0.6818 No
2 Total 0.3961 0.1112 0.000567 0.1755 0.6167 Yes
3 Direct 0.3956 0.1117 0.000614 0.1739 0.6173 Yes
4 Indirect 0.0023 0.0495 0.960000 -0.0715 0.1441 No
4. Mediation analysis with covariates
>>> mediation_analysis(data=df, x='X', m='M', y='Y',
... covar=['Mbin', 'Ybin'], seed=42)
path coef se pval CI[2.5%] CI[97.5%] sig
0 M ~ X 0.5594 0.0968 9.394635e-08 0.3672 0.7516 Yes
1 Y ~ M 0.6660 0.0861 1.017261e-11 0.4951 0.8368 Yes
2 Total 0.4204 0.1129 3.324252e-04 0.1962 0.6446 Yes
3 Direct 0.0645 0.1104 5.608583e-01 -0.1548 0.2837 No
4 Indirect 0.3559 0.0865 0.000000e+00 0.2093 0.5530 Yes
5. Mediation analysis with multiple parallel mediators
>>> mediation_analysis(data=df, x='X', m=['M', 'Mbin'], y='Y', seed=42)
path coef se pval CI[2.5%] CI[97.5%] sig
0 M ~ X 0.5610 0.0945 4.391362e-08 0.3735 0.7485 Yes
1 Mbin ~ X -0.0051 0.0290 8.592408e-01 -0.0626 0.0523 No
2 Y ~ M 0.6537 0.0863 2.118163e-11 0.4824 0.8250 Yes
3 Y ~ Mbin -0.0640 0.3282 8.456998e-01 -0.7154 0.5873 No
4 Total 0.3961 0.1112 5.671128e-04 0.1755 0.6167 Yes
5 Direct 0.0395 0.1102 7.206301e-01 -0.1792 0.2583 No
6 Indirect M 0.3563 0.0845 0.000000e+00 0.2148 0.5385 Yes
7 Indirect Mbin 0.0003 0.0097 9.520000e-01 -0.0172 0.0252 No
]
assert[call[name[isinstance], parameter[name[x], name[str]]]]
assert[call[name[isinstance], parameter[name[y], name[str]]]]
assert[call[name[isinstance], parameter[name[m], tuple[[<ast.Name object at 0x7da18ede6050>, <ast.Name object at 0x7da18ede5810>]]]]]
assert[call[name[isinstance], parameter[name[covar], tuple[[<ast.Call object at 0x7da18ede5b40>, <ast.Name object at 0x7da18ede7fd0>, <ast.Name object at 0x7da18ede65f0>]]]]]
if call[name[isinstance], parameter[name[m], name[str]]] begin[:]
variable[m] assign[=] list[[<ast.Name object at 0x7da18ede5960>]]
variable[n_mediator] assign[=] call[name[len], parameter[name[m]]]
assert[call[name[isinstance], parameter[name[data], name[pd].DataFrame]]]
assert[compare[name[n_mediator] equal[==] call[name[len], parameter[call[name[set], parameter[name[m]]]]]]]
if call[name[isinstance], parameter[name[covar], name[str]]] begin[:]
variable[covar] assign[=] list[[<ast.Name object at 0x7da18ede7e80>]]
if call[name[isinstance], parameter[name[covar], name[list]]] begin[:]
assert[compare[call[name[len], parameter[name[covar]]] equal[==] call[name[len], parameter[call[name[set], parameter[name[covar]]]]]]]
assert[call[call[name[set], parameter[name[m]]].isdisjoint, parameter[name[covar]]]]
variable[columns] assign[=] call[name[_fl], parameter[list[[<ast.Name object at 0x7da18ede5e40>, <ast.Name object at 0x7da18ede4ac0>, <ast.Name object at 0x7da18ede45e0>, <ast.Name object at 0x7da18ede7ee0>]]]]
variable[keys] assign[=] name[data].columns
assert[call[name[all], parameter[<ast.ListComp object at 0x7da18ede6c80>]]]
variable[err_msg] assign[=] constant[Columns must be numeric or boolean.]
assert[call[name[all], parameter[<ast.ListComp object at 0x7da18ede5780>]]]
variable[data] assign[=] call[call[name[data]][name[columns]].dropna, parameter[]]
variable[n] assign[=] call[name[data].shape][constant[0]]
assert[compare[name[n] greater[>] constant[5]]]
variable[mtype] assign[=] <ast.IfExp object at 0x7da18ede7190>
variable[ll_name] assign[=] binary_operation[constant[CI[%.1f%%]] <ast.Mod object at 0x7da2590d6920> binary_operation[binary_operation[constant[100] * name[alpha]] / constant[2]]]
variable[ul_name] assign[=] binary_operation[constant[CI[%.1f%%]] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[100] * binary_operation[constant[1] - binary_operation[name[alpha] / constant[2]]]]]
variable[cols] assign[=] list[[<ast.Constant object at 0x7da18ede56f0>, <ast.Constant object at 0x7da18ede77c0>, <ast.Constant object at 0x7da18ede5180>, <ast.Constant object at 0x7da18ede5330>, <ast.Name object at 0x7da18ede57e0>, <ast.Name object at 0x7da18ede5360>]]
variable[X_val] assign[=] call[name[data]][call[name[_fl], parameter[list[[<ast.Name object at 0x7da18ede7b80>, <ast.Name object at 0x7da18ede71f0>]]]]].values
variable[XM_val] assign[=] call[name[data]][call[name[_fl], parameter[list[[<ast.Name object at 0x7da18ede5900>, <ast.Name object at 0x7da18ede7eb0>, <ast.Name object at 0x7da18ede5660>]]]]].values
variable[M_val] assign[=] call[name[data]][name[m]].values
variable[y_val] assign[=] call[name[data]][name[y]].values
variable[sxm] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18ede7f70>, <ast.Name object at 0x7da18ede6410>]]] in starred[call[name[enumerate], parameter[name[m]]]] begin[:]
if compare[name[mtype] equal[==] constant[linear]] begin[:]
call[name[sxm]][name[j]] assign[=] call[call[name[linear_regression], parameter[name[X_val], call[name[M_val]][tuple[[<ast.Slice object at 0x7da18ede53f0>, <ast.Name object at 0x7da18ede79d0>]]]]].loc][tuple[[<ast.List object at 0x7da18ede5270>, <ast.Name object at 0x7da18ede48e0>]]]
call[call[name[sxm]][name[j]].loc][tuple[[<ast.Constant object at 0x7da18ede4b50>, <ast.Constant object at 0x7da18ede48b0>]]] assign[=] binary_operation[constant[%s ~ X] <ast.Mod object at 0x7da2590d6920> name[j]]
variable[sxm] assign[=] call[name[pd].concat, parameter[name[sxm]]]
variable[smy] assign[=] call[call[name[linear_regression], parameter[call[name[data]][call[name[_fl], parameter[list[[<ast.Name object at 0x7da2054a7910>, <ast.Name object at 0x7da2054a5e10>]]]]], name[y_val]]].loc][tuple[[<ast.Slice object at 0x7da2054a4e80>, <ast.Name object at 0x7da2054a58a0>]]]
variable[sxy] assign[=] call[call[name[linear_regression], parameter[name[X_val], name[y_val]]].loc][tuple[[<ast.List object at 0x7da2054a7190>, <ast.Name object at 0x7da2054a42b0>]]]
variable[direct] assign[=] call[call[name[linear_regression], parameter[name[XM_val], name[y_val]]].loc][tuple[[<ast.List object at 0x7da2054a6b60>, <ast.Name object at 0x7da2054a6560>]]]
call[name[smy]][constant[names]] assign[=] call[call[name[smy]][constant[names]].apply, parameter[<ast.Lambda object at 0x7da2054a4af0>]]
call[name[direct].loc][tuple[[<ast.Constant object at 0x7da2054a7130>, <ast.Constant object at 0x7da2054a5930>]]] assign[=] constant[Direct]
call[name[sxy].loc][tuple[[<ast.Constant object at 0x7da2054a5cc0>, <ast.Constant object at 0x7da2054a57e0>]]] assign[=] constant[Total]
variable[stats] assign[=] call[name[pd].concat, parameter[tuple[[<ast.Name object at 0x7da2054a5c90>, <ast.Name object at 0x7da2054a4520>, <ast.Name object at 0x7da2054a7490>, <ast.Name object at 0x7da2054a7df0>]]]]
call[name[stats]][constant[sig]] assign[=] call[name[np].where, parameter[compare[call[name[stats]][constant[pval]] less[<] name[alpha]], constant[Yes], constant[No]]]
variable[rng] assign[=] call[name[np].random.RandomState, parameter[name[seed]]]
variable[idx] assign[=] call[name[rng].choice, parameter[call[name[np].arange, parameter[name[n]]]]]
variable[ab_estimates] assign[=] call[name[np].zeros, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[name[n_boot]]]] begin[:]
call[name[ab_estimates]][tuple[[<ast.Name object at 0x7da2054a6e00>, <ast.Slice object at 0x7da2054a67a0>]]] assign[=] call[name[_point_estimate], parameter[name[X_val], name[XM_val], name[M_val], name[y_val], call[name[idx]][tuple[[<ast.Name object at 0x7da2054a7b20>, <ast.Slice object at 0x7da2054a4f40>]]], name[n_mediator], name[mtype]]]
variable[ab] assign[=] call[name[_point_estimate], parameter[name[X_val], name[XM_val], name[M_val], name[y_val], call[name[np].arange, parameter[name[n]]], name[n_mediator], name[mtype]]]
variable[indirect] assign[=] dictionary[[<ast.Constant object at 0x7da2054a5270>, <ast.Constant object at 0x7da2054a55d0>, <ast.Constant object at 0x7da2054a4820>, <ast.Constant object at 0x7da2054a55a0>, <ast.Name object at 0x7da2054a68f0>, <ast.Name object at 0x7da2054a54b0>, <ast.Constant object at 0x7da2054a60b0>], [<ast.Name object at 0x7da2054a6b00>, <ast.Name object at 0x7da2054a7f40>, <ast.Call object at 0x7da2054a4370>, <ast.List object at 0x7da2054a78e0>, <ast.List object at 0x7da2054a7070>, <ast.List object at 0x7da2054a45b0>, <ast.List object at 0x7da2054a7dc0>]]
for taget[name[j]] in starred[call[name[range], parameter[name[n_mediator]]]] begin[:]
variable[ci_j] assign[=] call[name[_bca], parameter[call[name[ab_estimates]][tuple[[<ast.Slice object at 0x7da2054a4130>, <ast.Name object at 0x7da2054a7c70>]]], call[call[name[indirect]][constant[coef]]][name[j]]]]
call[call[name[indirect]][name[ll_name]].append, parameter[call[name[min], parameter[name[ci_j]]]]]
call[call[name[indirect]][name[ul_name]].append, parameter[call[name[max], parameter[name[ci_j]]]]]
call[call[name[indirect]][constant[pval]].append, parameter[call[name[_pval_from_bootci], parameter[call[name[ab_estimates]][tuple[[<ast.Slice object at 0x7da2054a69b0>, <ast.Name object at 0x7da2054a4d30>]]], call[call[name[indirect]][constant[coef]]][name[j]]]]]]
call[call[name[indirect]][constant[sig]].append, parameter[<ast.IfExp object at 0x7da2054a7eb0>]]
variable[indirect] assign[=] call[name[pd].DataFrame.from_dict, parameter[name[indirect]]]
if compare[name[n_mediator] equal[==] constant[1]] begin[:]
call[name[indirect]][constant[names]] assign[=] constant[Indirect]
variable[stats] assign[=] call[name[stats].append, parameter[name[indirect]]]
variable[stats] assign[=] call[name[stats].rename, parameter[]]
variable[col_to_round] assign[=] list[[<ast.Constant object at 0x7da20e9617e0>, <ast.Constant object at 0x7da20e9619c0>, <ast.Name object at 0x7da20e962bc0>, <ast.Name object at 0x7da20e962830>]]
call[name[stats]][name[col_to_round]] assign[=] call[call[name[stats]][name[col_to_round]].round, parameter[constant[4]]]
if name[return_dist] begin[:]
return[tuple[[<ast.Name object at 0x7da20e9633a0>, <ast.Call object at 0x7da20e960be0>]]] | keyword[def] identifier[mediation_analysis] ( identifier[data] = keyword[None] , identifier[x] = keyword[None] , identifier[m] = keyword[None] , identifier[y] = keyword[None] , identifier[covar] = keyword[None] ,
identifier[alpha] = literal[int] , identifier[n_boot] = literal[int] , identifier[seed] = keyword[None] , identifier[return_dist] = keyword[False] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[x] , identifier[str] ), literal[string]
keyword[assert] identifier[isinstance] ( identifier[y] , identifier[str] ), literal[string]
keyword[assert] identifier[isinstance] ( identifier[m] ,( identifier[list] , identifier[str] )), literal[string]
keyword[assert] identifier[isinstance] ( identifier[covar] ,( identifier[type] ( keyword[None] ), identifier[str] , identifier[list] ))
keyword[if] identifier[isinstance] ( identifier[m] , identifier[str] ):
identifier[m] =[ identifier[m] ]
identifier[n_mediator] = identifier[len] ( identifier[m] )
keyword[assert] identifier[isinstance] ( identifier[data] , identifier[pd] . identifier[DataFrame] ), literal[string]
keyword[assert] identifier[n_mediator] == identifier[len] ( identifier[set] ( identifier[m] )), literal[string]
keyword[if] identifier[isinstance] ( identifier[covar] , identifier[str] ):
identifier[covar] =[ identifier[covar] ]
keyword[if] identifier[isinstance] ( identifier[covar] , identifier[list] ):
keyword[assert] identifier[len] ( identifier[covar] )== identifier[len] ( identifier[set] ( identifier[covar] )), literal[string]
keyword[assert] identifier[set] ( identifier[m] ). identifier[isdisjoint] ( identifier[covar] ), literal[string]
identifier[columns] = identifier[_fl] ([ identifier[x] , identifier[m] , identifier[y] , identifier[covar] ])
identifier[keys] = identifier[data] . identifier[columns]
keyword[assert] identifier[all] ([ identifier[c] keyword[in] identifier[keys] keyword[for] identifier[c] keyword[in] identifier[columns] ]), literal[string]
identifier[err_msg] = literal[string]
keyword[assert] identifier[all] ([ identifier[data] [ identifier[c] ]. identifier[dtype] . identifier[kind] keyword[in] literal[string] keyword[for] identifier[c] keyword[in] identifier[columns] ]), identifier[err_msg]
identifier[data] = identifier[data] [ identifier[columns] ]. identifier[dropna] ()
identifier[n] = identifier[data] . identifier[shape] [ literal[int] ]
keyword[assert] identifier[n] > literal[int] , literal[string]
identifier[mtype] = literal[string] keyword[if] identifier[all] ( identifier[data] [ identifier[m] ]. identifier[nunique] ()== literal[int] ) keyword[else] literal[string]
identifier[ll_name] = literal[string] %( literal[int] * identifier[alpha] / literal[int] )
identifier[ul_name] = literal[string] %( literal[int] *( literal[int] - identifier[alpha] / literal[int] ))
identifier[cols] =[ literal[string] , literal[string] , literal[string] , literal[string] , identifier[ll_name] , identifier[ul_name] ]
identifier[X_val] = identifier[data] [ identifier[_fl] ([ identifier[x] , identifier[covar] ])]. identifier[values]
identifier[XM_val] = identifier[data] [ identifier[_fl] ([ identifier[x] , identifier[m] , identifier[covar] ])]. identifier[values]
identifier[M_val] = identifier[data] [ identifier[m] ]. identifier[values]
identifier[y_val] = identifier[data] [ identifier[y] ]. identifier[values]
identifier[sxm] ={}
keyword[for] identifier[idx] , identifier[j] keyword[in] identifier[enumerate] ( identifier[m] ):
keyword[if] identifier[mtype] == literal[string] :
identifier[sxm] [ identifier[j] ]= identifier[linear_regression] ( identifier[X_val] , identifier[M_val] [:, identifier[idx] ],
identifier[alpha] = identifier[alpha] ). identifier[loc] [[ literal[int] ], identifier[cols] ]
keyword[else] :
identifier[sxm] [ identifier[j] ]= identifier[logistic_regression] ( identifier[X_val] , identifier[M_val] [:, identifier[idx] ],
identifier[alpha] = identifier[alpha] ). identifier[loc] [[ literal[int] ], identifier[cols] ]
identifier[sxm] [ identifier[j] ]. identifier[loc] [ literal[int] , literal[string] ]= literal[string] % identifier[j]
identifier[sxm] = identifier[pd] . identifier[concat] ( identifier[sxm] , identifier[ignore_index] = keyword[True] )
identifier[smy] = identifier[linear_regression] ( identifier[data] [ identifier[_fl] ([ identifier[m] , identifier[covar] ])], identifier[y_val] ,
identifier[alpha] = identifier[alpha] ). identifier[loc] [ literal[int] : identifier[n_mediator] , identifier[cols] ]
identifier[sxy] = identifier[linear_regression] ( identifier[X_val] , identifier[y_val] , identifier[alpha] = identifier[alpha] ). identifier[loc] [[ literal[int] ], identifier[cols] ]
identifier[direct] = identifier[linear_regression] ( identifier[XM_val] , identifier[y_val] , identifier[alpha] = identifier[alpha] ). identifier[loc] [[ literal[int] ], identifier[cols] ]
identifier[smy] [ literal[string] ]= identifier[smy] [ literal[string] ]. identifier[apply] ( keyword[lambda] identifier[x] : literal[string] % identifier[x] )
identifier[direct] . identifier[loc] [ literal[int] , literal[string] ]= literal[string]
identifier[sxy] . identifier[loc] [ literal[int] , literal[string] ]= literal[string]
identifier[stats] = identifier[pd] . identifier[concat] (( identifier[sxm] , identifier[smy] , identifier[sxy] , identifier[direct] ), identifier[ignore_index] = keyword[True] )
identifier[stats] [ literal[string] ]= identifier[np] . identifier[where] ( identifier[stats] [ literal[string] ]< identifier[alpha] , literal[string] , literal[string] )
identifier[rng] = identifier[np] . identifier[random] . identifier[RandomState] ( identifier[seed] )
identifier[idx] = identifier[rng] . identifier[choice] ( identifier[np] . identifier[arange] ( identifier[n] ), identifier[replace] = keyword[True] , identifier[size] =( identifier[n_boot] , identifier[n] ))
identifier[ab_estimates] = identifier[np] . identifier[zeros] ( identifier[shape] =( identifier[n_boot] , identifier[n_mediator] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_boot] ):
identifier[ab_estimates] [ identifier[i] ,:]= identifier[_point_estimate] ( identifier[X_val] , identifier[XM_val] , identifier[M_val] , identifier[y_val] ,
identifier[idx] [ identifier[i] ,:], identifier[n_mediator] , identifier[mtype] )
identifier[ab] = identifier[_point_estimate] ( identifier[X_val] , identifier[XM_val] , identifier[M_val] , identifier[y_val] , identifier[np] . identifier[arange] ( identifier[n] ),
identifier[n_mediator] , identifier[mtype] )
identifier[indirect] ={ literal[string] : identifier[m] , literal[string] : identifier[ab] , literal[string] : identifier[ab_estimates] . identifier[std] ( identifier[ddof] = literal[int] , identifier[axis] = literal[int] ),
literal[string] :[], identifier[ll_name] :[], identifier[ul_name] :[], literal[string] :[]}
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[n_mediator] ):
identifier[ci_j] = identifier[_bca] ( identifier[ab_estimates] [:, identifier[j] ], identifier[indirect] [ literal[string] ][ identifier[j] ],
identifier[alpha] = identifier[alpha] , identifier[n_boot] = identifier[n_boot] )
identifier[indirect] [ identifier[ll_name] ]. identifier[append] ( identifier[min] ( identifier[ci_j] ))
identifier[indirect] [ identifier[ul_name] ]. identifier[append] ( identifier[max] ( identifier[ci_j] ))
identifier[indirect] [ literal[string] ]. identifier[append] ( identifier[_pval_from_bootci] ( identifier[ab_estimates] [:, identifier[j] ],
identifier[indirect] [ literal[string] ][ identifier[j] ]))
identifier[indirect] [ literal[string] ]. identifier[append] ( literal[string] keyword[if] identifier[indirect] [ literal[string] ][ identifier[j] ]< identifier[alpha] keyword[else] literal[string] )
identifier[indirect] = identifier[pd] . identifier[DataFrame] . identifier[from_dict] ( identifier[indirect] )
keyword[if] identifier[n_mediator] == literal[int] :
identifier[indirect] [ literal[string] ]= literal[string]
keyword[else] :
identifier[indirect] [ literal[string] ]= identifier[indirect] [ literal[string] ]. identifier[apply] ( keyword[lambda] identifier[x] :
literal[string] % identifier[x] )
identifier[stats] = identifier[stats] . identifier[append] ( identifier[indirect] , identifier[ignore_index] = keyword[True] )
identifier[stats] = identifier[stats] . identifier[rename] ( identifier[columns] ={ literal[string] : literal[string] })
identifier[col_to_round] =[ literal[string] , literal[string] , identifier[ll_name] , identifier[ul_name] ]
identifier[stats] [ identifier[col_to_round] ]= identifier[stats] [ identifier[col_to_round] ]. identifier[round] ( literal[int] )
keyword[if] identifier[return_dist] :
keyword[return] identifier[stats] , identifier[np] . identifier[squeeze] ( identifier[ab_estimates] )
keyword[else] :
keyword[return] identifier[stats] | def mediation_analysis(data=None, x=None, m=None, y=None, covar=None, alpha=0.05, n_boot=500, seed=None, return_dist=False):
"""Mediation analysis using a bias-correct non-parametric bootstrap method.
Parameters
----------
data : pd.DataFrame
Dataframe.
x : str
Column name in data containing the predictor variable.
The predictor variable must be continuous.
m : str or list of str
Column name(s) in data containing the mediator variable(s).
The mediator(s) can be continuous or binary (e.g. 0 or 1).
This function supports multiple parallel mediators.
y : str
Column name in data containing the outcome variable.
The outcome variable must be continuous.
covar : None, str, or list
Covariate(s). If not None, the specified covariate(s) will be included
in all regressions.
alpha : float
Significance threshold. Used to determine the confidence interval,
CI = [ alpha / 2 ; 1 - alpha / 2]
n_boot : int
Number of bootstrap iterations for confidence intervals and p-values
estimation. The greater, the slower.
seed : int or None
Random state seed.
return_dist : bool
If True, the function also returns the indirect bootstrapped beta
samples (size = n_boot). Can be plotted for instance using
:py:func:`seaborn.distplot()` or :py:func:`seaborn.kdeplot()`
functions.
Returns
-------
stats : pd.DataFrame
Mediation summary::
'path' : regression model
'coef' : regression estimates
'se' : standard error
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
'pval' : two-sided p-values
'sig' : statistical significance
Notes
-----
Mediation analysis is a "statistical procedure to test
whether the effect of an independent variable X on a dependent variable
Y (i.e., X → Y) is at least partly explained by a chain of effects of the
independent variable on an intervening mediator variable M and of the
intervening variable on the dependent variable (i.e., X → M → Y)"
(from Fiedler et al. 2011).
The **indirect effect** (also referred to as average causal mediation
effect or ACME) of X on Y through mediator M quantifies the estimated
difference in Y resulting from a one-unit change in X through a sequence of
causal steps in which X affects M, which in turn affects Y.
It is considered significant if the specified confidence interval does not
include 0. The path 'X --> Y' is the sum of both the indirect and direct
effect. It is sometimes referred to as total effect. For more details,
please refer to Fiedler et al 2011 or Hayes and Rockwood 2017.
A linear regression is used if the mediator variable is continuous and a
logistic regression if the mediator variable is dichotomous (binary). Note
that this function also supports parallel multiple mediators: "in such
models, mediators may be and often are correlated, but nothing in the
model allows one mediator to causally influence another."
(Hayes and Rockwood 2017)
This function wll only work well if the outcome variable is continuous.
It does not support binary or ordinal outcome variable. For more
advanced mediation models, please refer to the `lavaan` or `mediation` R
packages, or the PROCESS macro for SPSS.
The two-sided p-value of the indirect effect is computed using the
bootstrap distribution, as in the mediation R package. However, the p-value
should be interpreted with caution since it is a) not constructed
conditioned on a true null hypothesis (see Hayes and Rockwood 2017) and b)
varies depending on the number of bootstrap samples and the random seed.
Note that rows with NaN are automatically removed.
Results have been tested against the R mediation package and this tutorial
https://data.library.virginia.edu/introduction-to-mediation-analysis/
References
----------
.. [1] Baron, R. M. & Kenny, D. A. The moderator–mediator variable
distinction in social psychological research: Conceptual, strategic,
and statistical considerations. J. Pers. Soc. Psychol. 51, 1173–1182
(1986).
.. [2] Fiedler, K., Schott, M. & Meiser, T. What mediation analysis can
(not) do. J. Exp. Soc. Psychol. 47, 1231–1236 (2011).
.. [3] Hayes, A. F. & Rockwood, N. J. Regression-based statistical
mediation and moderation analysis in clinical research:
Observations, recommendations, and implementation. Behav. Res.
Ther. 98, 39–57 (2017).
.. [4] https://cran.r-project.org/web/packages/mediation/mediation.pdf
.. [5] http://lavaan.ugent.be/tutorial/mediation.html
.. [6] https://github.com/rmill040/pymediation
Examples
--------
1. Simple mediation analysis
>>> from pingouin import mediation_analysis, read_dataset
>>> df = read_dataset('mediation')
>>> mediation_analysis(data=df, x='X', m='M', y='Y', alpha=0.05, seed=42)
path coef se pval CI[2.5%] CI[97.5%] sig
0 M ~ X 0.5610 0.0945 4.391362e-08 0.3735 0.7485 Yes
1 Y ~ M 0.6542 0.0858 1.612674e-11 0.4838 0.8245 Yes
2 Total 0.3961 0.1112 5.671128e-04 0.1755 0.6167 Yes
3 Direct 0.0396 0.1096 7.187429e-01 -0.1780 0.2572 No
4 Indirect 0.3565 0.0833 0.000000e+00 0.2198 0.5377 Yes
2. Return the indirect bootstrapped beta coefficients
>>> stats, dist = mediation_analysis(data=df, x='X', m='M', y='Y',
... return_dist=True)
>>> print(dist.shape)
(500,)
3. Mediation analysis with a binary mediator variable
>>> mediation_analysis(data=df, x='X', m='Mbin', y='Y', seed=42)
path coef se pval CI[2.5%] CI[97.5%] sig
0 Mbin ~ X -0.0205 0.1159 0.859392 -0.2476 0.2066 No
1 Y ~ Mbin -0.1354 0.4118 0.743076 -0.9525 0.6818 No
2 Total 0.3961 0.1112 0.000567 0.1755 0.6167 Yes
3 Direct 0.3956 0.1117 0.000614 0.1739 0.6173 Yes
4 Indirect 0.0023 0.0495 0.960000 -0.0715 0.1441 No
4. Mediation analysis with covariates
>>> mediation_analysis(data=df, x='X', m='M', y='Y',
... covar=['Mbin', 'Ybin'], seed=42)
path coef se pval CI[2.5%] CI[97.5%] sig
0 M ~ X 0.5594 0.0968 9.394635e-08 0.3672 0.7516 Yes
1 Y ~ M 0.6660 0.0861 1.017261e-11 0.4951 0.8368 Yes
2 Total 0.4204 0.1129 3.324252e-04 0.1962 0.6446 Yes
3 Direct 0.0645 0.1104 5.608583e-01 -0.1548 0.2837 No
4 Indirect 0.3559 0.0865 0.000000e+00 0.2093 0.5530 Yes
5. Mediation analysis with multiple parallel mediators
>>> mediation_analysis(data=df, x='X', m=['M', 'Mbin'], y='Y', seed=42)
path coef se pval CI[2.5%] CI[97.5%] sig
0 M ~ X 0.5610 0.0945 4.391362e-08 0.3735 0.7485 Yes
1 Mbin ~ X -0.0051 0.0290 8.592408e-01 -0.0626 0.0523 No
2 Y ~ M 0.6537 0.0863 2.118163e-11 0.4824 0.8250 Yes
3 Y ~ Mbin -0.0640 0.3282 8.456998e-01 -0.7154 0.5873 No
4 Total 0.3961 0.1112 5.671128e-04 0.1755 0.6167 Yes
5 Direct 0.0395 0.1102 7.206301e-01 -0.1792 0.2583 No
6 Indirect M 0.3563 0.0845 0.000000e+00 0.2148 0.5385 Yes
7 Indirect Mbin 0.0003 0.0097 9.520000e-01 -0.0172 0.0252 No
"""
# Sanity check
assert isinstance(x, str), 'y must be a string.'
assert isinstance(y, str), 'y must be a string.'
assert isinstance(m, (list, str)), 'Mediator(s) must be a list or string.'
assert isinstance(covar, (type(None), str, list))
if isinstance(m, str):
m = [m] # depends on [control=['if'], data=[]]
n_mediator = len(m)
assert isinstance(data, pd.DataFrame), 'Data must be a DataFrame.'
# Check for duplicates
assert n_mediator == len(set(m)), 'Cannot have duplicates mediators.'
if isinstance(covar, str):
covar = [covar] # depends on [control=['if'], data=[]]
if isinstance(covar, list):
assert len(covar) == len(set(covar)), 'Cannot have duplicates covar.'
assert set(m).isdisjoint(covar), 'Mediator cannot be in covar.' # depends on [control=['if'], data=[]]
# Check that columns are in dataframe
columns = _fl([x, m, y, covar])
keys = data.columns
assert all([c in keys for c in columns]), 'Column(s) are not in DataFrame.'
# Check that columns are numeric
err_msg = 'Columns must be numeric or boolean.'
assert all([data[c].dtype.kind in 'bfi' for c in columns]), err_msg
# Drop rows with NAN Values
data = data[columns].dropna()
n = data.shape[0]
assert n > 5, 'DataFrame must have at least 5 samples (rows).'
# Check if mediator is binary
mtype = 'logistic' if all(data[m].nunique() == 2) else 'linear'
# Name of CI
ll_name = 'CI[%.1f%%]' % (100 * alpha / 2)
ul_name = 'CI[%.1f%%]' % (100 * (1 - alpha / 2))
# Compute regressions
cols = ['names', 'coef', 'se', 'pval', ll_name, ul_name]
# For speed, we pass np.array instead of pandas DataFrame
X_val = data[_fl([x, covar])].values # X + covar as predictors
XM_val = data[_fl([x, m, covar])].values # X + M + covar as predictors
M_val = data[m].values # M as target (no covariates)
y_val = data[y].values # y as target (no covariates)
# M(j) ~ X + covar
sxm = {}
for (idx, j) in enumerate(m):
if mtype == 'linear':
sxm[j] = linear_regression(X_val, M_val[:, idx], alpha=alpha).loc[[1], cols] # depends on [control=['if'], data=[]]
else:
sxm[j] = logistic_regression(X_val, M_val[:, idx], alpha=alpha).loc[[1], cols]
sxm[j].loc[1, 'names'] = '%s ~ X' % j # depends on [control=['for'], data=[]]
sxm = pd.concat(sxm, ignore_index=True)
# Y ~ M + covar
smy = linear_regression(data[_fl([m, covar])], y_val, alpha=alpha).loc[1:n_mediator, cols]
# Average Total Effects (Y ~ X + covar)
sxy = linear_regression(X_val, y_val, alpha=alpha).loc[[1], cols]
# Average Direct Effects (Y ~ X + M + covar)
direct = linear_regression(XM_val, y_val, alpha=alpha).loc[[1], cols]
# Rename paths
smy['names'] = smy['names'].apply(lambda x: 'Y ~ %s' % x)
direct.loc[1, 'names'] = 'Direct'
sxy.loc[1, 'names'] = 'Total'
# Concatenate and create sig column
stats = pd.concat((sxm, smy, sxy, direct), ignore_index=True)
stats['sig'] = np.where(stats['pval'] < alpha, 'Yes', 'No')
# Bootstrap confidence intervals
rng = np.random.RandomState(seed)
idx = rng.choice(np.arange(n), replace=True, size=(n_boot, n))
ab_estimates = np.zeros(shape=(n_boot, n_mediator))
for i in range(n_boot):
ab_estimates[i, :] = _point_estimate(X_val, XM_val, M_val, y_val, idx[i, :], n_mediator, mtype) # depends on [control=['for'], data=['i']]
ab = _point_estimate(X_val, XM_val, M_val, y_val, np.arange(n), n_mediator, mtype)
indirect = {'names': m, 'coef': ab, 'se': ab_estimates.std(ddof=1, axis=0), 'pval': [], ll_name: [], ul_name: [], 'sig': []}
for j in range(n_mediator):
ci_j = _bca(ab_estimates[:, j], indirect['coef'][j], alpha=alpha, n_boot=n_boot)
indirect[ll_name].append(min(ci_j))
indirect[ul_name].append(max(ci_j))
# Bootstrapped p-value of indirect effect
# Note that this is less accurate than a permutation test because the
# bootstrap distribution is not conditioned on a true null hypothesis.
# For more details see Hayes and Rockwood 2017
indirect['pval'].append(_pval_from_bootci(ab_estimates[:, j], indirect['coef'][j]))
indirect['sig'].append('Yes' if indirect['pval'][j] < alpha else 'No') # depends on [control=['for'], data=['j']]
# Create output dataframe
indirect = pd.DataFrame.from_dict(indirect)
if n_mediator == 1:
indirect['names'] = 'Indirect' # depends on [control=['if'], data=[]]
else:
indirect['names'] = indirect['names'].apply(lambda x: 'Indirect %s' % x)
stats = stats.append(indirect, ignore_index=True)
stats = stats.rename(columns={'names': 'path'})
# Round
col_to_round = ['coef', 'se', ll_name, ul_name]
stats[col_to_round] = stats[col_to_round].round(4)
if return_dist:
return (stats, np.squeeze(ab_estimates)) # depends on [control=['if'], data=[]]
else:
return stats |
def change_state_id(self, state_id=None):
    """Assign a new id to the state.

    If no id is given, a fresh one is generated. For non-root states the
    supplied or generated id is checked against the ids already in use by
    the parent and its child states; a clashing id is replaced by a newly
    generated one before being stored.

    :param str state_id: The new state id of the state
    :return:
    """
    new_id = state_id
    if new_id is None:
        # Generate an id that at least differs from the current one.
        new_id = state_id_generator(used_state_ids=[self.state_id])
    if not (self.is_root_state or self.is_root_state_of_library):
        # Ids taken by siblings, the parent itself, and this state.
        taken_ids = list(self.parent.states.keys()) + [self.parent.state_id, self.state_id]
        if new_id in taken_ids:
            new_id = state_id_generator(used_state_ids=taken_ids)
    self._state_id = new_id
constant[Changes the id of the state to a new id
If no state_id is passed as parameter, a new state id is generated.
:param str state_id: The new state id of the state
:return:
]
if compare[name[state_id] is constant[None]] begin[:]
variable[state_id] assign[=] call[name[state_id_generator], parameter[]]
if <ast.BoolOp object at 0x7da2041d8df0> begin[:]
variable[used_ids] assign[=] binary_operation[call[name[list], parameter[call[name[self].parent.states.keys, parameter[]]]] + list[[<ast.Attribute object at 0x7da1b1ab9000>, <ast.Attribute object at 0x7da1b1abafb0>]]]
if compare[name[state_id] in name[used_ids]] begin[:]
variable[state_id] assign[=] call[name[state_id_generator], parameter[]]
name[self]._state_id assign[=] name[state_id] | keyword[def] identifier[change_state_id] ( identifier[self] , identifier[state_id] = keyword[None] ):
literal[string]
keyword[if] identifier[state_id] keyword[is] keyword[None] :
identifier[state_id] = identifier[state_id_generator] ( identifier[used_state_ids] =[ identifier[self] . identifier[state_id] ])
keyword[if] keyword[not] identifier[self] . identifier[is_root_state] keyword[and] keyword[not] identifier[self] . identifier[is_root_state_of_library] :
identifier[used_ids] = identifier[list] ( identifier[self] . identifier[parent] . identifier[states] . identifier[keys] ())+[ identifier[self] . identifier[parent] . identifier[state_id] , identifier[self] . identifier[state_id] ]
keyword[if] identifier[state_id] keyword[in] identifier[used_ids] :
identifier[state_id] = identifier[state_id_generator] ( identifier[used_state_ids] = identifier[used_ids] )
identifier[self] . identifier[_state_id] = identifier[state_id] | def change_state_id(self, state_id=None):
"""Changes the id of the state to a new id
If no state_id is passed as parameter, a new state id is generated.
:param str state_id: The new state id of the state
:return:
"""
if state_id is None:
state_id = state_id_generator(used_state_ids=[self.state_id]) # depends on [control=['if'], data=['state_id']]
if not self.is_root_state and (not self.is_root_state_of_library):
used_ids = list(self.parent.states.keys()) + [self.parent.state_id, self.state_id]
if state_id in used_ids:
state_id = state_id_generator(used_state_ids=used_ids) # depends on [control=['if'], data=['state_id', 'used_ids']] # depends on [control=['if'], data=[]]
self._state_id = state_id |
def compute_jacobian(ics, coordinates):
"""Construct a Jacobian for the given internal and Cartesian coordinates
Arguments:
| ``ics`` -- A list of internal coordinate objects.
| ``coordinates`` -- A numpy array with Cartesian coordinates,
shape=(N,3)
The return value will be a numpy array with the Jacobian matrix. There
will be a column for each internal coordinate, and a row for each
Cartesian coordinate (3*N rows).
"""
N3 = coordinates.size
jacobian = numpy.zeros((N3, len(ics)), float)
for j, ic in enumerate(ics):
# Let the ic object fill in each column of the Jacobian.
ic.fill_jacobian_column(jacobian[:,j], coordinates)
return jacobian | def function[compute_jacobian, parameter[ics, coordinates]]:
constant[Construct a Jacobian for the given internal and Cartesian coordinates
Arguments:
| ``ics`` -- A list of internal coordinate objects.
| ``coordinates`` -- A numpy array with Cartesian coordinates,
shape=(N,3)
The return value will be a numpy array with the Jacobian matrix. There
will be a column for each internal coordinate, and a row for each
Cartesian coordinate (3*N rows).
]
variable[N3] assign[=] name[coordinates].size
variable[jacobian] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Name object at 0x7da20c7c9630>, <ast.Call object at 0x7da20c7cac50>]], name[float]]]
for taget[tuple[[<ast.Name object at 0x7da20c7cbac0>, <ast.Name object at 0x7da20c7cab30>]]] in starred[call[name[enumerate], parameter[name[ics]]]] begin[:]
call[name[ic].fill_jacobian_column, parameter[call[name[jacobian]][tuple[[<ast.Slice object at 0x7da20c7c8eb0>, <ast.Name object at 0x7da20c7cb2e0>]]], name[coordinates]]]
return[name[jacobian]] | keyword[def] identifier[compute_jacobian] ( identifier[ics] , identifier[coordinates] ):
literal[string]
identifier[N3] = identifier[coordinates] . identifier[size]
identifier[jacobian] = identifier[numpy] . identifier[zeros] (( identifier[N3] , identifier[len] ( identifier[ics] )), identifier[float] )
keyword[for] identifier[j] , identifier[ic] keyword[in] identifier[enumerate] ( identifier[ics] ):
identifier[ic] . identifier[fill_jacobian_column] ( identifier[jacobian] [:, identifier[j] ], identifier[coordinates] )
keyword[return] identifier[jacobian] | def compute_jacobian(ics, coordinates):
"""Construct a Jacobian for the given internal and Cartesian coordinates
Arguments:
| ``ics`` -- A list of internal coordinate objects.
| ``coordinates`` -- A numpy array with Cartesian coordinates,
shape=(N,3)
The return value will be a numpy array with the Jacobian matrix. There
will be a column for each internal coordinate, and a row for each
Cartesian coordinate (3*N rows).
"""
N3 = coordinates.size
jacobian = numpy.zeros((N3, len(ics)), float)
for (j, ic) in enumerate(ics):
# Let the ic object fill in each column of the Jacobian.
ic.fill_jacobian_column(jacobian[:, j], coordinates) # depends on [control=['for'], data=[]]
return jacobian |
def generate_signature(secret, verb, url, nonce, data):
"""Generate a request signature compatible with BitMEX."""
# Parse the url so we can remove the base and extract just the path.
parsedURL = urllib.parse.urlparse(url)
path = parsedURL.path
if parsedURL.query:
path = path + '?' + parsedURL.query
# print "Computing HMAC: %s" % verb + path + str(nonce) + data
message = bytes(verb + path + str(nonce) + data, 'utf-8')
signature = hmac.new(secret.encode('utf-8'),
message,
digestmod=hashlib.sha256).hexdigest()
return signature | def function[generate_signature, parameter[secret, verb, url, nonce, data]]:
constant[Generate a request signature compatible with BitMEX.]
variable[parsedURL] assign[=] call[name[urllib].parse.urlparse, parameter[name[url]]]
variable[path] assign[=] name[parsedURL].path
if name[parsedURL].query begin[:]
variable[path] assign[=] binary_operation[binary_operation[name[path] + constant[?]] + name[parsedURL].query]
variable[message] assign[=] call[name[bytes], parameter[binary_operation[binary_operation[binary_operation[name[verb] + name[path]] + call[name[str], parameter[name[nonce]]]] + name[data]], constant[utf-8]]]
variable[signature] assign[=] call[call[name[hmac].new, parameter[call[name[secret].encode, parameter[constant[utf-8]]], name[message]]].hexdigest, parameter[]]
return[name[signature]] | keyword[def] identifier[generate_signature] ( identifier[secret] , identifier[verb] , identifier[url] , identifier[nonce] , identifier[data] ):
literal[string]
identifier[parsedURL] = identifier[urllib] . identifier[parse] . identifier[urlparse] ( identifier[url] )
identifier[path] = identifier[parsedURL] . identifier[path]
keyword[if] identifier[parsedURL] . identifier[query] :
identifier[path] = identifier[path] + literal[string] + identifier[parsedURL] . identifier[query]
identifier[message] = identifier[bytes] ( identifier[verb] + identifier[path] + identifier[str] ( identifier[nonce] )+ identifier[data] , literal[string] )
identifier[signature] = identifier[hmac] . identifier[new] ( identifier[secret] . identifier[encode] ( literal[string] ),
identifier[message] ,
identifier[digestmod] = identifier[hashlib] . identifier[sha256] ). identifier[hexdigest] ()
keyword[return] identifier[signature] | def generate_signature(secret, verb, url, nonce, data):
"""Generate a request signature compatible with BitMEX."""
# Parse the url so we can remove the base and extract just the path.
parsedURL = urllib.parse.urlparse(url)
path = parsedURL.path
if parsedURL.query:
path = path + '?' + parsedURL.query # depends on [control=['if'], data=[]]
# print "Computing HMAC: %s" % verb + path + str(nonce) + data
message = bytes(verb + path + str(nonce) + data, 'utf-8')
signature = hmac.new(secret.encode('utf-8'), message, digestmod=hashlib.sha256).hexdigest()
return signature |
def _clear_weave_cache():
"""Deletes the weave cache specified in os.environ['PYTHONCOMPILED']"""
cache_dir = os.environ['PYTHONCOMPILED']
if os.path.exists(cache_dir):
shutil.rmtree(cache_dir)
logging.info("Cleared weave cache %s", cache_dir) | def function[_clear_weave_cache, parameter[]]:
constant[Deletes the weave cache specified in os.environ['PYTHONCOMPILED']]
variable[cache_dir] assign[=] call[name[os].environ][constant[PYTHONCOMPILED]]
if call[name[os].path.exists, parameter[name[cache_dir]]] begin[:]
call[name[shutil].rmtree, parameter[name[cache_dir]]]
call[name[logging].info, parameter[constant[Cleared weave cache %s], name[cache_dir]]] | keyword[def] identifier[_clear_weave_cache] ():
literal[string]
identifier[cache_dir] = identifier[os] . identifier[environ] [ literal[string] ]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[cache_dir] ):
identifier[shutil] . identifier[rmtree] ( identifier[cache_dir] )
identifier[logging] . identifier[info] ( literal[string] , identifier[cache_dir] ) | def _clear_weave_cache():
"""Deletes the weave cache specified in os.environ['PYTHONCOMPILED']"""
cache_dir = os.environ['PYTHONCOMPILED']
if os.path.exists(cache_dir):
shutil.rmtree(cache_dir) # depends on [control=['if'], data=[]]
logging.info('Cleared weave cache %s', cache_dir) |
def get_email_context(self,**kwargs):
''' Overrides EmailRecipientMixin '''
context = super(TemporaryRegistration,self).get_email_context(**kwargs)
context.update({
'first_name': self.firstName,
'last_name': self.lastName,
'registrationComments': self.comments,
'registrationHowHeardAboutUs': self.howHeardAboutUs,
'eventList': [x.get_email_context(includeName=False) for x in self.temporaryeventregistration_set.all()],
})
if hasattr(self,'invoice') and self.invoice:
context.update({
'invoice': self.invoice.get_email_context(),
})
return context | def function[get_email_context, parameter[self]]:
constant[ Overrides EmailRecipientMixin ]
variable[context] assign[=] call[call[name[super], parameter[name[TemporaryRegistration], name[self]]].get_email_context, parameter[]]
call[name[context].update, parameter[dictionary[[<ast.Constant object at 0x7da1b13e3460>, <ast.Constant object at 0x7da1b13e3eb0>, <ast.Constant object at 0x7da1b13e3fa0>, <ast.Constant object at 0x7da1b13e3e50>, <ast.Constant object at 0x7da1b13e36a0>], [<ast.Attribute object at 0x7da1b13e07f0>, <ast.Attribute object at 0x7da1b13e3220>, <ast.Attribute object at 0x7da1b13e34f0>, <ast.Attribute object at 0x7da1b13e0fa0>, <ast.ListComp object at 0x7da1b13e3a30>]]]]
if <ast.BoolOp object at 0x7da1b13e3b50> begin[:]
call[name[context].update, parameter[dictionary[[<ast.Constant object at 0x7da1b13e2d10>], [<ast.Call object at 0x7da1b13e2800>]]]]
return[name[context]] | keyword[def] identifier[get_email_context] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[context] = identifier[super] ( identifier[TemporaryRegistration] , identifier[self] ). identifier[get_email_context] (** identifier[kwargs] )
identifier[context] . identifier[update] ({
literal[string] : identifier[self] . identifier[firstName] ,
literal[string] : identifier[self] . identifier[lastName] ,
literal[string] : identifier[self] . identifier[comments] ,
literal[string] : identifier[self] . identifier[howHeardAboutUs] ,
literal[string] :[ identifier[x] . identifier[get_email_context] ( identifier[includeName] = keyword[False] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[temporaryeventregistration_set] . identifier[all] ()],
})
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[invoice] :
identifier[context] . identifier[update] ({
literal[string] : identifier[self] . identifier[invoice] . identifier[get_email_context] (),
})
keyword[return] identifier[context] | def get_email_context(self, **kwargs):
""" Overrides EmailRecipientMixin """
context = super(TemporaryRegistration, self).get_email_context(**kwargs)
context.update({'first_name': self.firstName, 'last_name': self.lastName, 'registrationComments': self.comments, 'registrationHowHeardAboutUs': self.howHeardAboutUs, 'eventList': [x.get_email_context(includeName=False) for x in self.temporaryeventregistration_set.all()]})
if hasattr(self, 'invoice') and self.invoice:
context.update({'invoice': self.invoice.get_email_context()}) # depends on [control=['if'], data=[]]
return context |
def update_maintenance_window(self, id, **kwargs): # noqa: E501
"""Update a specific maintenance window # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_maintenance_window(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param MaintenanceWindow body: Example Body: <pre>{ \"reason\": \"MW Reason\", \"title\": \"MW Title\", \"startTimeInSeconds\": 1483228800, \"endTimeInSeconds\": 1483232400, \"relevantCustomerTags\": [ \"alertId1\" ], \"relevantHostTags\": [ \"sourceTag1\" ] }</pre>
:return: ResponseContainerMaintenanceWindow
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_maintenance_window_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_maintenance_window_with_http_info(id, **kwargs) # noqa: E501
return data | def function[update_maintenance_window, parameter[self, id]]:
constant[Update a specific maintenance window # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_maintenance_window(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param MaintenanceWindow body: Example Body: <pre>{ "reason": "MW Reason", "title": "MW Title", "startTimeInSeconds": 1483228800, "endTimeInSeconds": 1483232400, "relevantCustomerTags": [ "alertId1" ], "relevantHostTags": [ "sourceTag1" ] }</pre>
:return: ResponseContainerMaintenanceWindow
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].update_maintenance_window_with_http_info, parameter[name[id]]]] | keyword[def] identifier[update_maintenance_window] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[update_maintenance_window_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[update_maintenance_window_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def update_maintenance_window(self, id, **kwargs): # noqa: E501
'Update a specific maintenance window # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_maintenance_window(id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str id: (required)\n :param MaintenanceWindow body: Example Body: <pre>{ "reason": "MW Reason", "title": "MW Title", "startTimeInSeconds": 1483228800, "endTimeInSeconds": 1483232400, "relevantCustomerTags": [ "alertId1" ], "relevantHostTags": [ "sourceTag1" ] }</pre>\n :return: ResponseContainerMaintenanceWindow\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_maintenance_window_with_http_info(id, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.update_maintenance_window_with_http_info(id, **kwargs) # noqa: E501
return data |
def _groupname():
'''
Grain for the minion groupname
'''
if grp:
try:
groupname = grp.getgrgid(os.getgid()).gr_name
except KeyError:
groupname = ''
else:
groupname = ''
return groupname | def function[_groupname, parameter[]]:
constant[
Grain for the minion groupname
]
if name[grp] begin[:]
<ast.Try object at 0x7da1b1c66f50>
return[name[groupname]] | keyword[def] identifier[_groupname] ():
literal[string]
keyword[if] identifier[grp] :
keyword[try] :
identifier[groupname] = identifier[grp] . identifier[getgrgid] ( identifier[os] . identifier[getgid] ()). identifier[gr_name]
keyword[except] identifier[KeyError] :
identifier[groupname] = literal[string]
keyword[else] :
identifier[groupname] = literal[string]
keyword[return] identifier[groupname] | def _groupname():
"""
Grain for the minion groupname
"""
if grp:
try:
groupname = grp.getgrgid(os.getgid()).gr_name # depends on [control=['try'], data=[]]
except KeyError:
groupname = '' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
groupname = ''
return groupname |
def get_archs(libname):
""" Return architecture types from library `libname`
Parameters
----------
libname : str
filename of binary for which to return arch codes
Returns
-------
arch_names : frozenset
Empty (frozen)set if no arch codes. If not empty, contains one or more
of 'ppc', 'ppc64', 'i386', 'x86_64'
"""
if not exists(libname):
raise RuntimeError(libname + " is not a file")
try:
stdout = back_tick(['lipo', '-info', libname])
except RuntimeError:
return frozenset()
lines = [line.strip() for line in stdout.split('\n') if line.strip()]
# For some reason, output from lipo -info on .a file generates this line
if lines[0] == "input file {0} is not a fat file".format(libname):
line = lines[1]
else:
assert len(lines) == 1
line = lines[0]
for reggie in (
'Non-fat file: {0} is architecture: (.*)'.format(libname),
'Architectures in the fat file: {0} are: (.*)'.format(libname)):
reggie = re.compile(reggie)
match = reggie.match(line)
if not match is None:
return frozenset(match.groups()[0].split(' '))
raise ValueError("Unexpected output: '{0}' for {1}".format(
stdout, libname)) | def function[get_archs, parameter[libname]]:
constant[ Return architecture types from library `libname`
Parameters
----------
libname : str
filename of binary for which to return arch codes
Returns
-------
arch_names : frozenset
Empty (frozen)set if no arch codes. If not empty, contains one or more
of 'ppc', 'ppc64', 'i386', 'x86_64'
]
if <ast.UnaryOp object at 0x7da204345240> begin[:]
<ast.Raise object at 0x7da204345f30>
<ast.Try object at 0x7da204344c70>
variable[lines] assign[=] <ast.ListComp object at 0x7da204345930>
if compare[call[name[lines]][constant[0]] equal[==] call[constant[input file {0} is not a fat file].format, parameter[name[libname]]]] begin[:]
variable[line] assign[=] call[name[lines]][constant[1]]
for taget[name[reggie]] in starred[tuple[[<ast.Call object at 0x7da207f99d20>, <ast.Call object at 0x7da207f98400>]]] begin[:]
variable[reggie] assign[=] call[name[re].compile, parameter[name[reggie]]]
variable[match] assign[=] call[name[reggie].match, parameter[name[line]]]
if <ast.UnaryOp object at 0x7da204347490> begin[:]
return[call[name[frozenset], parameter[call[call[call[name[match].groups, parameter[]]][constant[0]].split, parameter[constant[ ]]]]]]
<ast.Raise object at 0x7da204346bf0> | keyword[def] identifier[get_archs] ( identifier[libname] ):
literal[string]
keyword[if] keyword[not] identifier[exists] ( identifier[libname] ):
keyword[raise] identifier[RuntimeError] ( identifier[libname] + literal[string] )
keyword[try] :
identifier[stdout] = identifier[back_tick] ([ literal[string] , literal[string] , identifier[libname] ])
keyword[except] identifier[RuntimeError] :
keyword[return] identifier[frozenset] ()
identifier[lines] =[ identifier[line] . identifier[strip] () keyword[for] identifier[line] keyword[in] identifier[stdout] . identifier[split] ( literal[string] ) keyword[if] identifier[line] . identifier[strip] ()]
keyword[if] identifier[lines] [ literal[int] ]== literal[string] . identifier[format] ( identifier[libname] ):
identifier[line] = identifier[lines] [ literal[int] ]
keyword[else] :
keyword[assert] identifier[len] ( identifier[lines] )== literal[int]
identifier[line] = identifier[lines] [ literal[int] ]
keyword[for] identifier[reggie] keyword[in] (
literal[string] . identifier[format] ( identifier[libname] ),
literal[string] . identifier[format] ( identifier[libname] )):
identifier[reggie] = identifier[re] . identifier[compile] ( identifier[reggie] )
identifier[match] = identifier[reggie] . identifier[match] ( identifier[line] )
keyword[if] keyword[not] identifier[match] keyword[is] keyword[None] :
keyword[return] identifier[frozenset] ( identifier[match] . identifier[groups] ()[ literal[int] ]. identifier[split] ( literal[string] ))
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[stdout] , identifier[libname] )) | def get_archs(libname):
""" Return architecture types from library `libname`
Parameters
----------
libname : str
filename of binary for which to return arch codes
Returns
-------
arch_names : frozenset
Empty (frozen)set if no arch codes. If not empty, contains one or more
of 'ppc', 'ppc64', 'i386', 'x86_64'
"""
if not exists(libname):
raise RuntimeError(libname + ' is not a file') # depends on [control=['if'], data=[]]
try:
stdout = back_tick(['lipo', '-info', libname]) # depends on [control=['try'], data=[]]
except RuntimeError:
return frozenset() # depends on [control=['except'], data=[]]
lines = [line.strip() for line in stdout.split('\n') if line.strip()]
# For some reason, output from lipo -info on .a file generates this line
if lines[0] == 'input file {0} is not a fat file'.format(libname):
line = lines[1] # depends on [control=['if'], data=[]]
else:
assert len(lines) == 1
line = lines[0]
for reggie in ('Non-fat file: {0} is architecture: (.*)'.format(libname), 'Architectures in the fat file: {0} are: (.*)'.format(libname)):
reggie = re.compile(reggie)
match = reggie.match(line)
if not match is None:
return frozenset(match.groups()[0].split(' ')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['reggie']]
raise ValueError("Unexpected output: '{0}' for {1}".format(stdout, libname)) |
def _basis_polynomial_factory(cls, kind):
"""Return a polynomial given some coefficients."""
valid_kind = cls._validate(kind)
basis_polynomial = getattr(np.polynomial, valid_kind)
return basis_polynomial | def function[_basis_polynomial_factory, parameter[cls, kind]]:
constant[Return a polynomial given some coefficients.]
variable[valid_kind] assign[=] call[name[cls]._validate, parameter[name[kind]]]
variable[basis_polynomial] assign[=] call[name[getattr], parameter[name[np].polynomial, name[valid_kind]]]
return[name[basis_polynomial]] | keyword[def] identifier[_basis_polynomial_factory] ( identifier[cls] , identifier[kind] ):
literal[string]
identifier[valid_kind] = identifier[cls] . identifier[_validate] ( identifier[kind] )
identifier[basis_polynomial] = identifier[getattr] ( identifier[np] . identifier[polynomial] , identifier[valid_kind] )
keyword[return] identifier[basis_polynomial] | def _basis_polynomial_factory(cls, kind):
"""Return a polynomial given some coefficients."""
valid_kind = cls._validate(kind)
basis_polynomial = getattr(np.polynomial, valid_kind)
return basis_polynomial |
def get_project_language_analytics(self, project):
"""GetProjectLanguageAnalytics.
[Preview API]
:param str project: Project ID or project name
:rtype: :class:`<ProjectLanguageAnalytics> <azure.devops.v5_0.project_analysis.models.ProjectLanguageAnalytics>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='5b02a779-1867-433f-90b7-d23ed5e33e57',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('ProjectLanguageAnalytics', response) | def function[get_project_language_analytics, parameter[self, project]]:
constant[GetProjectLanguageAnalytics.
[Preview API]
:param str project: Project ID or project name
:rtype: :class:`<ProjectLanguageAnalytics> <azure.devops.v5_0.project_analysis.models.ProjectLanguageAnalytics>`
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[project] is_not constant[None]] begin[:]
call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[ProjectLanguageAnalytics], name[response]]]] | keyword[def] identifier[get_project_language_analytics] ( identifier[self] , identifier[project] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] ) | def get_project_language_analytics(self, project):
"""GetProjectLanguageAnalytics.
[Preview API]
:param str project: Project ID or project name
:rtype: :class:`<ProjectLanguageAnalytics> <azure.devops.v5_0.project_analysis.models.ProjectLanguageAnalytics>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']]
response = self._send(http_method='GET', location_id='5b02a779-1867-433f-90b7-d23ed5e33e57', version='5.0-preview.1', route_values=route_values)
return self._deserialize('ProjectLanguageAnalytics', response) |
def mask_elliptical_from_shape_pixel_scale_and_radius(shape, pixel_scale, major_axis_radius_arcsec, axis_ratio, phi,
centre=(0.0, 0.0)):
"""Compute a circular masks from an input masks radius and regular shape."""
mask = np.full(shape, True)
centres_arcsec = mask_centres_from_shape_pixel_scale_and_centre(shape=mask.shape, pixel_scale=pixel_scale,
centre=centre)
for y in range(mask.shape[0]):
for x in range(mask.shape[1]):
y_arcsec = (y - centres_arcsec[0]) * pixel_scale
x_arcsec = (x - centres_arcsec[1]) * pixel_scale
r_arcsec_elliptical = elliptical_radius_from_y_x_phi_and_axis_ratio(y_arcsec, x_arcsec, phi, axis_ratio)
if r_arcsec_elliptical <= major_axis_radius_arcsec:
mask[y, x] = False
return mask | def function[mask_elliptical_from_shape_pixel_scale_and_radius, parameter[shape, pixel_scale, major_axis_radius_arcsec, axis_ratio, phi, centre]]:
constant[Compute a circular masks from an input masks radius and regular shape.]
variable[mask] assign[=] call[name[np].full, parameter[name[shape], constant[True]]]
variable[centres_arcsec] assign[=] call[name[mask_centres_from_shape_pixel_scale_and_centre], parameter[]]
for taget[name[y]] in starred[call[name[range], parameter[call[name[mask].shape][constant[0]]]]] begin[:]
for taget[name[x]] in starred[call[name[range], parameter[call[name[mask].shape][constant[1]]]]] begin[:]
variable[y_arcsec] assign[=] binary_operation[binary_operation[name[y] - call[name[centres_arcsec]][constant[0]]] * name[pixel_scale]]
variable[x_arcsec] assign[=] binary_operation[binary_operation[name[x] - call[name[centres_arcsec]][constant[1]]] * name[pixel_scale]]
variable[r_arcsec_elliptical] assign[=] call[name[elliptical_radius_from_y_x_phi_and_axis_ratio], parameter[name[y_arcsec], name[x_arcsec], name[phi], name[axis_ratio]]]
if compare[name[r_arcsec_elliptical] less_or_equal[<=] name[major_axis_radius_arcsec]] begin[:]
call[name[mask]][tuple[[<ast.Name object at 0x7da20c76d120>, <ast.Name object at 0x7da20c76d000>]]] assign[=] constant[False]
return[name[mask]] | keyword[def] identifier[mask_elliptical_from_shape_pixel_scale_and_radius] ( identifier[shape] , identifier[pixel_scale] , identifier[major_axis_radius_arcsec] , identifier[axis_ratio] , identifier[phi] ,
identifier[centre] =( literal[int] , literal[int] )):
literal[string]
identifier[mask] = identifier[np] . identifier[full] ( identifier[shape] , keyword[True] )
identifier[centres_arcsec] = identifier[mask_centres_from_shape_pixel_scale_and_centre] ( identifier[shape] = identifier[mask] . identifier[shape] , identifier[pixel_scale] = identifier[pixel_scale] ,
identifier[centre] = identifier[centre] )
keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[mask] . identifier[shape] [ literal[int] ]):
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[mask] . identifier[shape] [ literal[int] ]):
identifier[y_arcsec] =( identifier[y] - identifier[centres_arcsec] [ literal[int] ])* identifier[pixel_scale]
identifier[x_arcsec] =( identifier[x] - identifier[centres_arcsec] [ literal[int] ])* identifier[pixel_scale]
identifier[r_arcsec_elliptical] = identifier[elliptical_radius_from_y_x_phi_and_axis_ratio] ( identifier[y_arcsec] , identifier[x_arcsec] , identifier[phi] , identifier[axis_ratio] )
keyword[if] identifier[r_arcsec_elliptical] <= identifier[major_axis_radius_arcsec] :
identifier[mask] [ identifier[y] , identifier[x] ]= keyword[False]
keyword[return] identifier[mask] | def mask_elliptical_from_shape_pixel_scale_and_radius(shape, pixel_scale, major_axis_radius_arcsec, axis_ratio, phi, centre=(0.0, 0.0)):
"""Compute a circular masks from an input masks radius and regular shape."""
mask = np.full(shape, True)
centres_arcsec = mask_centres_from_shape_pixel_scale_and_centre(shape=mask.shape, pixel_scale=pixel_scale, centre=centre)
for y in range(mask.shape[0]):
for x in range(mask.shape[1]):
y_arcsec = (y - centres_arcsec[0]) * pixel_scale
x_arcsec = (x - centres_arcsec[1]) * pixel_scale
r_arcsec_elliptical = elliptical_radius_from_y_x_phi_and_axis_ratio(y_arcsec, x_arcsec, phi, axis_ratio)
if r_arcsec_elliptical <= major_axis_radius_arcsec:
mask[y, x] = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['y']]
return mask |
def exit():
"""
shuts down user interface without cleaning up.
Use a :class:`alot.commands.globals.ExitCommand` for a clean shutdown.
"""
exit_msg = None
try:
loop = asyncio.get_event_loop()
loop.stop()
except Exception as e:
logging.error('Could not stop loop: %s\nShutting down anyway..',
str(e)) | def function[exit, parameter[]]:
constant[
shuts down user interface without cleaning up.
Use a :class:`alot.commands.globals.ExitCommand` for a clean shutdown.
]
variable[exit_msg] assign[=] constant[None]
<ast.Try object at 0x7da1b0845690> | keyword[def] identifier[exit] ():
literal[string]
identifier[exit_msg] = keyword[None]
keyword[try] :
identifier[loop] = identifier[asyncio] . identifier[get_event_loop] ()
identifier[loop] . identifier[stop] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( literal[string] ,
identifier[str] ( identifier[e] )) | def exit():
"""
shuts down user interface without cleaning up.
Use a :class:`alot.commands.globals.ExitCommand` for a clean shutdown.
"""
exit_msg = None
try:
loop = asyncio.get_event_loop()
loop.stop() # depends on [control=['try'], data=[]]
except Exception as e:
logging.error('Could not stop loop: %s\nShutting down anyway..', str(e)) # depends on [control=['except'], data=['e']] |
def merge(self, other):
"""Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
from .dataset import Dataset
if other is None:
return self.to_dataset()
else:
other_vars = getattr(other, 'variables', other)
coords = expand_and_merge_variables([self.variables, other_vars])
return Dataset._from_vars_and_coord_names(coords, set(coords)) | def function[merge, parameter[self, other]]:
constant[Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
]
from relative_module[dataset] import module[Dataset]
if compare[name[other] is constant[None]] begin[:]
return[call[name[self].to_dataset, parameter[]]] | keyword[def] identifier[merge] ( identifier[self] , identifier[other] ):
literal[string]
keyword[from] . identifier[dataset] keyword[import] identifier[Dataset]
keyword[if] identifier[other] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[to_dataset] ()
keyword[else] :
identifier[other_vars] = identifier[getattr] ( identifier[other] , literal[string] , identifier[other] )
identifier[coords] = identifier[expand_and_merge_variables] ([ identifier[self] . identifier[variables] , identifier[other_vars] ])
keyword[return] identifier[Dataset] . identifier[_from_vars_and_coord_names] ( identifier[coords] , identifier[set] ( identifier[coords] )) | def merge(self, other):
"""Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
from .dataset import Dataset
if other is None:
return self.to_dataset() # depends on [control=['if'], data=[]]
else:
other_vars = getattr(other, 'variables', other)
coords = expand_and_merge_variables([self.variables, other_vars])
return Dataset._from_vars_and_coord_names(coords, set(coords)) |
def _ohlc_dict(df_or_figure,open='',high='',low='',close='',volume='',
validate='',**kwargs):
"""
Returns a dictionary with the actual column names that
correspond to each of the OHLCV values.
df_or_figure : DataFrame or Figure
open : string
Column name to be used for OPEN values
high : string
Column name to be used for HIGH values
low : string
Column name to be used for LOW values
close : string
Column name to be used for CLOSE values
volume : string
Column name to be used for VOLUME values
validate : string
Validates that the stated column exists
Example:
validate='ohv' | Will ensure Open, High
and close values exist.
"""
c_dir={}
ohlcv=['open','high','low','close','volume']
if type(df_or_figure)==pd.DataFrame:
cnames=df_or_figure.columns
elif type(df_or_figure)==Figure or type(df_or_figure) == dict:
cnames=df_or_figure.axis['ref'].keys()
elif type(df_or_figure)==pd.Series:
cnames=[df_or_figure.name]
c_min=dict([(v.lower(),v) for v in cnames])
for _ in ohlcv:
if _ in c_min.keys():
c_dir[_]=c_min[_]
else:
for c in cnames:
if _ in c.lower():
c_dir[_]=c
if open:
c_dir['open']=open
if high:
c_dir['high']=high
if low:
c_dir['low']=low
if close:
c_dir['close']=close
if volume:
c_dir['volume']=volume
for v in list(c_dir.values()):
if v not in cnames:
raise StudyError('{0} is not a valid column name'.format(v))
if validate:
errs=[]
val=validate.lower()
s_names=dict([(_[0],_) for _ in ohlcv])
cols=[_[0] for _ in c_dir.keys()]
for _ in val:
if _ not in cols:
errs.append(s_names[_])
if errs:
raise StudyError('Missing Columns: {0}'.format(', '.join(errs)))
return c_dir | def function[_ohlc_dict, parameter[df_or_figure, open, high, low, close, volume, validate]]:
constant[
Returns a dictionary with the actual column names that
correspond to each of the OHLCV values.
df_or_figure : DataFrame or Figure
open : string
Column name to be used for OPEN values
high : string
Column name to be used for HIGH values
low : string
Column name to be used for LOW values
close : string
Column name to be used for CLOSE values
volume : string
Column name to be used for VOLUME values
validate : string
Validates that the stated column exists
Example:
validate='ohv' | Will ensure Open, High
and close values exist.
]
variable[c_dir] assign[=] dictionary[[], []]
variable[ohlcv] assign[=] list[[<ast.Constant object at 0x7da1b1b13a90>, <ast.Constant object at 0x7da1b1b13a60>, <ast.Constant object at 0x7da1b1b13a30>, <ast.Constant object at 0x7da1b1b13a00>, <ast.Constant object at 0x7da1b1b139d0>]]
if compare[call[name[type], parameter[name[df_or_figure]]] equal[==] name[pd].DataFrame] begin[:]
variable[cnames] assign[=] name[df_or_figure].columns
variable[c_min] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b1b130d0>]]
for taget[name[_]] in starred[name[ohlcv]] begin[:]
if compare[name[_] in call[name[c_min].keys, parameter[]]] begin[:]
call[name[c_dir]][name[_]] assign[=] call[name[c_min]][name[_]]
if name[open] begin[:]
call[name[c_dir]][constant[open]] assign[=] name[open]
if name[high] begin[:]
call[name[c_dir]][constant[high]] assign[=] name[high]
if name[low] begin[:]
call[name[c_dir]][constant[low]] assign[=] name[low]
if name[close] begin[:]
call[name[c_dir]][constant[close]] assign[=] name[close]
if name[volume] begin[:]
call[name[c_dir]][constant[volume]] assign[=] name[volume]
for taget[name[v]] in starred[call[name[list], parameter[call[name[c_dir].values, parameter[]]]]] begin[:]
if compare[name[v] <ast.NotIn object at 0x7da2590d7190> name[cnames]] begin[:]
<ast.Raise object at 0x7da1b1b10a60>
if name[validate] begin[:]
variable[errs] assign[=] list[[]]
variable[val] assign[=] call[name[validate].lower, parameter[]]
variable[s_names] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b1b10e80>]]
variable[cols] assign[=] <ast.ListComp object at 0x7da1b1cc64a0>
for taget[name[_]] in starred[name[val]] begin[:]
if compare[name[_] <ast.NotIn object at 0x7da2590d7190> name[cols]] begin[:]
call[name[errs].append, parameter[call[name[s_names]][name[_]]]]
if name[errs] begin[:]
<ast.Raise object at 0x7da1b216ca60>
return[name[c_dir]] | keyword[def] identifier[_ohlc_dict] ( identifier[df_or_figure] , identifier[open] = literal[string] , identifier[high] = literal[string] , identifier[low] = literal[string] , identifier[close] = literal[string] , identifier[volume] = literal[string] ,
identifier[validate] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[c_dir] ={}
identifier[ohlcv] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[type] ( identifier[df_or_figure] )== identifier[pd] . identifier[DataFrame] :
identifier[cnames] = identifier[df_or_figure] . identifier[columns]
keyword[elif] identifier[type] ( identifier[df_or_figure] )== identifier[Figure] keyword[or] identifier[type] ( identifier[df_or_figure] )== identifier[dict] :
identifier[cnames] = identifier[df_or_figure] . identifier[axis] [ literal[string] ]. identifier[keys] ()
keyword[elif] identifier[type] ( identifier[df_or_figure] )== identifier[pd] . identifier[Series] :
identifier[cnames] =[ identifier[df_or_figure] . identifier[name] ]
identifier[c_min] = identifier[dict] ([( identifier[v] . identifier[lower] (), identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[cnames] ])
keyword[for] identifier[_] keyword[in] identifier[ohlcv] :
keyword[if] identifier[_] keyword[in] identifier[c_min] . identifier[keys] ():
identifier[c_dir] [ identifier[_] ]= identifier[c_min] [ identifier[_] ]
keyword[else] :
keyword[for] identifier[c] keyword[in] identifier[cnames] :
keyword[if] identifier[_] keyword[in] identifier[c] . identifier[lower] ():
identifier[c_dir] [ identifier[_] ]= identifier[c]
keyword[if] identifier[open] :
identifier[c_dir] [ literal[string] ]= identifier[open]
keyword[if] identifier[high] :
identifier[c_dir] [ literal[string] ]= identifier[high]
keyword[if] identifier[low] :
identifier[c_dir] [ literal[string] ]= identifier[low]
keyword[if] identifier[close] :
identifier[c_dir] [ literal[string] ]= identifier[close]
keyword[if] identifier[volume] :
identifier[c_dir] [ literal[string] ]= identifier[volume]
keyword[for] identifier[v] keyword[in] identifier[list] ( identifier[c_dir] . identifier[values] ()):
keyword[if] identifier[v] keyword[not] keyword[in] identifier[cnames] :
keyword[raise] identifier[StudyError] ( literal[string] . identifier[format] ( identifier[v] ))
keyword[if] identifier[validate] :
identifier[errs] =[]
identifier[val] = identifier[validate] . identifier[lower] ()
identifier[s_names] = identifier[dict] ([( identifier[_] [ literal[int] ], identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[ohlcv] ])
identifier[cols] =[ identifier[_] [ literal[int] ] keyword[for] identifier[_] keyword[in] identifier[c_dir] . identifier[keys] ()]
keyword[for] identifier[_] keyword[in] identifier[val] :
keyword[if] identifier[_] keyword[not] keyword[in] identifier[cols] :
identifier[errs] . identifier[append] ( identifier[s_names] [ identifier[_] ])
keyword[if] identifier[errs] :
keyword[raise] identifier[StudyError] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[errs] )))
keyword[return] identifier[c_dir] | def _ohlc_dict(df_or_figure, open='', high='', low='', close='', volume='', validate='', **kwargs):
"""
Returns a dictionary with the actual column names that
correspond to each of the OHLCV values.
df_or_figure : DataFrame or Figure
open : string
Column name to be used for OPEN values
high : string
Column name to be used for HIGH values
low : string
Column name to be used for LOW values
close : string
Column name to be used for CLOSE values
volume : string
Column name to be used for VOLUME values
validate : string
Validates that the stated column exists
Example:
validate='ohv' | Will ensure Open, High
and close values exist.
"""
c_dir = {}
ohlcv = ['open', 'high', 'low', 'close', 'volume']
if type(df_or_figure) == pd.DataFrame:
cnames = df_or_figure.columns # depends on [control=['if'], data=[]]
elif type(df_or_figure) == Figure or type(df_or_figure) == dict:
cnames = df_or_figure.axis['ref'].keys() # depends on [control=['if'], data=[]]
elif type(df_or_figure) == pd.Series:
cnames = [df_or_figure.name] # depends on [control=['if'], data=[]]
c_min = dict([(v.lower(), v) for v in cnames])
for _ in ohlcv:
if _ in c_min.keys():
c_dir[_] = c_min[_] # depends on [control=['if'], data=['_']]
else:
for c in cnames:
if _ in c.lower():
c_dir[_] = c # depends on [control=['if'], data=['_']] # depends on [control=['for'], data=['c']] # depends on [control=['for'], data=['_']]
if open:
c_dir['open'] = open # depends on [control=['if'], data=[]]
if high:
c_dir['high'] = high # depends on [control=['if'], data=[]]
if low:
c_dir['low'] = low # depends on [control=['if'], data=[]]
if close:
c_dir['close'] = close # depends on [control=['if'], data=[]]
if volume:
c_dir['volume'] = volume # depends on [control=['if'], data=[]]
for v in list(c_dir.values()):
if v not in cnames:
raise StudyError('{0} is not a valid column name'.format(v)) # depends on [control=['if'], data=['v']] # depends on [control=['for'], data=['v']]
if validate:
errs = []
val = validate.lower()
s_names = dict([(_[0], _) for _ in ohlcv])
cols = [_[0] for _ in c_dir.keys()]
for _ in val:
if _ not in cols:
errs.append(s_names[_]) # depends on [control=['if'], data=['_']] # depends on [control=['for'], data=['_']]
if errs:
raise StudyError('Missing Columns: {0}'.format(', '.join(errs))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return c_dir |
def minor_residues_per_turn(self, minor_repeat=None):
        """Number of residues per turn of the minor helix.

        Parameters
        ----------
        minor_repeat : float, optional
            Hydrophobic repeat of the minor helix. If ``None``, the
            tabulated value for ``self.minor_helix_type`` is returned.

        Returns
        -------
        minor_rpt : float
            Residues per turn of the minor helix.
        """
        if minor_repeat is None:
            # Fall back to the canonical value for this helix type.
            return _helix_parameters[self.minor_helix_type][0]
        # Precession angle in radians over one hydrophobic repeat.
        precession = self.curve.t_from_arc_length(
            minor_repeat * self.minor_rise_per_residue)
        # Flip the sign once per reversal: overall orientation, and a
        # handedness mismatch between the major and minor helices.
        if self.orientation == -1:
            precession = -precession
        if self.major_handedness != self.minor_handedness:
            precession = -precession
        full_turn = 2 * numpy.pi
        return (minor_repeat * full_turn) / (full_turn + precession)
constant[Calculates the number of residues per turn of the minor helix.
Parameters
----------
minor_repeat : float, optional
Hydrophobic repeat of the minor helix.
Returns
-------
minor_rpt : float
Residues per turn of the minor helix.
]
if compare[name[minor_repeat] is constant[None]] begin[:]
variable[minor_rpt] assign[=] call[call[name[_helix_parameters]][name[self].minor_helix_type]][constant[0]]
return[name[minor_rpt]] | keyword[def] identifier[minor_residues_per_turn] ( identifier[self] , identifier[minor_repeat] = keyword[None] ):
literal[string]
keyword[if] identifier[minor_repeat] keyword[is] keyword[None] :
identifier[minor_rpt] = identifier[_helix_parameters] [ identifier[self] . identifier[minor_helix_type] ][ literal[int] ]
keyword[else] :
identifier[precession] = identifier[self] . identifier[curve] . identifier[t_from_arc_length] (
identifier[minor_repeat] * identifier[self] . identifier[minor_rise_per_residue] )
keyword[if] identifier[self] . identifier[orientation] ==- literal[int] :
identifier[precession] =- identifier[precession]
keyword[if] identifier[self] . identifier[major_handedness] != identifier[self] . identifier[minor_handedness] :
identifier[precession] =- identifier[precession]
identifier[minor_rpt] =(( identifier[minor_repeat] * identifier[numpy] . identifier[pi] * literal[int] )/
(( literal[int] * identifier[numpy] . identifier[pi] )+ identifier[precession] ))
keyword[return] identifier[minor_rpt] | def minor_residues_per_turn(self, minor_repeat=None):
"""Calculates the number of residues per turn of the minor helix.
Parameters
----------
minor_repeat : float, optional
Hydrophobic repeat of the minor helix.
Returns
-------
minor_rpt : float
Residues per turn of the minor helix.
"""
if minor_repeat is None:
minor_rpt = _helix_parameters[self.minor_helix_type][0] # depends on [control=['if'], data=[]]
else:
# precession angle in radians
precession = self.curve.t_from_arc_length(minor_repeat * self.minor_rise_per_residue)
if self.orientation == -1:
precession = -precession # depends on [control=['if'], data=[]]
if self.major_handedness != self.minor_handedness:
precession = -precession # depends on [control=['if'], data=[]]
minor_rpt = minor_repeat * numpy.pi * 2 / (2 * numpy.pi + precession)
return minor_rpt |
def createTable(self, tableName, strFields):
        """Create a table if it does not already exist.

        Parameters
        ----------
        tableName : str
            Name of the table to create.
        strFields : str
            Comma-separated column definitions, as used inside a
            CREATE TABLE statement.

        Returns
        -------
        bool
            True if the table was created, False if it already existed.
            (The previous docstring wrongly claimed a cursor / None was
            returned.)
        """
        if self.tableExits(tableName):
            return False
        # NOTE(review): tableName/strFields are interpolated directly into
        # the SQL text -- callers must not pass untrusted input here.
        sql = 'CREATE TABLE %s ( %s)' % (tableName, strFields)
        self.execute(sql)
        self.tables.add(tableName)
        return True
constant[creates a table and resturns the ursor, if the table already exists returns None]
if <ast.UnaryOp object at 0x7da1b0ac07f0> begin[:]
variable[sql] assign[=] binary_operation[constant[CREATE TABLE %s ( %s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0ac0250>, <ast.Name object at 0x7da1b0ac0190>]]]
call[name[self].execute, parameter[name[sql]]]
call[name[self].tables.add, parameter[name[tableName]]]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[createTable] ( identifier[self] , identifier[tableName] , identifier[strFields] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[tableExits] ( identifier[tableName] ):
identifier[sql] = literal[string] %( identifier[tableName] , identifier[strFields] )
identifier[self] . identifier[execute] ( identifier[sql] )
identifier[self] . identifier[tables] . identifier[add] ( identifier[tableName] )
keyword[return] keyword[True]
keyword[return] keyword[False] | def createTable(self, tableName, strFields):
"""creates a table and resturns the ursor, if the table already exists returns None"""
if not self.tableExits(tableName):
sql = 'CREATE TABLE %s ( %s)' % (tableName, strFields)
self.execute(sql)
self.tables.add(tableName)
return True # depends on [control=['if'], data=[]]
return False |
def delete_log_entry(self, log_entry_id):
        """Deletes a ``LogEntry``.

        arg: log_entry_id (osid.id.Id): the ``Id`` of the
                ``log_entry_id`` to remove
        raise: NotFound - ``log_entry_id`` not found
        raise: NullArgument - ``log_entry_id`` is ``null``
        raise: OperationFailed - unable to complete request
        raise: PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*
        """
        # Template: osid.resource.ResourceAdminSession.delete_resource_template
        collection = JSONClientValidated('logging',
                                         collection='LogEntry',
                                         runtime=self._runtime)
        if not isinstance(log_entry_id, ABCId):
            raise errors.InvalidArgument('the argument is not a valid OSID Id')
        # Look the entry up under the current view filter, so only entries
        # visible in this view can be deleted.
        query = {'_id': ObjectId(log_entry_id.get_identifier())}
        query.update(self._view_filter())
        log_entry_map = collection.find_one(query)
        # Give the object a chance to run its own clean-up hooks before the
        # underlying document is removed.
        log_entry = objects.LogEntry(osid_object_map=log_entry_map,
                                     runtime=self._runtime,
                                     proxy=self._proxy)
        log_entry._delete()
        collection.delete_one({'_id': ObjectId(log_entry_id.get_identifier())})
constant[Deletes a ``LogEntry``.
arg: log_entry_id (osid.id.Id): the ``Id`` of the
``log_entry_id`` to remove
raise: NotFound - ``log_entry_id`` not found
raise: NullArgument - ``log_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[logging]]]
if <ast.UnaryOp object at 0x7da18c4cd360> begin[:]
<ast.Raise object at 0x7da20c6c5db0>
variable[log_entry_map] assign[=] call[name[collection].find_one, parameter[call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da20c6c7970>], [<ast.Call object at 0x7da20c6c5ab0>]]]]]]
call[call[name[objects].LogEntry, parameter[]]._delete, parameter[]]
call[name[collection].delete_one, parameter[dictionary[[<ast.Constant object at 0x7da20c6c7430>], [<ast.Call object at 0x7da20c6c58d0>]]]] | keyword[def] identifier[delete_log_entry] ( identifier[self] , identifier[log_entry_id] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[log_entry_id] , identifier[ABCId] ):
keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] )
identifier[log_entry_map] = identifier[collection] . identifier[find_one] (
identifier[dict] ({ literal[string] : identifier[ObjectId] ( identifier[log_entry_id] . identifier[get_identifier] ())},
** identifier[self] . identifier[_view_filter] ()))
identifier[objects] . identifier[LogEntry] ( identifier[osid_object_map] = identifier[log_entry_map] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] ). identifier[_delete] ()
identifier[collection] . identifier[delete_one] ({ literal[string] : identifier[ObjectId] ( identifier[log_entry_id] . identifier[get_identifier] ())}) | def delete_log_entry(self, log_entry_id):
"""Deletes a ``LogEntry``.
arg: log_entry_id (osid.id.Id): the ``Id`` of the
``log_entry_id`` to remove
raise: NotFound - ``log_entry_id`` not found
raise: NullArgument - ``log_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.delete_resource_template
collection = JSONClientValidated('logging', collection='LogEntry', runtime=self._runtime)
if not isinstance(log_entry_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id') # depends on [control=['if'], data=[]]
log_entry_map = collection.find_one(dict({'_id': ObjectId(log_entry_id.get_identifier())}, **self._view_filter()))
objects.LogEntry(osid_object_map=log_entry_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(log_entry_id.get_identifier())}) |
def remove_profile(self):
        """Remove the current profile.

        Shows a confirmation dialog first; nothing happens on Cancel.
        """
        profile_name = self.profile_combo.currentText()
        # Ask the user to confirm before destroying the profile.
        # noinspection PyTypeChecker
        button_selected = QMessageBox.warning(
            None,
            'Remove Profile',
            self.tr('Remove %s.') % profile_name,
            QMessageBox.Ok,
            QMessageBox.Cancel
        )
        if button_selected == QMessageBox.Ok:
            # Drop the entry from the combo box; the combo then makes a
            # neighbouring entry current.
            self.profile_combo.removeItem(
                self.profile_combo.currentIndex()
            )
            # Remove the stored minimum-needs profile by name.
            self.minimum_needs.remove_profile(profile_name)
            # currentIndex() is deliberately re-read *after* the removal,
            # so this loads whichever profile became current.
            self.select_profile(self.profile_combo.currentIndex())
constant[Remove the current profile.
Make sure the user is sure.
]
variable[profile_name] assign[=] call[name[self].profile_combo.currentText, parameter[]]
variable[button_selected] assign[=] call[name[QMessageBox].warning, parameter[constant[None], constant[Remove Profile], binary_operation[call[name[self].tr, parameter[constant[Remove %s.]]] <ast.Mod object at 0x7da2590d6920> name[profile_name]], name[QMessageBox].Ok, name[QMessageBox].Cancel]]
if compare[name[button_selected] equal[==] name[QMessageBox].Ok] begin[:]
call[name[self].profile_combo.removeItem, parameter[call[name[self].profile_combo.currentIndex, parameter[]]]]
call[name[self].minimum_needs.remove_profile, parameter[name[profile_name]]]
call[name[self].select_profile, parameter[call[name[self].profile_combo.currentIndex, parameter[]]]] | keyword[def] identifier[remove_profile] ( identifier[self] ):
literal[string]
identifier[profile_name] = identifier[self] . identifier[profile_combo] . identifier[currentText] ()
identifier[button_selected] = identifier[QMessageBox] . identifier[warning] (
keyword[None] ,
literal[string] ,
identifier[self] . identifier[tr] ( literal[string] )% identifier[profile_name] ,
identifier[QMessageBox] . identifier[Ok] ,
identifier[QMessageBox] . identifier[Cancel]
)
keyword[if] identifier[button_selected] == identifier[QMessageBox] . identifier[Ok] :
identifier[self] . identifier[profile_combo] . identifier[removeItem] (
identifier[self] . identifier[profile_combo] . identifier[currentIndex] ()
)
identifier[self] . identifier[minimum_needs] . identifier[remove_profile] ( identifier[profile_name] )
identifier[self] . identifier[select_profile] ( identifier[self] . identifier[profile_combo] . identifier[currentIndex] ()) | def remove_profile(self):
"""Remove the current profile.
Make sure the user is sure.
"""
profile_name = self.profile_combo.currentText()
# noinspection PyTypeChecker
button_selected = QMessageBox.warning(None, 'Remove Profile', self.tr('Remove %s.') % profile_name, QMessageBox.Ok, QMessageBox.Cancel)
if button_selected == QMessageBox.Ok:
self.profile_combo.removeItem(self.profile_combo.currentIndex())
self.minimum_needs.remove_profile(profile_name)
self.select_profile(self.profile_combo.currentIndex()) # depends on [control=['if'], data=[]] |
def close(self):
        """
        Closes this VirtualBox VM.

        Old-style (``yield from``) coroutine: releases the console TCP
        port and all UDP tunnel ports, stops the VM, and for a linked
        clone detaches its HDDs and unregisters the VM from VirtualBox.
        """
        if self._closed:
            # VM is already closed
            return
        if not (yield from super().close()):
            return False
        log.debug("VirtualBox VM '{name}' [{id}] is closing".format(name=self.name, id=self.id))
        # Give the console TCP port back to the project's port manager.
        if self._console:
            self._manager.port_manager.release_tcp_port(self._console, self._project)
            self._console = None
        # Release the UDP port of every UDP NIO attached to an adapter.
        for adapter in self._ethernet_adapters.values():
            if adapter is not None:
                for nio in adapter.ports.values():
                    if nio and isinstance(nio, NIOUDP):
                        self.manager.port_manager.release_udp_port(nio.lport, self._project)
        # Each local tunnel is a pair of NIOs; release both endpoints.
        for udp_tunnel in self._local_udp_tunnels.values():
            self.manager.port_manager.release_udp_port(udp_tunnel[0].lport, self._project)
            self.manager.port_manager.release_udp_port(udp_tunnel[1].lport, self._project)
        self._local_udp_tunnels = {}
        # Disable ACPI shutdown so stop() powers the VM off directly.
        self.acpi_shutdown = False
        yield from self.stop()
        if self.linked_clone:
            # Persist the linked HDD layout, then detach every HDD so the
            # VM can be unregistered cleanly.
            hdd_table = yield from self.save_linked_hdds_info()
            for hdd in hdd_table.copy():
                log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name,
                                                                                                           id=self.id,
                                                                                                           controller=hdd["controller"],
                                                                                                           port=hdd["port"],
                                                                                                           device=hdd["device"]))
                try:
                    # "--medium none" detaches the disk from the controller.
                    yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium none'.format(hdd["controller"],
                                                                                                                              hdd["port"],
                                                                                                                              hdd["device"]))
                except VirtualBoxError as e:
                    # Best effort: log and keep detaching the remaining HDDs.
                    # NOTE(review): log.warn is deprecated; log.warning preferred.
                    log.warn("VirtualBox VM '{name}' [{id}] error detaching HDD {controller} {port} {device}: {error}".format(name=self.name,
                                                                                                                              id=self.id,
                                                                                                                              controller=hdd["controller"],
                                                                                                                              port=hdd["port"],
                                                                                                                              device=hdd["device"],
                                                                                                                              error=e))
                    continue
            log.info("VirtualBox VM '{name}' [{id}] unregistering".format(name=self.name, id=self.id))
            yield from self.manager.execute("unregistervm", [self._name])
        log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id))
        self._closed = True
constant[
Closes this VirtualBox VM.
]
if name[self]._closed begin[:]
return[None]
if <ast.UnaryOp object at 0x7da207f00280> begin[:]
return[constant[False]]
call[name[log].debug, parameter[call[constant[VirtualBox VM '{name}' [{id}] is closing].format, parameter[]]]]
if name[self]._console begin[:]
call[name[self]._manager.port_manager.release_tcp_port, parameter[name[self]._console, name[self]._project]]
name[self]._console assign[=] constant[None]
for taget[name[adapter]] in starred[call[name[self]._ethernet_adapters.values, parameter[]]] begin[:]
if compare[name[adapter] is_not constant[None]] begin[:]
for taget[name[nio]] in starred[call[name[adapter].ports.values, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da207f03f10> begin[:]
call[name[self].manager.port_manager.release_udp_port, parameter[name[nio].lport, name[self]._project]]
for taget[name[udp_tunnel]] in starred[call[name[self]._local_udp_tunnels.values, parameter[]]] begin[:]
call[name[self].manager.port_manager.release_udp_port, parameter[call[name[udp_tunnel]][constant[0]].lport, name[self]._project]]
call[name[self].manager.port_manager.release_udp_port, parameter[call[name[udp_tunnel]][constant[1]].lport, name[self]._project]]
name[self]._local_udp_tunnels assign[=] dictionary[[], []]
name[self].acpi_shutdown assign[=] constant[False]
<ast.YieldFrom object at 0x7da207f000d0>
if name[self].linked_clone begin[:]
variable[hdd_table] assign[=] <ast.YieldFrom object at 0x7da2054a6500>
for taget[name[hdd]] in starred[call[name[hdd_table].copy, parameter[]]] begin[:]
call[name[log].info, parameter[call[constant[VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}].format, parameter[]]]]
<ast.Try object at 0x7da2047ebbb0>
call[name[log].info, parameter[call[constant[VirtualBox VM '{name}' [{id}] unregistering].format, parameter[]]]]
<ast.YieldFrom object at 0x7da204567520>
call[name[log].info, parameter[call[constant[VirtualBox VM '{name}' [{id}] closed].format, parameter[]]]]
name[self]._closed assign[=] constant[True] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_closed] :
keyword[return]
keyword[if] keyword[not] ( keyword[yield] keyword[from] identifier[super] (). identifier[close] ()):
keyword[return] keyword[False]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[name] , identifier[id] = identifier[self] . identifier[id] ))
keyword[if] identifier[self] . identifier[_console] :
identifier[self] . identifier[_manager] . identifier[port_manager] . identifier[release_tcp_port] ( identifier[self] . identifier[_console] , identifier[self] . identifier[_project] )
identifier[self] . identifier[_console] = keyword[None]
keyword[for] identifier[adapter] keyword[in] identifier[self] . identifier[_ethernet_adapters] . identifier[values] ():
keyword[if] identifier[adapter] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[nio] keyword[in] identifier[adapter] . identifier[ports] . identifier[values] ():
keyword[if] identifier[nio] keyword[and] identifier[isinstance] ( identifier[nio] , identifier[NIOUDP] ):
identifier[self] . identifier[manager] . identifier[port_manager] . identifier[release_udp_port] ( identifier[nio] . identifier[lport] , identifier[self] . identifier[_project] )
keyword[for] identifier[udp_tunnel] keyword[in] identifier[self] . identifier[_local_udp_tunnels] . identifier[values] ():
identifier[self] . identifier[manager] . identifier[port_manager] . identifier[release_udp_port] ( identifier[udp_tunnel] [ literal[int] ]. identifier[lport] , identifier[self] . identifier[_project] )
identifier[self] . identifier[manager] . identifier[port_manager] . identifier[release_udp_port] ( identifier[udp_tunnel] [ literal[int] ]. identifier[lport] , identifier[self] . identifier[_project] )
identifier[self] . identifier[_local_udp_tunnels] ={}
identifier[self] . identifier[acpi_shutdown] = keyword[False]
keyword[yield] keyword[from] identifier[self] . identifier[stop] ()
keyword[if] identifier[self] . identifier[linked_clone] :
identifier[hdd_table] = keyword[yield] keyword[from] identifier[self] . identifier[save_linked_hdds_info] ()
keyword[for] identifier[hdd] keyword[in] identifier[hdd_table] . identifier[copy] ():
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[name] ,
identifier[id] = identifier[self] . identifier[id] ,
identifier[controller] = identifier[hdd] [ literal[string] ],
identifier[port] = identifier[hdd] [ literal[string] ],
identifier[device] = identifier[hdd] [ literal[string] ]))
keyword[try] :
keyword[yield] keyword[from] identifier[self] . identifier[_storage_attach] ( literal[string] . identifier[format] ( identifier[hdd] [ literal[string] ],
identifier[hdd] [ literal[string] ],
identifier[hdd] [ literal[string] ]))
keyword[except] identifier[VirtualBoxError] keyword[as] identifier[e] :
identifier[log] . identifier[warn] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[name] ,
identifier[id] = identifier[self] . identifier[id] ,
identifier[controller] = identifier[hdd] [ literal[string] ],
identifier[port] = identifier[hdd] [ literal[string] ],
identifier[device] = identifier[hdd] [ literal[string] ],
identifier[error] = identifier[e] ))
keyword[continue]
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[name] , identifier[id] = identifier[self] . identifier[id] ))
keyword[yield] keyword[from] identifier[self] . identifier[manager] . identifier[execute] ( literal[string] ,[ identifier[self] . identifier[_name] ])
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[name] , identifier[id] = identifier[self] . identifier[id] ))
identifier[self] . identifier[_closed] = keyword[True] | def close(self):
"""
Closes this VirtualBox VM.
"""
if self._closed:
# VM is already closed
return # depends on [control=['if'], data=[]]
if not (yield from super().close()):
return False # depends on [control=['if'], data=[]]
log.debug("VirtualBox VM '{name}' [{id}] is closing".format(name=self.name, id=self.id))
if self._console:
self._manager.port_manager.release_tcp_port(self._console, self._project)
self._console = None # depends on [control=['if'], data=[]]
for adapter in self._ethernet_adapters.values():
if adapter is not None:
for nio in adapter.ports.values():
if nio and isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['nio']] # depends on [control=['if'], data=['adapter']] # depends on [control=['for'], data=['adapter']]
for udp_tunnel in self._local_udp_tunnels.values():
self.manager.port_manager.release_udp_port(udp_tunnel[0].lport, self._project)
self.manager.port_manager.release_udp_port(udp_tunnel[1].lport, self._project) # depends on [control=['for'], data=['udp_tunnel']]
self._local_udp_tunnels = {}
self.acpi_shutdown = False
yield from self.stop()
if self.linked_clone:
hdd_table = (yield from self.save_linked_hdds_info())
for hdd in hdd_table.copy():
log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name, id=self.id, controller=hdd['controller'], port=hdd['port'], device=hdd['device']))
try:
yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium none'.format(hdd['controller'], hdd['port'], hdd['device'])) # depends on [control=['try'], data=[]]
except VirtualBoxError as e:
log.warn("VirtualBox VM '{name}' [{id}] error detaching HDD {controller} {port} {device}: {error}".format(name=self.name, id=self.id, controller=hdd['controller'], port=hdd['port'], device=hdd['device'], error=e))
continue # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['hdd']]
log.info("VirtualBox VM '{name}' [{id}] unregistering".format(name=self.name, id=self.id))
yield from self.manager.execute('unregistervm', [self._name]) # depends on [control=['if'], data=[]]
log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id))
self._closed = True |
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_config_port_type(self, **kwargs):
    """Auto Generated Code

    Build the XML payload for the ``fcoe-intf-config-port-type`` leaf of a
    ``fcoe_get_interface`` RPC output and hand the assembled element tree to
    the callback.

    Keyword Args:
        fcoe_intf_fcoe_port_id: key identifying the FCoE interface (required).
        fcoe_intf_config_port_type: configured port-type value (required).
        callback: optional callable invoked with the assembled ``config``
            element; defaults to ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled element.

    Raises:
        KeyError: if either required keyword argument is missing.
    """
    # The generated code allocated a throwaway ET.Element("config") that was
    # immediately overwritten; build the real root element directly instead.
    config = ET.Element("fcoe_get_interface")
    output = ET.SubElement(config, "output")
    fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
    fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
    fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
    fcoe_intf_config_port_type = ET.SubElement(fcoe_intf_list, "fcoe-intf-config-port-type")
    fcoe_intf_config_port_type.text = kwargs.pop('fcoe_intf_config_port_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[fcoe_get_interface] assign[=] call[name[ET].Element, parameter[constant[fcoe_get_interface]]]
variable[config] assign[=] name[fcoe_get_interface]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[fcoe_get_interface], constant[output]]]
variable[fcoe_intf_list] assign[=] call[name[ET].SubElement, parameter[name[output], constant[fcoe-intf-list]]]
variable[fcoe_intf_fcoe_port_id_key] assign[=] call[name[ET].SubElement, parameter[name[fcoe_intf_list], constant[fcoe-intf-fcoe-port-id]]]
name[fcoe_intf_fcoe_port_id_key].text assign[=] call[name[kwargs].pop, parameter[constant[fcoe_intf_fcoe_port_id]]]
variable[fcoe_intf_config_port_type] assign[=] call[name[ET].SubElement, parameter[name[fcoe_intf_list], constant[fcoe-intf-config-port-type]]]
name[fcoe_intf_config_port_type].text assign[=] call[name[kwargs].pop, parameter[constant[fcoe_intf_config_port_type]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_config_port_type] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[fcoe_get_interface] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[fcoe_get_interface]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_get_interface] , literal[string] )
identifier[fcoe_intf_list] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[fcoe_intf_fcoe_port_id_key] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_intf_list] , literal[string] )
identifier[fcoe_intf_fcoe_port_id_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[fcoe_intf_config_port_type] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_intf_list] , literal[string] )
identifier[fcoe_intf_config_port_type] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_config_port_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
fcoe_get_interface = ET.Element('fcoe_get_interface')
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, 'output')
fcoe_intf_list = ET.SubElement(output, 'fcoe-intf-list')
fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, 'fcoe-intf-fcoe-port-id')
fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
fcoe_intf_config_port_type = ET.SubElement(fcoe_intf_list, 'fcoe-intf-config-port-type')
fcoe_intf_config_port_type.text = kwargs.pop('fcoe_intf_config_port_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def example_generator(self, encoder, tmp_dir, task_id):
  """Generator for examples.

  Args:
    encoder: a TextEncoder
    tmp_dir: a string
    task_id: an integer
  Yields:
    feature dictionaries, each holding a "targets" token list of exactly
    self.sequence_length entries.
  """
  filepaths = self.text_filepaths_for_task(tmp_dir, task_id)
  # Dev shards (task ids at or past the training range) are size-capped so
  # evaluation data stays bounded; training shards read everything.
  if task_id < self.num_train_shards:
    max_chars_per_file = None
  else:
    max_chars_per_file = self.max_dev_chars // (
        self.num_dev_shards * len(filepaths))
  buffered = []
  for text in self.file_generator(
      filepaths, max_chars_per_file=max_chars_per_file):
    buffered.extend(encoder.encode(text))
    consumed = 0
    # Emit every complete window currently available in the buffer.
    while consumed + self.sequence_length <= len(buffered):
      window = buffered[consumed:consumed + self.sequence_length]
      yield {"targets": window}
      consumed += self.sequence_length
    if consumed:
      # Keep only the leftover tokens for the next file's prefix.
      buffered = buffered[consumed:]
  if self.remainder_policy == "pad":
    if buffered:
      padding = [0] * (self.sequence_length - len(buffered))
      yield {"targets": buffered + padding}
  else:
    assert self.remainder_policy == "drop"
constant[Generator for examples.
Args:
encoder: a TextEncoder
tmp_dir: a string
task_id: an integer
Yields:
feature dictionaries
]
variable[filepaths] assign[=] call[name[self].text_filepaths_for_task, parameter[name[tmp_dir], name[task_id]]]
if compare[name[task_id] greater_or_equal[>=] name[self].num_train_shards] begin[:]
variable[max_chars_per_file] assign[=] binary_operation[name[self].max_dev_chars <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[name[self].num_dev_shards * call[name[len], parameter[name[filepaths]]]]]
variable[tokens] assign[=] list[[]]
for taget[name[ftext]] in starred[call[name[self].file_generator, parameter[name[filepaths]]]] begin[:]
call[name[tokens].extend, parameter[call[name[encoder].encode, parameter[name[ftext]]]]]
variable[pos] assign[=] constant[0]
while compare[binary_operation[name[pos] + name[self].sequence_length] less_or_equal[<=] call[name[len], parameter[name[tokens]]]] begin[:]
<ast.Yield object at 0x7da18c4cda50>
<ast.AugAssign object at 0x7da18c4cf910>
if compare[name[pos] greater[>] constant[0]] begin[:]
variable[tokens] assign[=] call[name[tokens]][<ast.Slice object at 0x7da18c4cd180>]
if compare[name[self].remainder_policy equal[==] constant[pad]] begin[:]
if name[tokens] begin[:]
variable[targets] assign[=] binary_operation[name[tokens] + binary_operation[list[[<ast.Constant object at 0x7da18c4ccb20>]] * binary_operation[name[self].sequence_length - call[name[len], parameter[name[tokens]]]]]]
<ast.Yield object at 0x7da18c4cfdf0> | keyword[def] identifier[example_generator] ( identifier[self] , identifier[encoder] , identifier[tmp_dir] , identifier[task_id] ):
literal[string]
identifier[filepaths] = identifier[self] . identifier[text_filepaths_for_task] ( identifier[tmp_dir] , identifier[task_id] )
keyword[if] identifier[task_id] >= identifier[self] . identifier[num_train_shards] :
identifier[max_chars_per_file] = identifier[self] . identifier[max_dev_chars] //(
identifier[self] . identifier[num_dev_shards] * identifier[len] ( identifier[filepaths] ))
keyword[else] :
identifier[max_chars_per_file] = keyword[None]
identifier[tokens] =[]
keyword[for] identifier[ftext] keyword[in] identifier[self] . identifier[file_generator] (
identifier[filepaths] , identifier[max_chars_per_file] = identifier[max_chars_per_file] ):
identifier[tokens] . identifier[extend] ( identifier[encoder] . identifier[encode] ( identifier[ftext] ))
identifier[pos] = literal[int]
keyword[while] identifier[pos] + identifier[self] . identifier[sequence_length] <= identifier[len] ( identifier[tokens] ):
keyword[yield] { literal[string] : identifier[tokens] [ identifier[pos] : identifier[pos] + identifier[self] . identifier[sequence_length] ]}
identifier[pos] += identifier[self] . identifier[sequence_length]
keyword[if] identifier[pos] > literal[int] :
identifier[tokens] = identifier[tokens] [ identifier[pos] :]
keyword[if] identifier[self] . identifier[remainder_policy] == literal[string] :
keyword[if] identifier[tokens] :
identifier[targets] = identifier[tokens] +[ literal[int] ]*( identifier[self] . identifier[sequence_length] - identifier[len] ( identifier[tokens] ))
keyword[yield] { literal[string] : identifier[targets] }
keyword[else] :
keyword[assert] identifier[self] . identifier[remainder_policy] == literal[string] | def example_generator(self, encoder, tmp_dir, task_id):
"""Generator for examples.
Args:
encoder: a TextEncoder
tmp_dir: a string
task_id: an integer
Yields:
feature dictionaries
"""
filepaths = self.text_filepaths_for_task(tmp_dir, task_id)
if task_id >= self.num_train_shards:
# this is dev data - limit the total length.
max_chars_per_file = self.max_dev_chars // (self.num_dev_shards * len(filepaths)) # depends on [control=['if'], data=[]]
else:
max_chars_per_file = None
tokens = []
for ftext in self.file_generator(filepaths, max_chars_per_file=max_chars_per_file):
tokens.extend(encoder.encode(ftext))
pos = 0
while pos + self.sequence_length <= len(tokens):
yield {'targets': tokens[pos:pos + self.sequence_length]}
pos += self.sequence_length # depends on [control=['while'], data=[]]
if pos > 0:
tokens = tokens[pos:] # depends on [control=['if'], data=['pos']] # depends on [control=['for'], data=['ftext']]
if self.remainder_policy == 'pad':
if tokens:
targets = tokens + [0] * (self.sequence_length - len(tokens))
yield {'targets': targets} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
assert self.remainder_policy == 'drop' |
def get_task_doc(self, path):
        """
        Get the entire task doc for a path, including any post-processing.

        Parameters:
            path: directory expected to contain VASP output files
                (``vasprun.xml*`` variants, optionally a ``STOPCAR``).

        Returns:
            The task doc built by ``generate_doc`` (falling back to
            ``process_killed_run``), after ``post_process`` has run on it.

        Raises:
            ValueError: if no VASP files are found under ``path``.
        """
        logger.info("Getting task doc for base dir :{}".format(path))
        files = os.listdir(path)
        # Maps run label (e.g. a name from self.runs, or 'standard') to the
        # vasprun file path relative to ``path``; insertion order is the
        # iteration order of self.runs, hence the OrderedDict.
        vasprun_files = OrderedDict()
        if "STOPCAR" in files:
            #Stopped runs. Try to parse as much as possible.
            logger.info(path + " contains stopped run")
        for r in self.runs:
            if r in files: #try subfolder schema
                # Run stored as a subdirectory: look for vasprun.xml inside it.
                for f in os.listdir(os.path.join(path, r)):
                    if fnmatch(f, "vasprun.xml*"):
                        vasprun_files[r] = os.path.join(r, f)
            else: #try extension schema
                # Run stored as vasprun.xml.<run-name>* directly in ``path``.
                for f in files:
                    if fnmatch(f, "vasprun.xml.{}*".format(r)):
                        vasprun_files[r] = f
        if len(vasprun_files) == 0:
            # Neither schema matched any configured run: fall back to any
            # vasprun file in the folder and label it 'standard'.
            for f in files: #get any vasprun from the folder
                if fnmatch(f, "vasprun.xml*") and \
                        f not in vasprun_files.values():
                    vasprun_files['standard'] = f
        if len(vasprun_files) > 0:
            d = self.generate_doc(path, vasprun_files)
            if not d:
                # generate_doc returned a falsy doc: treat as a killed run.
                d = self.process_killed_run(path)
            self.post_process(path, d)
        elif (not (path.endswith("relax1") or
                   path.endswith("relax2"))) and contains_vasp_input(path):
            #If not Materials Project style, process as a killed run.
            logger.warning(path + " contains killed run")
            d = self.process_killed_run(path)
            self.post_process(path, d)
        else:
            raise ValueError("No VASP files found!")
        return d
constant[
Get the entire task doc for a path, including any post-processing.
]
call[name[logger].info, parameter[call[constant[Getting task doc for base dir :{}].format, parameter[name[path]]]]]
variable[files] assign[=] call[name[os].listdir, parameter[name[path]]]
variable[vasprun_files] assign[=] call[name[OrderedDict], parameter[]]
if compare[constant[STOPCAR] in name[files]] begin[:]
call[name[logger].info, parameter[binary_operation[name[path] + constant[ contains stopped run]]]]
for taget[name[r]] in starred[name[self].runs] begin[:]
if compare[name[r] in name[files]] begin[:]
for taget[name[f]] in starred[call[name[os].listdir, parameter[call[name[os].path.join, parameter[name[path], name[r]]]]]] begin[:]
if call[name[fnmatch], parameter[name[f], constant[vasprun.xml*]]] begin[:]
call[name[vasprun_files]][name[r]] assign[=] call[name[os].path.join, parameter[name[r], name[f]]]
if compare[call[name[len], parameter[name[vasprun_files]]] equal[==] constant[0]] begin[:]
for taget[name[f]] in starred[name[files]] begin[:]
if <ast.BoolOp object at 0x7da18fe902b0> begin[:]
call[name[vasprun_files]][constant[standard]] assign[=] name[f]
if compare[call[name[len], parameter[name[vasprun_files]]] greater[>] constant[0]] begin[:]
variable[d] assign[=] call[name[self].generate_doc, parameter[name[path], name[vasprun_files]]]
if <ast.UnaryOp object at 0x7da18fe92800> begin[:]
variable[d] assign[=] call[name[self].process_killed_run, parameter[name[path]]]
call[name[self].post_process, parameter[name[path], name[d]]]
return[name[d]] | keyword[def] identifier[get_task_doc] ( identifier[self] , identifier[path] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[path] ))
identifier[files] = identifier[os] . identifier[listdir] ( identifier[path] )
identifier[vasprun_files] = identifier[OrderedDict] ()
keyword[if] literal[string] keyword[in] identifier[files] :
identifier[logger] . identifier[info] ( identifier[path] + literal[string] )
keyword[for] identifier[r] keyword[in] identifier[self] . identifier[runs] :
keyword[if] identifier[r] keyword[in] identifier[files] :
keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[r] )):
keyword[if] identifier[fnmatch] ( identifier[f] , literal[string] ):
identifier[vasprun_files] [ identifier[r] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[r] , identifier[f] )
keyword[else] :
keyword[for] identifier[f] keyword[in] identifier[files] :
keyword[if] identifier[fnmatch] ( identifier[f] , literal[string] . identifier[format] ( identifier[r] )):
identifier[vasprun_files] [ identifier[r] ]= identifier[f]
keyword[if] identifier[len] ( identifier[vasprun_files] )== literal[int] :
keyword[for] identifier[f] keyword[in] identifier[files] :
keyword[if] identifier[fnmatch] ( identifier[f] , literal[string] ) keyword[and] identifier[f] keyword[not] keyword[in] identifier[vasprun_files] . identifier[values] ():
identifier[vasprun_files] [ literal[string] ]= identifier[f]
keyword[if] identifier[len] ( identifier[vasprun_files] )> literal[int] :
identifier[d] = identifier[self] . identifier[generate_doc] ( identifier[path] , identifier[vasprun_files] )
keyword[if] keyword[not] identifier[d] :
identifier[d] = identifier[self] . identifier[process_killed_run] ( identifier[path] )
identifier[self] . identifier[post_process] ( identifier[path] , identifier[d] )
keyword[elif] ( keyword[not] ( identifier[path] . identifier[endswith] ( literal[string] ) keyword[or]
identifier[path] . identifier[endswith] ( literal[string] ))) keyword[and] identifier[contains_vasp_input] ( identifier[path] ):
identifier[logger] . identifier[warning] ( identifier[path] + literal[string] )
identifier[d] = identifier[self] . identifier[process_killed_run] ( identifier[path] )
identifier[self] . identifier[post_process] ( identifier[path] , identifier[d] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[d] | def get_task_doc(self, path):
"""
Get the entire task doc for a path, including any post-processing.
"""
logger.info('Getting task doc for base dir :{}'.format(path))
files = os.listdir(path)
vasprun_files = OrderedDict()
if 'STOPCAR' in files:
#Stopped runs. Try to parse as much as possible.
logger.info(path + ' contains stopped run') # depends on [control=['if'], data=[]]
for r in self.runs:
if r in files: #try subfolder schema
for f in os.listdir(os.path.join(path, r)):
if fnmatch(f, 'vasprun.xml*'):
vasprun_files[r] = os.path.join(r, f) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] # depends on [control=['if'], data=['r']]
else: #try extension schema
for f in files:
if fnmatch(f, 'vasprun.xml.{}*'.format(r)):
vasprun_files[r] = f # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] # depends on [control=['for'], data=['r']]
if len(vasprun_files) == 0:
for f in files: #get any vasprun from the folder
if fnmatch(f, 'vasprun.xml*') and f not in vasprun_files.values():
vasprun_files['standard'] = f # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] # depends on [control=['if'], data=[]]
if len(vasprun_files) > 0:
d = self.generate_doc(path, vasprun_files)
if not d:
d = self.process_killed_run(path) # depends on [control=['if'], data=[]]
self.post_process(path, d) # depends on [control=['if'], data=[]]
elif not (path.endswith('relax1') or path.endswith('relax2')) and contains_vasp_input(path):
#If not Materials Project style, process as a killed run.
logger.warning(path + ' contains killed run')
d = self.process_killed_run(path)
self.post_process(path, d) # depends on [control=['if'], data=[]]
else:
raise ValueError('No VASP files found!')
return d |
def set_split_extents_by_tile_max_bytes(self):
        """
        Computes a tile shape that fits within :attr:`max_tile_bytes`
        (honouring :attr:`max_tile_shape`, :attr:`sub_tile_shape` and
        :attr:`halo`), stores it on :attr:`tile_shape`, then derives the
        split extents (:attr:`split_begs` and :attr:`split_ends`) from
        that shape.
        """
        shape = calculate_tile_shape_for_max_bytes(
            array_shape=self.array_shape,
            array_itemsize=self.array_itemsize,
            max_tile_bytes=self.max_tile_bytes,
            max_tile_shape=self.max_tile_shape,
            sub_tile_shape=self.sub_tile_shape,
            halo=self.halo,
        )
        self.tile_shape = shape
        self.set_split_extents_by_tile_shape()
constant[
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
]
name[self].tile_shape assign[=] call[name[calculate_tile_shape_for_max_bytes], parameter[]]
call[name[self].set_split_extents_by_tile_shape, parameter[]] | keyword[def] identifier[set_split_extents_by_tile_max_bytes] ( identifier[self] ):
literal[string]
identifier[self] . identifier[tile_shape] = identifier[calculate_tile_shape_for_max_bytes] (
identifier[array_shape] = identifier[self] . identifier[array_shape] ,
identifier[array_itemsize] = identifier[self] . identifier[array_itemsize] ,
identifier[max_tile_bytes] = identifier[self] . identifier[max_tile_bytes] ,
identifier[max_tile_shape] = identifier[self] . identifier[max_tile_shape] ,
identifier[sub_tile_shape] = identifier[self] . identifier[sub_tile_shape] ,
identifier[halo] = identifier[self] . identifier[halo]
)
identifier[self] . identifier[set_split_extents_by_tile_shape] () | def set_split_extents_by_tile_max_bytes(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
"""
self.tile_shape = calculate_tile_shape_for_max_bytes(array_shape=self.array_shape, array_itemsize=self.array_itemsize, max_tile_bytes=self.max_tile_bytes, max_tile_shape=self.max_tile_shape, sub_tile_shape=self.sub_tile_shape, halo=self.halo)
self.set_split_extents_by_tile_shape() |
def next_trigger_frequency(self, utc_now=None):
        """ :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
        :return: datetime instance presenting next trigger time of the event """
        if utc_now is None:
            utc_now = datetime.utcnow()

        def first_matching_day(candidate):
            # Advance one day at a time until the weekday matches the
            # configured day-of-week (or any day is acceptable), then pin
            # the clock to the configured hour and minute.
            while not (self.day_of_week == EVERY_DAY
                       or candidate.weekday() == int(self.day_of_week)):
                candidate += timedelta(days=1)
            return candidate.replace(hour=self.time_of_day.hour,
                                     minute=self.time_of_day.minute)

        start = utc_now
        if utc_now.time() > self.time_of_day.time():
            # Today's trigger time has already passed; search from tomorrow.
            start = utc_now + timedelta(days=1)
        return first_matching_day(start)
constant[ :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: datetime instance presenting next trigger time of the event ]
if compare[name[utc_now] is constant[None]] begin[:]
variable[utc_now] assign[=] call[name[datetime].utcnow, parameter[]]
def function[wind_days, parameter[start_date]]:
while constant[True] begin[:]
if <ast.BoolOp object at 0x7da1b24401f0> begin[:]
return[call[name[start_date].replace, parameter[]]]
if compare[call[name[utc_now].time, parameter[]] greater[>] call[name[self].time_of_day.time, parameter[]]] begin[:]
return[call[name[wind_days], parameter[binary_operation[name[utc_now] + call[name[timedelta], parameter[]]]]]] | keyword[def] identifier[next_trigger_frequency] ( identifier[self] , identifier[utc_now] = keyword[None] ):
literal[string]
keyword[if] identifier[utc_now] keyword[is] keyword[None] :
identifier[utc_now] = identifier[datetime] . identifier[utcnow] ()
keyword[def] identifier[wind_days] ( identifier[start_date] ):
keyword[while] keyword[True] :
keyword[if] identifier[self] . identifier[day_of_week] == identifier[EVERY_DAY] keyword[or] identifier[start_date] . identifier[weekday] ()== identifier[int] ( identifier[self] . identifier[day_of_week] ):
keyword[return] identifier[start_date] . identifier[replace] ( identifier[hour] = identifier[self] . identifier[time_of_day] . identifier[hour] , identifier[minute] = identifier[self] . identifier[time_of_day] . identifier[minute] )
keyword[else] :
identifier[start_date] += identifier[timedelta] ( identifier[days] = literal[int] )
keyword[if] identifier[utc_now] . identifier[time] ()> identifier[self] . identifier[time_of_day] . identifier[time] ():
keyword[return] identifier[wind_days] ( identifier[utc_now] + identifier[timedelta] ( identifier[days] = literal[int] ))
keyword[else] :
keyword[return] identifier[wind_days] ( identifier[utc_now] ) | def next_trigger_frequency(self, utc_now=None):
""" :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: datetime instance presenting next trigger time of the event """
if utc_now is None:
utc_now = datetime.utcnow() # depends on [control=['if'], data=['utc_now']]
def wind_days(start_date):
while True:
if self.day_of_week == EVERY_DAY or start_date.weekday() == int(self.day_of_week):
return start_date.replace(hour=self.time_of_day.hour, minute=self.time_of_day.minute) # depends on [control=['if'], data=[]]
else:
start_date += timedelta(days=1) # depends on [control=['while'], data=[]]
if utc_now.time() > self.time_of_day.time():
return wind_days(utc_now + timedelta(days=1)) # depends on [control=['if'], data=[]]
else:
return wind_days(utc_now) |
def event(self, event): # pylint: disable-msg=R0201
        """Dispatch a connection-state stream event.

        Tags the event with this stream, logs it, and enqueues it on the
        ``event_queue`` taken from :attr:`settings`.

        Should not be called with self.lock acquired!

        :return: ``False`` (the event is never marked handled here).
        """
        event.stream = self
        logger.debug(u"Stream event: {0}".format(event))
        queue = self.settings["event_queue"]
        queue.put(event)
        return False
constant[Handle a stream event.
Called when connection state is changed.
Should not be called with self.lock acquired!
]
name[event].stream assign[=] name[self]
call[name[logger].debug, parameter[call[constant[Stream event: {0}].format, parameter[name[event]]]]]
call[call[name[self].settings][constant[event_queue]].put, parameter[name[event]]]
return[constant[False]] | keyword[def] identifier[event] ( identifier[self] , identifier[event] ):
literal[string]
identifier[event] . identifier[stream] = identifier[self]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[event] ))
identifier[self] . identifier[settings] [ literal[string] ]. identifier[put] ( identifier[event] )
keyword[return] keyword[False] | def event(self, event): # pylint: disable-msg=R0201
'Handle a stream event.\n\n Called when connection state is changed.\n\n Should not be called with self.lock acquired!\n '
event.stream = self
logger.debug(u'Stream event: {0}'.format(event))
self.settings['event_queue'].put(event)
return False |
def hexdump(src, length=8):
    """ Produce a string hexdump of src, for debug output.

    Empty/falsy input is returned as its str() form; otherwise each
    chunk of ``length`` characters becomes one '<offset> <hex bytes>' line.
    """
    if not src:
        return str(src)
    src = input_validate_str(src, 'src')
    lines = []
    offset = 0
    for chunk in group(src, length):
        hex_s = ' '.join("%02x" % ord(ch) for ch in chunk)
        lines.append("%04X %s\n" % (offset, hex_s))
        offset += length
    return ''.join(lines)
constant[ Produce a string hexdump of src, for debug output.]
if <ast.UnaryOp object at 0x7da1b120b250> begin[:]
return[call[name[str], parameter[name[src]]]]
variable[src] assign[=] call[name[input_validate_str], parameter[name[src], constant[src]]]
variable[offset] assign[=] constant[0]
variable[result] assign[=] constant[]
for taget[name[this]] in starred[call[name[group], parameter[name[src], name[length]]]] begin[:]
variable[hex_s] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b1208880>]]
<ast.AugAssign object at 0x7da1b120a860>
<ast.AugAssign object at 0x7da1b1209d20>
return[name[result]] | keyword[def] identifier[hexdump] ( identifier[src] , identifier[length] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[src] :
keyword[return] identifier[str] ( identifier[src] )
identifier[src] = identifier[input_validate_str] ( identifier[src] , literal[string] )
identifier[offset] = literal[int]
identifier[result] = literal[string]
keyword[for] identifier[this] keyword[in] identifier[group] ( identifier[src] , identifier[length] ):
identifier[hex_s] = literal[string] . identifier[join] ([ literal[string] % identifier[ord] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[this] ])
identifier[result] += literal[string] %( identifier[offset] , identifier[hex_s] )
identifier[offset] += identifier[length]
keyword[return] identifier[result] | def hexdump(src, length=8):
""" Produce a string hexdump of src, for debug output."""
if not src:
return str(src) # depends on [control=['if'], data=[]]
src = input_validate_str(src, 'src')
offset = 0
result = ''
for this in group(src, length):
hex_s = ' '.join(['%02x' % ord(x) for x in this])
result += '%04X %s\n' % (offset, hex_s)
offset += length # depends on [control=['for'], data=['this']]
return result |
def _lockcmd(subcmd, pkgname=None, **kwargs):
    '''
    Helper function for lock and unlock commands, because their syntax is identical.

    Run the lock/unlock command, and return a list of locked packages
    '''
    jail = kwargs.pop('jail', None)
    chroot = kwargs.pop('chroot', None)
    root = kwargs.pop('root', None)

    # Assemble: pkg [jail/chroot/root opts] <subcmd> -y --quiet --show-locked [pkgname]
    cmd = _pkg(jail, chroot, root)
    cmd.extend([subcmd, '-y', '--quiet', '--show-locked'])
    if pkgname:
        cmd.append(pkgname)

    out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
    if out['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered {0}ing packages'.format(subcmd),
            info={'result': out}
        )

    locked_pkgs = []
    for line in salt.utils.itertools.split(out['stdout'], '\n'):
        if not line:
            continue
        try:
            # Strip the trailing version: "name-1.2.3" -> "name".
            name = line.rsplit('-', 1)[0]
        except ValueError:
            continue
        locked_pkgs.append(name)

    log.debug('Locked packages: %s', ', '.join(locked_pkgs))
    return locked_pkgs
constant[
Helper function for lock and unlock commands, because their syntax is identical.
Run the lock/unlock command, and return a list of locked packages
]
variable[jail] assign[=] call[name[kwargs].pop, parameter[constant[jail], constant[None]]]
variable[chroot] assign[=] call[name[kwargs].pop, parameter[constant[chroot], constant[None]]]
variable[root] assign[=] call[name[kwargs].pop, parameter[constant[root], constant[None]]]
variable[locked_pkgs] assign[=] list[[]]
variable[cmd] assign[=] call[name[_pkg], parameter[name[jail], name[chroot], name[root]]]
call[name[cmd].append, parameter[name[subcmd]]]
call[name[cmd].append, parameter[constant[-y]]]
call[name[cmd].append, parameter[constant[--quiet]]]
call[name[cmd].append, parameter[constant[--show-locked]]]
if name[pkgname] begin[:]
call[name[cmd].append, parameter[name[pkgname]]]
variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
if compare[call[name[out]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1c15540>
for taget[name[line]] in starred[call[name[salt].utils.itertools.split, parameter[call[name[out]][constant[stdout]], constant[
]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1c160b0> begin[:]
continue
<ast.Try object at 0x7da1b1c155d0>
call[name[locked_pkgs].append, parameter[name[pkgname]]]
call[name[log].debug, parameter[constant[Locked packages: %s], call[constant[, ].join, parameter[name[locked_pkgs]]]]]
return[name[locked_pkgs]] | keyword[def] identifier[_lockcmd] ( identifier[subcmd] , identifier[pkgname] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[jail] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[chroot] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[root] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[locked_pkgs] =[]
identifier[cmd] = identifier[_pkg] ( identifier[jail] , identifier[chroot] , identifier[root] )
identifier[cmd] . identifier[append] ( identifier[subcmd] )
identifier[cmd] . identifier[append] ( literal[string] )
identifier[cmd] . identifier[append] ( literal[string] )
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[pkgname] :
identifier[cmd] . identifier[append] ( identifier[pkgname] )
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[output_loglevel] = literal[string] , identifier[python_shell] = keyword[False] )
keyword[if] identifier[out] [ literal[string] ]!= literal[int] :
keyword[raise] identifier[CommandExecutionError] (
literal[string] . identifier[format] ( identifier[subcmd] ),
identifier[info] ={ literal[string] : identifier[out] }
)
keyword[for] identifier[line] keyword[in] identifier[salt] . identifier[utils] . identifier[itertools] . identifier[split] ( identifier[out] [ literal[string] ], literal[string] ):
keyword[if] keyword[not] identifier[line] :
keyword[continue]
keyword[try] :
identifier[pkgname] = identifier[line] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
keyword[except] identifier[ValueError] :
keyword[continue]
identifier[locked_pkgs] . identifier[append] ( identifier[pkgname] )
identifier[log] . identifier[debug] ( literal[string] , literal[string] . identifier[join] ( identifier[locked_pkgs] ))
keyword[return] identifier[locked_pkgs] | def _lockcmd(subcmd, pkgname=None, **kwargs):
"""
Helper function for lock and unlock commands, because their syntax is identical.
Run the lock/unlock command, and return a list of locked packages
"""
jail = kwargs.pop('jail', None)
chroot = kwargs.pop('chroot', None)
root = kwargs.pop('root', None)
locked_pkgs = []
cmd = _pkg(jail, chroot, root)
cmd.append(subcmd)
cmd.append('-y')
cmd.append('--quiet')
cmd.append('--show-locked')
if pkgname:
cmd.append(pkgname) # depends on [control=['if'], data=[]]
out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
if out['retcode'] != 0:
raise CommandExecutionError('Problem encountered {0}ing packages'.format(subcmd), info={'result': out}) # depends on [control=['if'], data=[]]
for line in salt.utils.itertools.split(out['stdout'], '\n'):
if not line:
continue # depends on [control=['if'], data=[]]
try:
pkgname = line.rsplit('-', 1)[0] # depends on [control=['try'], data=[]]
except ValueError:
continue # depends on [control=['except'], data=[]]
locked_pkgs.append(pkgname) # depends on [control=['for'], data=['line']]
log.debug('Locked packages: %s', ', '.join(locked_pkgs))
return locked_pkgs |
def decomprest(f, rows):
    """Iterator that decompresses the rest of a file once the metadata
    have been consumed.

    Repeatedly deblocks *f*, yielding each decompressed block, until the
    row counter reported by ``deblock`` reaches ``rows``.
    """
    last_row = 0
    while last_row < rows:
        last_row, block = deblock(f)
        yield block
constant[Iterator that decompresses the rest of a file once the metadata
have been consumed.]
variable[row] assign[=] constant[0]
while compare[name[row] less[<] name[rows]] begin[:]
<ast.Tuple object at 0x7da1b072d1e0> assign[=] call[name[deblock], parameter[name[f]]]
<ast.Yield object at 0x7da1b072f190> | keyword[def] identifier[decomprest] ( identifier[f] , identifier[rows] ):
literal[string]
identifier[row] = literal[int]
keyword[while] identifier[row] < identifier[rows] :
identifier[row] , identifier[o] = identifier[deblock] ( identifier[f] )
keyword[yield] identifier[o] | def decomprest(f, rows):
"""Iterator that decompresses the rest of a file once the metadata
have been consumed."""
row = 0
while row < rows:
(row, o) = deblock(f)
yield o # depends on [control=['while'], data=['row']] |
def get_mirror(self):
    """Return True when a 1v1 game has both players on the same civilization."""
    if not self.get_diplomacy()['1v1']:
        return False
    civilizations = {player['civilization'] for player in self.get_players()}
    return len(civilizations) == 1
constant[Determine mirror match.]
variable[mirror] assign[=] constant[False]
if call[call[name[self].get_diplomacy, parameter[]]][constant[1v1]] begin[:]
variable[civs] assign[=] call[name[set], parameter[]]
for taget[name[data]] in starred[call[name[self].get_players, parameter[]]] begin[:]
call[name[civs].add, parameter[call[name[data]][constant[civilization]]]]
variable[mirror] assign[=] compare[call[name[len], parameter[name[civs]]] equal[==] constant[1]]
return[name[mirror]] | keyword[def] identifier[get_mirror] ( identifier[self] ):
literal[string]
identifier[mirror] = keyword[False]
keyword[if] identifier[self] . identifier[get_diplomacy] ()[ literal[string] ]:
identifier[civs] = identifier[set] ()
keyword[for] identifier[data] keyword[in] identifier[self] . identifier[get_players] ():
identifier[civs] . identifier[add] ( identifier[data] [ literal[string] ])
identifier[mirror] =( identifier[len] ( identifier[civs] )== literal[int] )
keyword[return] identifier[mirror] | def get_mirror(self):
"""Determine mirror match."""
mirror = False
if self.get_diplomacy()['1v1']:
civs = set()
for data in self.get_players():
civs.add(data['civilization']) # depends on [control=['for'], data=['data']]
mirror = len(civs) == 1 # depends on [control=['if'], data=[]]
return mirror |
def get_patient_mhc_haplotype(job, patient_dict):
    """
    Convenience function to get the mhc haplotype from the patient dict

    :param dict patient_dict: dict of patient info
    :return: The MHCI and MHCII haplotypes, keyed by allele-list filename
    :rtype: dict
    """
    archive = job.fileStore.readGlobalFile(patient_dict['hla_haplotype_files'])
    extracted_dir = untargz(archive, os.getcwd())
    # Re-register each extracted allele list with the job's file store.
    return {
        allele_file: job.fileStore.writeGlobalFile(os.path.join(extracted_dir,
                                                                allele_file))
        for allele_file in ('mhci_alleles.list', 'mhcii_alleles.list')
    }
constant[
Convenience function to get the mhc haplotype from the patient dict
:param dict patient_dict: dict of patient info
:return: The MHCI and MHCII haplotypes
:rtype: toil.fileStore.FileID
]
variable[haplotype_archive] assign[=] call[name[job].fileStore.readGlobalFile, parameter[call[name[patient_dict]][constant[hla_haplotype_files]]]]
variable[haplotype_archive] assign[=] call[name[untargz], parameter[name[haplotype_archive], call[name[os].getcwd, parameter[]]]]
variable[output_dict] assign[=] dictionary[[], []]
for taget[name[filename]] in starred[tuple[[<ast.Constant object at 0x7da18f00f9a0>, <ast.Constant object at 0x7da18f00cb50>]]] begin[:]
call[name[output_dict]][name[filename]] assign[=] call[name[job].fileStore.writeGlobalFile, parameter[call[name[os].path.join, parameter[name[haplotype_archive], name[filename]]]]]
return[name[output_dict]] | keyword[def] identifier[get_patient_mhc_haplotype] ( identifier[job] , identifier[patient_dict] ):
literal[string]
identifier[haplotype_archive] = identifier[job] . identifier[fileStore] . identifier[readGlobalFile] ( identifier[patient_dict] [ literal[string] ])
identifier[haplotype_archive] = identifier[untargz] ( identifier[haplotype_archive] , identifier[os] . identifier[getcwd] ())
identifier[output_dict] ={}
keyword[for] identifier[filename] keyword[in] literal[string] , literal[string] :
identifier[output_dict] [ identifier[filename] ]= identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[haplotype_archive] ,
identifier[filename] ))
keyword[return] identifier[output_dict] | def get_patient_mhc_haplotype(job, patient_dict):
"""
Convenience function to get the mhc haplotype from the patient dict
:param dict patient_dict: dict of patient info
:return: The MHCI and MHCII haplotypes
:rtype: toil.fileStore.FileID
"""
haplotype_archive = job.fileStore.readGlobalFile(patient_dict['hla_haplotype_files'])
haplotype_archive = untargz(haplotype_archive, os.getcwd())
output_dict = {}
for filename in ('mhci_alleles.list', 'mhcii_alleles.list'):
output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(haplotype_archive, filename)) # depends on [control=['for'], data=['filename']]
return output_dict |
def get_bhavcopy_url(self, d):
    """Parse date string *d* and return the bhavcopy download url for it."""
    trade_date = parser.parse(d).date()
    day = trade_date.strftime("%d")
    month = trade_date.strftime("%b").upper()
    return self.bhavcopy_base_url % (trade_date.year, month, day,
                                     month, trade_date.year)
constant[take date and return bhavcopy url]
variable[d] assign[=] call[call[name[parser].parse, parameter[name[d]]].date, parameter[]]
variable[day_of_month] assign[=] call[name[d].strftime, parameter[constant[%d]]]
variable[mon] assign[=] call[call[name[d].strftime, parameter[constant[%b]]].upper, parameter[]]
variable[year] assign[=] name[d].year
variable[url] assign[=] binary_operation[name[self].bhavcopy_base_url <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1ddbaf0>, <ast.Name object at 0x7da1b1dd9c00>, <ast.Name object at 0x7da1b1ddae90>, <ast.Name object at 0x7da1b1dda950>, <ast.Name object at 0x7da1b1dda830>]]]
return[name[url]] | keyword[def] identifier[get_bhavcopy_url] ( identifier[self] , identifier[d] ):
literal[string]
identifier[d] = identifier[parser] . identifier[parse] ( identifier[d] ). identifier[date] ()
identifier[day_of_month] = identifier[d] . identifier[strftime] ( literal[string] )
identifier[mon] = identifier[d] . identifier[strftime] ( literal[string] ). identifier[upper] ()
identifier[year] = identifier[d] . identifier[year]
identifier[url] = identifier[self] . identifier[bhavcopy_base_url] %( identifier[year] , identifier[mon] , identifier[day_of_month] , identifier[mon] , identifier[year] )
keyword[return] identifier[url] | def get_bhavcopy_url(self, d):
"""take date and return bhavcopy url"""
d = parser.parse(d).date()
day_of_month = d.strftime('%d')
mon = d.strftime('%b').upper()
year = d.year
url = self.bhavcopy_base_url % (year, mon, day_of_month, mon, year)
return url |
def addValue(self, _, value):
    """Accumulate *value* into the running count and total.

    The value is interpreted through ``self._dataFormat``: ``getCount``
    supplies a multiplicity and ``getValue`` a magnitude.  If the format
    helpers reject the value with a ``TypeError``, it is treated as a
    plain numeric sample with a count of one.  ``None`` values are
    ignored.  The first parameter (the source) is unused.
    """
    if value is None:
        return
    # Compute both contributions before mutating state.  The previous
    # version mutated inside the try-block, so a TypeError raised by
    # getValue() after getCount() had succeeded added getCount(value)
    # AND 1 to the count, leaving _count and _total inconsistent.
    try:
        count = self._dataFormat.getCount(value)
        delta = self._dataFormat.getValue(value) * count
    except TypeError:
        count = 1
        delta = value
    self._count += count
    self._total += delta
constant[Adds a value from the given source.]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da18bcc8d60> | keyword[def] identifier[addValue] ( identifier[self] , identifier[_] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[self] . identifier[_count] += identifier[self] . identifier[_dataFormat] . identifier[getCount] ( identifier[value] )
identifier[self] . identifier[_total] += identifier[self] . identifier[_dataFormat] . identifier[getValue] ( identifier[value] )* identifier[self] . identifier[_dataFormat] . identifier[getCount] ( identifier[value] )
keyword[except] identifier[TypeError] :
identifier[self] . identifier[_count] += literal[int]
identifier[self] . identifier[_total] += identifier[value] | def addValue(self, _, value):
"""Adds a value from the given source."""
if value is not None:
try:
self._count += self._dataFormat.getCount(value)
self._total += self._dataFormat.getValue(value) * self._dataFormat.getCount(value) # depends on [control=['try'], data=[]]
except TypeError:
self._count += 1
self._total += value # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['value']] |
def distribute(build):
    """ distribute the uranium package """
    # Packaging tooling required for building and uploading artifacts.
    for dependency in ("wheel", "twine"):
        build.packages.install(dependency)
    build.executables.run([
        "python", "setup.py",
        "sdist", "bdist_wheel", "--universal", "upload",
    ])
    build.executables.run([
        "twine", "upload", "dist/*"
    ])
constant[ distribute the uranium package ]
call[name[build].packages.install, parameter[constant[wheel]]]
call[name[build].packages.install, parameter[constant[twine]]]
call[name[build].executables.run, parameter[list[[<ast.Constant object at 0x7da1b0471750>, <ast.Constant object at 0x7da1b0470430>, <ast.Constant object at 0x7da1b0472500>, <ast.Constant object at 0x7da1b04712a0>, <ast.Constant object at 0x7da1b0470ac0>, <ast.Constant object at 0x7da1b0473460>]]]]
call[name[build].executables.run, parameter[list[[<ast.Constant object at 0x7da1b04713f0>, <ast.Constant object at 0x7da1b0472830>, <ast.Constant object at 0x7da1b04720e0>]]]] | keyword[def] identifier[distribute] ( identifier[build] ):
literal[string]
identifier[build] . identifier[packages] . identifier[install] ( literal[string] )
identifier[build] . identifier[packages] . identifier[install] ( literal[string] )
identifier[build] . identifier[executables] . identifier[run] ([
literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
])
identifier[build] . identifier[executables] . identifier[run] ([
literal[string] , literal[string] , literal[string]
]) | def distribute(build):
""" distribute the uranium package """
build.packages.install('wheel')
build.packages.install('twine')
build.executables.run(['python', 'setup.py', 'sdist', 'bdist_wheel', '--universal', 'upload'])
build.executables.run(['twine', 'upload', 'dist/*']) |
def operational(ctx, commands, format, xpath):
    """ Execute operational mode command(s) against all target hosts.

    Sends operational mode commands to Junos devices in parallel using a
    multiprocessing pool (two workers per CPU).  jaide.utils.clean_lines()
    downstream determines how the commands are received and skips comment
    or blank lines in a command file.

    @param ctx: The click context parameter, for receiving the object
              | dictionary being manipulated by previous functions.  Needed
              | by any function with the @click.pass_context decorator.
    @type ctx: click.Context
    @param commands: The op commands to send to the device.  Can be:
                   | 1. A single op command as a string.
                   | 2. A string of comma separated op commands.
                   | 3. A python list of op commands.
                   | 4. A filepath of a file with op commands on each line.
    @type commands: str
    @param format: Format to request for the response from the device.
                 | Defaults to 'text', but also accepts 'xml'.
    @type format: str
    @param xpath: An xpath expression on which to filter the results.
                | This enforces 'xml' for the format of the response.
    @type xpath: str

    @returns: None.  Click handles passing context between the functions
            | and maintaining command order and chaining.
    """
    conn_settings = ctx.obj['conn']
    pool = multiprocessing.Pool(multiprocessing.cpu_count() * 2)
    for ip in ctx.obj['hosts']:
        pool.apply_async(
            wrap.open_connection,
            args=(
                ip,
                conn_settings['username'],
                conn_settings['password'],
                wrap.command,
                [commands, format, xpath],
                ctx.obj['out'],
                conn_settings['connect_timeout'],
                conn_settings['session_timeout'],
                conn_settings['port'],
            ),
            callback=write_out,
        )
    pool.close()
    pool.join()
constant[ Execute operational mode command(s).
This function will send operational mode commands to a Junos
device. jaide.utils.clean_lines() is used to determine how we are
receiving commands, and ignore comment lines or blank lines in
a command file.
@param ctx: The click context paramter, for receiving the object dictionary
| being manipulated by other previous functions. Needed by any
| function with the @click.pass_context decorator.
@type ctx: click.Context
@param commands: The op commands to send to the device. Can be one of
| four things:
| 1. A single op command as a string.
| 2. A string of comma separated op commands.
| 3. A python list of op commands.
| 4. A filepath of a file with op commands on each
| line.
@type commands: str
@param format: String specifying what format to request for the
| response from the device. Defaults to 'text', but
| also accepts 'xml'.
@type format: str
@param xpath: An xpath expression on which we should filter the results.
| This enforces 'xml' for the format of the response.
@type xpath: str
@returns: None. Functions part of click relating to the command group
| 'main' do not return anything. Click handles passing context
| between the functions and maintaing command order and chaining.
]
variable[mp_pool] assign[=] call[name[multiprocessing].Pool, parameter[binary_operation[call[name[multiprocessing].cpu_count, parameter[]] * constant[2]]]]
for taget[name[ip]] in starred[call[name[ctx].obj][constant[hosts]]] begin[:]
call[name[mp_pool].apply_async, parameter[name[wrap].open_connection]]
call[name[mp_pool].close, parameter[]]
call[name[mp_pool].join, parameter[]] | keyword[def] identifier[operational] ( identifier[ctx] , identifier[commands] , identifier[format] , identifier[xpath] ):
literal[string]
identifier[mp_pool] = identifier[multiprocessing] . identifier[Pool] ( identifier[multiprocessing] . identifier[cpu_count] ()* literal[int] )
keyword[for] identifier[ip] keyword[in] identifier[ctx] . identifier[obj] [ literal[string] ]:
identifier[mp_pool] . identifier[apply_async] ( identifier[wrap] . identifier[open_connection] , identifier[args] =( identifier[ip] ,
identifier[ctx] . identifier[obj] [ literal[string] ][ literal[string] ],
identifier[ctx] . identifier[obj] [ literal[string] ][ literal[string] ],
identifier[wrap] . identifier[command] ,[ identifier[commands] , identifier[format] , identifier[xpath] ],
identifier[ctx] . identifier[obj] [ literal[string] ],
identifier[ctx] . identifier[obj] [ literal[string] ][ literal[string] ],
identifier[ctx] . identifier[obj] [ literal[string] ][ literal[string] ],
identifier[ctx] . identifier[obj] [ literal[string] ][ literal[string] ]), identifier[callback] = identifier[write_out] )
identifier[mp_pool] . identifier[close] ()
identifier[mp_pool] . identifier[join] () | def operational(ctx, commands, format, xpath):
""" Execute operational mode command(s).
This function will send operational mode commands to a Junos
device. jaide.utils.clean_lines() is used to determine how we are
receiving commands, and ignore comment lines or blank lines in
a command file.
@param ctx: The click context paramter, for receiving the object dictionary
| being manipulated by other previous functions. Needed by any
| function with the @click.pass_context decorator.
@type ctx: click.Context
@param commands: The op commands to send to the device. Can be one of
| four things:
| 1. A single op command as a string.
| 2. A string of comma separated op commands.
| 3. A python list of op commands.
| 4. A filepath of a file with op commands on each
| line.
@type commands: str
@param format: String specifying what format to request for the
| response from the device. Defaults to 'text', but
| also accepts 'xml'.
@type format: str
@param xpath: An xpath expression on which we should filter the results.
| This enforces 'xml' for the format of the response.
@type xpath: str
@returns: None. Functions part of click relating to the command group
| 'main' do not return anything. Click handles passing context
| between the functions and maintaing command order and chaining.
"""
mp_pool = multiprocessing.Pool(multiprocessing.cpu_count() * 2)
for ip in ctx.obj['hosts']:
mp_pool.apply_async(wrap.open_connection, args=(ip, ctx.obj['conn']['username'], ctx.obj['conn']['password'], wrap.command, [commands, format, xpath], ctx.obj['out'], ctx.obj['conn']['connect_timeout'], ctx.obj['conn']['session_timeout'], ctx.obj['conn']['port']), callback=write_out) # depends on [control=['for'], data=['ip']]
mp_pool.close()
mp_pool.join() |
def stream_events(self, filter: Callable[[Event], bool] = None, *, max_queue_size: int = 0):
"""Shortcut for calling :func:`stream_events` with this signal in the first argument."""
return stream_events([self], filter, max_queue_size=max_queue_size) | def function[stream_events, parameter[self, filter]]:
constant[Shortcut for calling :func:`stream_events` with this signal in the first argument.]
return[call[name[stream_events], parameter[list[[<ast.Name object at 0x7da1b0416f50>]], name[filter]]]] | keyword[def] identifier[stream_events] ( identifier[self] , identifier[filter] : identifier[Callable] [[ identifier[Event] ], identifier[bool] ]= keyword[None] ,*, identifier[max_queue_size] : identifier[int] = literal[int] ):
literal[string]
keyword[return] identifier[stream_events] ([ identifier[self] ], identifier[filter] , identifier[max_queue_size] = identifier[max_queue_size] ) | def stream_events(self, filter: Callable[[Event], bool]=None, *, max_queue_size: int=0):
"""Shortcut for calling :func:`stream_events` with this signal in the first argument."""
return stream_events([self], filter, max_queue_size=max_queue_size) |
def set_checksum(self):
    """Set byte 14 of the userdata to a checksum value.

    The checksum is the two's-complement of the low byte of the sum of
    cmd1, cmd2 and userdata bytes d1-d13, so that summing all fifteen
    bytes modulo 256 yields zero.
    """
    data_sum = self.cmd1 + self.cmd2
    for i in range(1, 14):
        data_sum += self._userdata['d{:d}'.format(i)]
    # Two's-complement of the low byte.  The previous expression
    # (0xff - (data_sum & 0xff) + 1) produced 256 instead of 0 whenever
    # the sum was an exact multiple of 256, which is not a valid byte.
    self._userdata['d14'] = (-data_sum) & 0xff
constant[Set byte 14 of the userdata to a checksum value.]
variable[data_sum] assign[=] binary_operation[name[self].cmd1 + name[self].cmd2]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], constant[14]]]] begin[:]
<ast.AugAssign object at 0x7da1b1a790f0>
variable[chksum] assign[=] binary_operation[binary_operation[constant[255] - binary_operation[name[data_sum] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]] + constant[1]]
call[name[self]._userdata][constant[d14]] assign[=] name[chksum] | keyword[def] identifier[set_checksum] ( identifier[self] ):
literal[string]
identifier[data_sum] = identifier[self] . identifier[cmd1] + identifier[self] . identifier[cmd2]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[data_sum] += identifier[self] . identifier[_userdata] [ literal[string] . identifier[format] ( identifier[i] )]
identifier[chksum] = literal[int] -( identifier[data_sum] & literal[int] )+ literal[int]
identifier[self] . identifier[_userdata] [ literal[string] ]= identifier[chksum] | def set_checksum(self):
"""Set byte 14 of the userdata to a checksum value."""
data_sum = self.cmd1 + self.cmd2
for i in range(1, 14):
data_sum += self._userdata['d{:d}'.format(i)] # depends on [control=['for'], data=['i']]
chksum = 255 - (data_sum & 255) + 1
self._userdata['d14'] = chksum |
def _get_sector(self, channel, nlines, ncols):
    """Determine which sector was scanned.

    Matches the scanned image size (nlines x ncols) against the
    reference sector sizes for the channel type, allowing a channel
    dependent tolerance, and falls back to UNKNOWN_SECTOR.
    """
    is_visible = self._is_vis(channel)
    margin = 100 if is_visible else 50
    reference = self.vis_sectors if is_visible else self.ir_sectors
    for (ref_lines, ref_cols), sector in reference.items():
        cols_match = np.fabs(ncols - ref_cols) < margin
        lines_match = np.fabs(nlines - ref_lines) < margin
        if cols_match and lines_match:
            return sector
    return UNKNOWN_SECTOR
constant[Determine which sector was scanned]
if call[name[self]._is_vis, parameter[name[channel]]] begin[:]
variable[margin] assign[=] constant[100]
variable[sectors_ref] assign[=] name[self].vis_sectors
for taget[tuple[[<ast.Tuple object at 0x7da1b22f94e0>, <ast.Name object at 0x7da1b22f8610>]]] in starred[call[name[sectors_ref].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b22fa170> begin[:]
return[name[sector]]
return[name[UNKNOWN_SECTOR]] | keyword[def] identifier[_get_sector] ( identifier[self] , identifier[channel] , identifier[nlines] , identifier[ncols] ):
literal[string]
keyword[if] identifier[self] . identifier[_is_vis] ( identifier[channel] ):
identifier[margin] = literal[int]
identifier[sectors_ref] = identifier[self] . identifier[vis_sectors]
keyword[else] :
identifier[margin] = literal[int]
identifier[sectors_ref] = identifier[self] . identifier[ir_sectors]
keyword[for] ( identifier[nlines_ref] , identifier[ncols_ref] ), identifier[sector] keyword[in] identifier[sectors_ref] . identifier[items] ():
keyword[if] identifier[np] . identifier[fabs] ( identifier[ncols] - identifier[ncols_ref] )< identifier[margin] keyword[and] identifier[np] . identifier[fabs] ( identifier[nlines] - identifier[nlines_ref] )< identifier[margin] :
keyword[return] identifier[sector]
keyword[return] identifier[UNKNOWN_SECTOR] | def _get_sector(self, channel, nlines, ncols):
"""Determine which sector was scanned"""
if self._is_vis(channel):
margin = 100
sectors_ref = self.vis_sectors # depends on [control=['if'], data=[]]
else:
margin = 50
sectors_ref = self.ir_sectors
for ((nlines_ref, ncols_ref), sector) in sectors_ref.items():
if np.fabs(ncols - ncols_ref) < margin and np.fabs(nlines - nlines_ref) < margin:
return sector # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return UNKNOWN_SECTOR |
def symmetric_difference(self, other):
    """Constructs an unminimized DFA recognizing
    the symmetric difference of the languages of two given DFAs.

    Args:
        other (DFA): The other DFA that will be used
                     for the symmetric difference operation
    Returns:
        DFA: The resulting DFA
    """
    # XOR of acceptance over the product automaton yields the
    # symmetric difference of the two languages.
    self.cross_product(other, bool.__xor__)
    return self
constant[Constructs an unminimized DFA recognizing
the symmetric difference of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the symmetric difference operation
Returns:
DFA: The resulting DFA
]
variable[operation] assign[=] name[bool].__xor__
call[name[self].cross_product, parameter[name[other], name[operation]]]
return[name[self]] | keyword[def] identifier[symmetric_difference] ( identifier[self] , identifier[other] ):
literal[string]
identifier[operation] = identifier[bool] . identifier[__xor__]
identifier[self] . identifier[cross_product] ( identifier[other] , identifier[operation] )
keyword[return] identifier[self] | def symmetric_difference(self, other):
"""Constructs an unminimized DFA recognizing
the symmetric difference of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the symmetric difference operation
Returns:
DFA: The resulting DFA
"""
operation = bool.__xor__
self.cross_product(other, operation)
return self |
def generate(self):
    '''
    Generate noise samples.

    Draws uniform samples in [0.1, 0.9) of shape
    (batch size, sequence length, dimension) and, when a noise sampler
    is configured, adds its generated noise on top.

    Returns:
        `np.ndarray` of samples.
    '''
    shape = (self.__batch_size, self.__seq_len, self.__dim)
    generated_arr = np.random.uniform(low=0.1, high=0.9, size=shape)
    sampler = self.noise_sampler
    if sampler is not None:
        sampler.output_shape = generated_arr.shape
        generated_arr += sampler.generate()
    return generated_arr
constant[
Generate noise samples.
Returns:
`np.ndarray` of samples.
]
variable[generated_arr] assign[=] call[name[np].random.uniform, parameter[]]
if compare[name[self].noise_sampler is_not constant[None]] begin[:]
name[self].noise_sampler.output_shape assign[=] name[generated_arr].shape
<ast.AugAssign object at 0x7da1b0831600>
return[name[generated_arr]] | keyword[def] identifier[generate] ( identifier[self] ):
literal[string]
identifier[generated_arr] = identifier[np] . identifier[random] . identifier[uniform] (
identifier[low] = literal[int] ,
identifier[high] = literal[int] ,
identifier[size] =(( identifier[self] . identifier[__batch_size] , identifier[self] . identifier[__seq_len] , identifier[self] . identifier[__dim] ))
)
keyword[if] identifier[self] . identifier[noise_sampler] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[noise_sampler] . identifier[output_shape] = identifier[generated_arr] . identifier[shape]
identifier[generated_arr] += identifier[self] . identifier[noise_sampler] . identifier[generate] ()
keyword[return] identifier[generated_arr] | def generate(self):
"""
Generate noise samples.
Returns:
`np.ndarray` of samples.
"""
generated_arr = np.random.uniform(low=0.1, high=0.9, size=(self.__batch_size, self.__seq_len, self.__dim))
if self.noise_sampler is not None:
self.noise_sampler.output_shape = generated_arr.shape
generated_arr += self.noise_sampler.generate() # depends on [control=['if'], data=[]]
return generated_arr |
def _prompt_changer(attr, val):
    """Change the current prompt theme.

    Note: the ``attr`` and ``val`` parameters are unused here; the
    signature presumably matches a config-hook callback convention —
    TODO confirm against the registration site.
    """
    # Re-render the interactive prompt with the configured color theme.
    # Deliberately best-effort: prompt styling must never break the
    # session, so any failure is silently ignored.
    try:
        sys.ps1 = conf.color_theme.prompt(conf.prompt)
    except Exception:
        pass
    # Only applicable inside IPython; in a plain interpreter
    # get_ipython is undefined and the NameError is swallowed.
    try:
        apply_ipython_style(get_ipython())
    except NameError:
        pass
constant[Change the current prompt theme]
<ast.Try object at 0x7da1b21c4310>
<ast.Try object at 0x7da1b21c7130> | keyword[def] identifier[_prompt_changer] ( identifier[attr] , identifier[val] ):
literal[string]
keyword[try] :
identifier[sys] . identifier[ps1] = identifier[conf] . identifier[color_theme] . identifier[prompt] ( identifier[conf] . identifier[prompt] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[try] :
identifier[apply_ipython_style] ( identifier[get_ipython] ())
keyword[except] identifier[NameError] :
keyword[pass] | def _prompt_changer(attr, val):
"""Change the current prompt theme"""
try:
sys.ps1 = conf.color_theme.prompt(conf.prompt) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
try:
apply_ipython_style(get_ipython()) # depends on [control=['try'], data=[]]
except NameError:
pass # depends on [control=['except'], data=[]] |
def select_action_key(self, next_action_arr, next_q_arr):
    '''
    Select action by Q(state, action).

    With probability ``self.epsilon_greedy_rate`` the greedy action
    (argmax of the Q-values) is chosen; otherwise a uniformly random
    action index is drawn.

    Args:
        next_action_arr:    `np.ndarray` of actions.
        next_q_arr:         `np.ndarray` of Q-Values.

    Returns:
        Selected action key (index).
    '''
    greedy = bool(np.random.binomial(n=1, p=self.epsilon_greedy_rate))
    if greedy:
        return next_q_arr.argmax()
    return np.random.randint(low=0, high=next_action_arr.shape[0])
constant[
Select action by Q(state, action).
Args:
next_action_arr: `np.ndarray` of actions.
next_q_arr: `np.ndarray` of Q-Values.
Retruns:
`np.ndarray` of keys.
]
variable[epsilon_greedy_flag] assign[=] call[name[bool], parameter[call[name[np].random.binomial, parameter[]]]]
if compare[name[epsilon_greedy_flag] is constant[False]] begin[:]
variable[key] assign[=] call[name[np].random.randint, parameter[]]
return[name[key]] | keyword[def] identifier[select_action_key] ( identifier[self] , identifier[next_action_arr] , identifier[next_q_arr] ):
literal[string]
identifier[epsilon_greedy_flag] = identifier[bool] ( identifier[np] . identifier[random] . identifier[binomial] ( identifier[n] = literal[int] , identifier[p] = identifier[self] . identifier[epsilon_greedy_rate] ))
keyword[if] identifier[epsilon_greedy_flag] keyword[is] keyword[False] :
identifier[key] = identifier[np] . identifier[random] . identifier[randint] ( identifier[low] = literal[int] , identifier[high] = identifier[next_action_arr] . identifier[shape] [ literal[int] ])
keyword[else] :
identifier[key] = identifier[next_q_arr] . identifier[argmax] ()
keyword[return] identifier[key] | def select_action_key(self, next_action_arr, next_q_arr):
"""
Select action by Q(state, action).
Args:
next_action_arr: `np.ndarray` of actions.
next_q_arr: `np.ndarray` of Q-Values.
Retruns:
`np.ndarray` of keys.
"""
epsilon_greedy_flag = bool(np.random.binomial(n=1, p=self.epsilon_greedy_rate))
if epsilon_greedy_flag is False:
key = np.random.randint(low=0, high=next_action_arr.shape[0]) # depends on [control=['if'], data=[]]
else:
key = next_q_arr.argmax()
return key |
def onColorPicker(self):
    """
    Show color-picker dialog to select color.

    Qt will use the native dialog by default.  The stored color is only
    updated when the user accepts the dialog.
    """
    dialog = QtGui.QColorDialog(QtGui.QColor(self._color), None)
    if not dialog.exec_():
        return
    self.setColor(dialog.currentColor().name())
constant[
Show color-picker dialog to select color.
Qt will use the native dialog by default.
]
variable[dlg] assign[=] call[name[QtGui].QColorDialog, parameter[call[name[QtGui].QColor, parameter[name[self]._color]], constant[None]]]
if call[name[dlg].exec_, parameter[]] begin[:]
call[name[self].setColor, parameter[call[call[name[dlg].currentColor, parameter[]].name, parameter[]]]] | keyword[def] identifier[onColorPicker] ( identifier[self] ):
literal[string]
identifier[dlg] = identifier[QtGui] . identifier[QColorDialog] ( identifier[QtGui] . identifier[QColor] ( identifier[self] . identifier[_color] ), keyword[None] )
keyword[if] identifier[dlg] . identifier[exec_] ():
identifier[self] . identifier[setColor] ( identifier[dlg] . identifier[currentColor] (). identifier[name] ()) | def onColorPicker(self):
"""
Show color-picker dialog to select color.
Qt will use the native dialog by default.
"""
dlg = QtGui.QColorDialog(QtGui.QColor(self._color), None)
# if self._color:
# dlg.setCurrentColor(QtGui.QColor(self._color))
if dlg.exec_():
self.setColor(dlg.currentColor().name()) # depends on [control=['if'], data=[]] |
def bwar_pitch(return_all=False):
    """
    Get data from war_daily_pitch table. Returns WAR, its components, and a few other useful stats.
    To get all fields from this table, supply argument return_all=True.
    """
    url = "http://www.baseball-reference.com/data/war_daily_pitch.txt"
    raw = requests.get(url).content
    table = pd.read_csv(io.StringIO(raw.decode('utf-8')))
    if return_all:
        return table
    selected_columns = [
        'name_common', 'mlb_ID', 'player_ID', 'year_ID', 'team_ID',
        'stint_ID', 'lg_ID', 'G', 'GS', 'RA', 'xRA', 'BIP', 'BIP_perc',
        'salary', 'ERA_plus', 'WAR_rep', 'WAA', 'WAA_adj', 'WAR',
    ]
    return table[selected_columns]
constant[
Get data from war_daily_pitch table. Returns WAR, its components, and a few other useful stats.
To get all fields from this table, supply argument return_all=True.
]
variable[url] assign[=] constant[http://www.baseball-reference.com/data/war_daily_pitch.txt]
variable[s] assign[=] call[name[requests].get, parameter[name[url]]].content
variable[c] assign[=] call[name[pd].read_csv, parameter[call[name[io].StringIO, parameter[call[name[s].decode, parameter[constant[utf-8]]]]]]]
if name[return_all] begin[:]
return[name[c]] | keyword[def] identifier[bwar_pitch] ( identifier[return_all] = keyword[False] ):
literal[string]
identifier[url] = literal[string]
identifier[s] = identifier[requests] . identifier[get] ( identifier[url] ). identifier[content]
identifier[c] = identifier[pd] . identifier[read_csv] ( identifier[io] . identifier[StringIO] ( identifier[s] . identifier[decode] ( literal[string] )))
keyword[if] identifier[return_all] :
keyword[return] identifier[c]
keyword[else] :
identifier[cols_to_keep] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ]
keyword[return] identifier[c] [ identifier[cols_to_keep] ] | def bwar_pitch(return_all=False):
"""
Get data from war_daily_pitch table. Returns WAR, its components, and a few other useful stats.
To get all fields from this table, supply argument return_all=True.
"""
url = 'http://www.baseball-reference.com/data/war_daily_pitch.txt'
s = requests.get(url).content
c = pd.read_csv(io.StringIO(s.decode('utf-8')))
if return_all:
return c # depends on [control=['if'], data=[]]
else:
cols_to_keep = ['name_common', 'mlb_ID', 'player_ID', 'year_ID', 'team_ID', 'stint_ID', 'lg_ID', 'G', 'GS', 'RA', 'xRA', 'BIP', 'BIP_perc', 'salary', 'ERA_plus', 'WAR_rep', 'WAA', 'WAA_adj', 'WAR']
return c[cols_to_keep] |
def to_json_string(self, indent=None):
''' Convert the document to a JSON string.
Args:
indent (int or None, optional) : number of spaces to indent, or
None to suppress all newlines and indentation (default: None)
Returns:
str
'''
root_ids = []
for r in self._roots:
root_ids.append(r.id)
root_references = self._all_models.values()
json = {
'title' : self.title,
'roots' : {
'root_ids' : root_ids,
'references' : references_json(root_references)
},
'version' : __version__
}
return serialize_json(json, indent=indent) | def function[to_json_string, parameter[self, indent]]:
constant[ Convert the document to a JSON string.
Args:
indent (int or None, optional) : number of spaces to indent, or
None to suppress all newlines and indentation (default: None)
Returns:
str
]
variable[root_ids] assign[=] list[[]]
for taget[name[r]] in starred[name[self]._roots] begin[:]
call[name[root_ids].append, parameter[name[r].id]]
variable[root_references] assign[=] call[name[self]._all_models.values, parameter[]]
variable[json] assign[=] dictionary[[<ast.Constant object at 0x7da2043475e0>, <ast.Constant object at 0x7da204345f90>, <ast.Constant object at 0x7da204347be0>], [<ast.Attribute object at 0x7da2043467a0>, <ast.Dict object at 0x7da204347670>, <ast.Name object at 0x7da204344520>]]
return[call[name[serialize_json], parameter[name[json]]]] | keyword[def] identifier[to_json_string] ( identifier[self] , identifier[indent] = keyword[None] ):
literal[string]
identifier[root_ids] =[]
keyword[for] identifier[r] keyword[in] identifier[self] . identifier[_roots] :
identifier[root_ids] . identifier[append] ( identifier[r] . identifier[id] )
identifier[root_references] = identifier[self] . identifier[_all_models] . identifier[values] ()
identifier[json] ={
literal[string] : identifier[self] . identifier[title] ,
literal[string] :{
literal[string] : identifier[root_ids] ,
literal[string] : identifier[references_json] ( identifier[root_references] )
},
literal[string] : identifier[__version__]
}
keyword[return] identifier[serialize_json] ( identifier[json] , identifier[indent] = identifier[indent] ) | def to_json_string(self, indent=None):
""" Convert the document to a JSON string.
Args:
indent (int or None, optional) : number of spaces to indent, or
None to suppress all newlines and indentation (default: None)
Returns:
str
"""
root_ids = []
for r in self._roots:
root_ids.append(r.id) # depends on [control=['for'], data=['r']]
root_references = self._all_models.values()
json = {'title': self.title, 'roots': {'root_ids': root_ids, 'references': references_json(root_references)}, 'version': __version__}
return serialize_json(json, indent=indent) |
def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):
"""
Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data
"""
means = data.mean(1)
variances = data.var(1)
if (means > variances).any():
raise ValueError("For NB fit, means must be less than variances")
genes, cells = data.shape
# method of moments
P = 1.0 - means/variances
R = means*(1-P)/P
for i in range(genes):
result = minimize(nb_ll_row, [P[i], R[i]], args=(data[i,:],),
bounds = [(0, 1), (eps, None)])
params = result.x
P[i] = params[0]
R[i] = params[1]
#R[i] = fsolve(nb_r_deriv, R[i], args = (data[i,:],))
#P[i] = data[i,:].mean()/(data[i,:].mean() + R[i])
return P,R | def function[nb_fit, parameter[data, P_init, R_init, epsilon, max_iters]]:
constant[
Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data
]
variable[means] assign[=] call[name[data].mean, parameter[constant[1]]]
variable[variances] assign[=] call[name[data].var, parameter[constant[1]]]
if call[compare[name[means] greater[>] name[variances]].any, parameter[]] begin[:]
<ast.Raise object at 0x7da1b1a2ceb0>
<ast.Tuple object at 0x7da1b1a2f100> assign[=] name[data].shape
variable[P] assign[=] binary_operation[constant[1.0] - binary_operation[name[means] / name[variances]]]
variable[R] assign[=] binary_operation[binary_operation[name[means] * binary_operation[constant[1] - name[P]]] / name[P]]
for taget[name[i]] in starred[call[name[range], parameter[name[genes]]]] begin[:]
variable[result] assign[=] call[name[minimize], parameter[name[nb_ll_row], list[[<ast.Subscript object at 0x7da1b1a2fbe0>, <ast.Subscript object at 0x7da1b1a2ca90>]]]]
variable[params] assign[=] name[result].x
call[name[P]][name[i]] assign[=] call[name[params]][constant[0]]
call[name[R]][name[i]] assign[=] call[name[params]][constant[1]]
return[tuple[[<ast.Name object at 0x7da20c76fcd0>, <ast.Name object at 0x7da20c76e950>]]] | keyword[def] identifier[nb_fit] ( identifier[data] , identifier[P_init] = keyword[None] , identifier[R_init] = keyword[None] , identifier[epsilon] = literal[int] , identifier[max_iters] = literal[int] ):
literal[string]
identifier[means] = identifier[data] . identifier[mean] ( literal[int] )
identifier[variances] = identifier[data] . identifier[var] ( literal[int] )
keyword[if] ( identifier[means] > identifier[variances] ). identifier[any] ():
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[genes] , identifier[cells] = identifier[data] . identifier[shape]
identifier[P] = literal[int] - identifier[means] / identifier[variances]
identifier[R] = identifier[means] *( literal[int] - identifier[P] )/ identifier[P]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[genes] ):
identifier[result] = identifier[minimize] ( identifier[nb_ll_row] ,[ identifier[P] [ identifier[i] ], identifier[R] [ identifier[i] ]], identifier[args] =( identifier[data] [ identifier[i] ,:],),
identifier[bounds] =[( literal[int] , literal[int] ),( identifier[eps] , keyword[None] )])
identifier[params] = identifier[result] . identifier[x]
identifier[P] [ identifier[i] ]= identifier[params] [ literal[int] ]
identifier[R] [ identifier[i] ]= identifier[params] [ literal[int] ]
keyword[return] identifier[P] , identifier[R] | def nb_fit(data, P_init=None, R_init=None, epsilon=1e-08, max_iters=100):
"""
Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data
"""
means = data.mean(1)
variances = data.var(1)
if (means > variances).any():
raise ValueError('For NB fit, means must be less than variances') # depends on [control=['if'], data=[]]
(genes, cells) = data.shape
# method of moments
P = 1.0 - means / variances
R = means * (1 - P) / P
for i in range(genes):
result = minimize(nb_ll_row, [P[i], R[i]], args=(data[i, :],), bounds=[(0, 1), (eps, None)])
params = result.x
P[i] = params[0]
R[i] = params[1] # depends on [control=['for'], data=['i']]
#R[i] = fsolve(nb_r_deriv, R[i], args = (data[i,:],))
#P[i] = data[i,:].mean()/(data[i,:].mean() + R[i])
return (P, R) |
def yield_to_ioloop(self):
"""Function that will allow Rejected to process IOLoop events while
in a tight-loop inside an asynchronous consumer.
.. code-block:: python
:caption: Example Usage
class Consumer(consumer.Consumer):
@gen.coroutine
def process(self):
for iteration in range(0, 1000000):
yield self.yield_to_ioloop()
"""
try:
yield self._yield_condition.wait(
self._message.channel.connection.ioloop.time() + 0.001)
except gen.TimeoutError:
pass | def function[yield_to_ioloop, parameter[self]]:
constant[Function that will allow Rejected to process IOLoop events while
in a tight-loop inside an asynchronous consumer.
.. code-block:: python
:caption: Example Usage
class Consumer(consumer.Consumer):
@gen.coroutine
def process(self):
for iteration in range(0, 1000000):
yield self.yield_to_ioloop()
]
<ast.Try object at 0x7da1b0b301f0> | keyword[def] identifier[yield_to_ioloop] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[yield] identifier[self] . identifier[_yield_condition] . identifier[wait] (
identifier[self] . identifier[_message] . identifier[channel] . identifier[connection] . identifier[ioloop] . identifier[time] ()+ literal[int] )
keyword[except] identifier[gen] . identifier[TimeoutError] :
keyword[pass] | def yield_to_ioloop(self):
"""Function that will allow Rejected to process IOLoop events while
in a tight-loop inside an asynchronous consumer.
.. code-block:: python
:caption: Example Usage
class Consumer(consumer.Consumer):
@gen.coroutine
def process(self):
for iteration in range(0, 1000000):
yield self.yield_to_ioloop()
"""
try:
yield self._yield_condition.wait(self._message.channel.connection.ioloop.time() + 0.001) # depends on [control=['try'], data=[]]
except gen.TimeoutError:
pass # depends on [control=['except'], data=[]] |
def dict_where_len0(dict_):
"""
Accepts a dict of lists. Returns keys that have vals with no length
"""
keys = np.array(dict_.keys())
flags = np.array(list(map(len, dict_.values()))) == 0
indices = np.where(flags)[0]
return keys[indices] | def function[dict_where_len0, parameter[dict_]]:
constant[
Accepts a dict of lists. Returns keys that have vals with no length
]
variable[keys] assign[=] call[name[np].array, parameter[call[name[dict_].keys, parameter[]]]]
variable[flags] assign[=] compare[call[name[np].array, parameter[call[name[list], parameter[call[name[map], parameter[name[len], call[name[dict_].values, parameter[]]]]]]]] equal[==] constant[0]]
variable[indices] assign[=] call[call[name[np].where, parameter[name[flags]]]][constant[0]]
return[call[name[keys]][name[indices]]] | keyword[def] identifier[dict_where_len0] ( identifier[dict_] ):
literal[string]
identifier[keys] = identifier[np] . identifier[array] ( identifier[dict_] . identifier[keys] ())
identifier[flags] = identifier[np] . identifier[array] ( identifier[list] ( identifier[map] ( identifier[len] , identifier[dict_] . identifier[values] ())))== literal[int]
identifier[indices] = identifier[np] . identifier[where] ( identifier[flags] )[ literal[int] ]
keyword[return] identifier[keys] [ identifier[indices] ] | def dict_where_len0(dict_):
"""
Accepts a dict of lists. Returns keys that have vals with no length
"""
keys = np.array(dict_.keys())
flags = np.array(list(map(len, dict_.values()))) == 0
indices = np.where(flags)[0]
return keys[indices] |
def save_replay(self):
"""Save a replay, returning the data."""
res = self._client.send(save_replay=sc_pb.RequestSaveReplay())
return res.data | def function[save_replay, parameter[self]]:
constant[Save a replay, returning the data.]
variable[res] assign[=] call[name[self]._client.send, parameter[]]
return[name[res].data] | keyword[def] identifier[save_replay] ( identifier[self] ):
literal[string]
identifier[res] = identifier[self] . identifier[_client] . identifier[send] ( identifier[save_replay] = identifier[sc_pb] . identifier[RequestSaveReplay] ())
keyword[return] identifier[res] . identifier[data] | def save_replay(self):
"""Save a replay, returning the data."""
res = self._client.send(save_replay=sc_pb.RequestSaveReplay())
return res.data |
def _set_computation_mode(self, v, load=False):
"""
Setter method for computation_mode, mapped from YANG variable /mpls_state/lsp/frr/computation_mode (lsp-cspf-computation-mode)
If this variable is read-only (config: false) in the
source YANG file, then _set_computation_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_computation_mode() directly.
YANG Description: lsp frr computation mode
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'cspf-computation-mode-default': {'value': 1}, u'cspf-computation-mode-use-bypass-metric': {'value': 2}, u'cspf-computation-mode-use-igp-metric-global': {'value': 7}, u'cspf-computation-mode-use-igp-metric': {'value': 5}, u'cspf-computation-mode-use-te-metric': {'value': 4}, u'cspf-computation-mode-use-bypass-liberal': {'value': 3}, u'cspf-computation-mode-use-te-metric-global': {'value': 6}},), is_leaf=True, yang_name="computation-mode", rest_name="computation-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-cspf-computation-mode', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """computation_mode must be of a type compatible with lsp-cspf-computation-mode""",
'defined-type': "brocade-mpls-operational:lsp-cspf-computation-mode",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'cspf-computation-mode-default': {'value': 1}, u'cspf-computation-mode-use-bypass-metric': {'value': 2}, u'cspf-computation-mode-use-igp-metric-global': {'value': 7}, u'cspf-computation-mode-use-igp-metric': {'value': 5}, u'cspf-computation-mode-use-te-metric': {'value': 4}, u'cspf-computation-mode-use-bypass-liberal': {'value': 3}, u'cspf-computation-mode-use-te-metric-global': {'value': 6}},), is_leaf=True, yang_name="computation-mode", rest_name="computation-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-cspf-computation-mode', is_config=False)""",
})
self.__computation_mode = t
if hasattr(self, '_set'):
self._set() | def function[_set_computation_mode, parameter[self, v, load]]:
constant[
Setter method for computation_mode, mapped from YANG variable /mpls_state/lsp/frr/computation_mode (lsp-cspf-computation-mode)
If this variable is read-only (config: false) in the
source YANG file, then _set_computation_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_computation_mode() directly.
YANG Description: lsp frr computation mode
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f00d000>
name[self].__computation_mode assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_computation_mode] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[unicode] , identifier[restriction_type] = literal[string] , identifier[restriction_arg] ={ literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }},), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[False] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__computation_mode] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_computation_mode(self, v, load=False):
"""
Setter method for computation_mode, mapped from YANG variable /mpls_state/lsp/frr/computation_mode (lsp-cspf-computation-mode)
If this variable is read-only (config: false) in the
source YANG file, then _set_computation_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_computation_mode() directly.
YANG Description: lsp frr computation mode
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type='dict_key', restriction_arg={u'cspf-computation-mode-default': {'value': 1}, u'cspf-computation-mode-use-bypass-metric': {'value': 2}, u'cspf-computation-mode-use-igp-metric-global': {'value': 7}, u'cspf-computation-mode-use-igp-metric': {'value': 5}, u'cspf-computation-mode-use-te-metric': {'value': 4}, u'cspf-computation-mode-use-bypass-liberal': {'value': 3}, u'cspf-computation-mode-use-te-metric-global': {'value': 6}}), is_leaf=True, yang_name='computation-mode', rest_name='computation-mode', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-cspf-computation-mode', is_config=False) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'computation_mode must be of a type compatible with lsp-cspf-computation-mode', 'defined-type': 'brocade-mpls-operational:lsp-cspf-computation-mode', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u\'cspf-computation-mode-default\': {\'value\': 1}, u\'cspf-computation-mode-use-bypass-metric\': {\'value\': 2}, u\'cspf-computation-mode-use-igp-metric-global\': {\'value\': 7}, u\'cspf-computation-mode-use-igp-metric\': {\'value\': 5}, u\'cspf-computation-mode-use-te-metric\': {\'value\': 4}, u\'cspf-computation-mode-use-bypass-liberal\': {\'value\': 3}, u\'cspf-computation-mode-use-te-metric-global\': {\'value\': 6}},), is_leaf=True, yang_name="computation-mode", rest_name="computation-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace=\'urn:brocade.com:mgmt:brocade-mpls-operational\', defining_module=\'brocade-mpls-operational\', yang_type=\'lsp-cspf-computation-mode\', is_config=False)'}) # depends on [control=['except'], data=[]]
self.__computation_mode = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def run_sge(target, jobs, n=1, nproc=None, path='.', delete=True, wait=True, environ=None, modules=(), **kwargs):
"""
Evaluate the given function with each set of arguments, and return a list of results.
This function does in parallel on the Sun Grid Engine einvironment.
Parameters
----------
target : function
A function to be evaluated. The function must accepts three arguments,
which are a list of arguments given as `jobs`, a job and task id (int).
This function can not be a lambda.
jobs : list
A list of arguments passed to the function.
All the argument must be picklable.
n : int, optional
A number of tasks. Repeat the evaluation `n` times for each job.
1 for default.
nproc : int, optional
A number of cores available once.
If nothing is given, it runs with no limit.
path : str, optional
A path for temporary files to be saved. The path is created if not exists.
The current directory is used as its default.
delete : bool, optional
Whether it removes temporary files after the successful execution.
True for default.
wait : bool, optional
Whether it waits until all jobs are finished. If False, it just submits jobs.
True for default.
environ : dict, optional
An environment variables used when running jobs.
"PYTHONPATH" and "LD_LIBRARY_PATH" is inherited when no `environ` is given.
modules : list, optional
A list of module names imported before evaluating the given function.
The modules are loaded as: `from [module] import *`.
Returns
-------
results : list
A list of results. Each element is a list containing `n` results.
Examples
--------
>>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs'))
>>> def target(args, job_id, task_id):
... return (args[1] * args[0])
...
>>> run_sge(target, jobs, nproc=2, path='.tmp')
[['spam'], ['hamham'], ['eggseggseggs']]
>>> def target(args, job_id, task_id):
... return "{:d} {}".format(task_id, args[1] * args[0])
...
>>> run_sge(target, jobs, n=2, nproc=2, path='.tmp')
[['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']]
See Also
--------
ecell4.extra.ensemble.run_serial
ecell4.extra.ensemble.run_sge
ecell4.extra.ensemble.run_slurm
ecell4.extra.ensemble.run_multiprocessing
ecell4.extra.ensemble.run_azure
"""
logging.basicConfig(level=logging.DEBUG)
if isinstance(target, types.LambdaType) and target.__name__ == "<lambda>":
raise RuntimeError("A lambda function is not accepted")
# src = textwrap.dedent(inspect.getsource(singlerun)).replace(r'"', r'\"')
src = textwrap.dedent(inspect.getsource(target)).replace(r'"', r'\"')
if re.match('[\s\t]+', src.split('\n')[0]) is not None:
raise RuntimeError(
"Wrong indentation was found in the source translated")
if not os.path.isdir(path):
os.makedirs(path) #XXX: MYOB
if environ is None:
environ = {}
keys = ("LD_LIBRARY_PATH", "PYTHONPATH")
for key in keys:
if key in os.environ.keys():
environ[key] = os.environ[key]
if "PYTHONPATH" in environ.keys() and environ["PYTHONPATH"].strip() != "":
environ["PYTHONPATH"] = "{}:{}".format(os.getcwd(), environ["PYTHONPATH"])
else:
environ["PYTHONPATH"] = os.getcwd()
cmds = []
pickleins = []
pickleouts = []
scripts = []
for i, job in enumerate(jobs):
(fd, picklein) = tempfile.mkstemp(suffix='.pickle', prefix='sge-', dir=path)
with os.fdopen(fd, 'wb') as fout:
pickle.dump(job, fout)
pickleins.append(picklein)
pickleouts.append([])
for j in range(n):
fd, pickleout = tempfile.mkstemp(suffix='.pickle', prefix='sge-', dir=path)
os.close(fd)
pickleouts[-1].append(pickleout)
# pickleouts.append(
# [tempfile.mkstemp(suffix='.pickle', prefix='sge-', dir=path)[1]
# for j in range(n)])
code = 'import sys\n'
code += 'import os\n'
code += 'import pickle\n'
code += 'with open(\'{}\', \'rb\') as fin:\n'.format(picklein)
code += ' job = pickle.load(fin)\n'
code += 'pass\n'
for m in modules:
code += "from {} import *\n".format(m)
code += src
code += '\ntid = int(os.environ[\'SGE_TASK_ID\'])'
code += '\nretval = {:s}(job, {:d}, tid)'.format(target.__name__, i + 1)
code += '\nfilenames = {:s}'.format(str(pickleouts[-1]))
code += '\npickle.dump(retval, open(filenames[tid - 1], \'wb\'))\n'
(fd, script) = tempfile.mkstemp(suffix='.py', prefix='sge-', dir=path, text=True)
with os.fdopen(fd, 'w') as fout:
fout.write(code)
scripts.append(script)
cmd = '#!/bin/bash\n'
for key, value in environ.items():
cmd += 'export {:s}={:s}\n'.format(key, value)
cmd += 'python3 {}'.format(script) #XXX: Use the same executer, python
# cmd += 'python3 -c "\n'
# cmd += 'import sys\n'
# cmd += 'import os\n'
# cmd += 'import pickle\n'
# cmd += 'with open(sys.argv[1], \'rb\') as fin:\n'
# cmd += ' job = pickle.load(fin)\n'
# cmd += 'pass\n'
# for m in modules:
# cmd += "from {} import *\n".format(m)
# cmd += src
# cmd += '\ntid = int(os.environ[\'SGE_TASK_ID\'])'
# cmd += '\nretval = {:s}(job, {:d}, tid)'.format(target.__name__, i + 1)
# cmd += '\nfilenames = {:s}'.format(str(pickleouts[-1]))
# cmd += '\npickle.dump(retval, open(filenames[tid - 1], \'wb\'))'
# cmd += '" {:s}\n'.format(picklein)
cmds.append(cmd)
if isinstance(wait, bool):
sync = 0 if not wait else 10
elif isinstance(wait, int):
sync = wait
else:
raise ValueError("'wait' must be either 'int' or 'bool'.")
jobids = sge.run(cmds, n=n, path=path, delete=delete, sync=sync, max_running_tasks=nproc, **kwargs)
if not (sync > 0):
return None
for jobid, name in jobids:
outputs = sge.collect(jobid, name, n=n, path=path, delete=delete)
for output in outputs:
print(output, end='')
retval = [[pickle.load(open(pickleout, 'rb')) for pickleout in tasks]
for tasks in pickleouts]
if delete:
for tmpname in itertools.chain(pickleins, scripts, *pickleouts):
os.remove(tmpname)
return retval | def function[run_sge, parameter[target, jobs, n, nproc, path, delete, wait, environ, modules]]:
constant[
Evaluate the given function with each set of arguments, and return a list of results.
This function does in parallel on the Sun Grid Engine einvironment.
Parameters
----------
target : function
A function to be evaluated. The function must accepts three arguments,
which are a list of arguments given as `jobs`, a job and task id (int).
This function can not be a lambda.
jobs : list
A list of arguments passed to the function.
All the argument must be picklable.
n : int, optional
A number of tasks. Repeat the evaluation `n` times for each job.
1 for default.
nproc : int, optional
A number of cores available once.
If nothing is given, it runs with no limit.
path : str, optional
A path for temporary files to be saved. The path is created if not exists.
The current directory is used as its default.
delete : bool, optional
Whether it removes temporary files after the successful execution.
True for default.
wait : bool, optional
Whether it waits until all jobs are finished. If False, it just submits jobs.
True for default.
environ : dict, optional
An environment variables used when running jobs.
"PYTHONPATH" and "LD_LIBRARY_PATH" is inherited when no `environ` is given.
modules : list, optional
A list of module names imported before evaluating the given function.
The modules are loaded as: `from [module] import *`.
Returns
-------
results : list
A list of results. Each element is a list containing `n` results.
Examples
--------
>>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs'))
>>> def target(args, job_id, task_id):
... return (args[1] * args[0])
...
>>> run_sge(target, jobs, nproc=2, path='.tmp')
[['spam'], ['hamham'], ['eggseggseggs']]
>>> def target(args, job_id, task_id):
... return "{:d} {}".format(task_id, args[1] * args[0])
...
>>> run_sge(target, jobs, n=2, nproc=2, path='.tmp')
[['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']]
See Also
--------
ecell4.extra.ensemble.run_serial
ecell4.extra.ensemble.run_sge
ecell4.extra.ensemble.run_slurm
ecell4.extra.ensemble.run_multiprocessing
ecell4.extra.ensemble.run_azure
]
call[name[logging].basicConfig, parameter[]]
if <ast.BoolOp object at 0x7da1b0eff370> begin[:]
<ast.Raise object at 0x7da1b0efed70>
variable[src] assign[=] call[call[name[textwrap].dedent, parameter[call[name[inspect].getsource, parameter[name[target]]]]].replace, parameter[constant["], constant[\"]]]
if compare[call[name[re].match, parameter[constant[[\s ]+], call[call[name[src].split, parameter[constant[
]]]][constant[0]]]] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b0eff160>
if <ast.UnaryOp object at 0x7da1b0effd30> begin[:]
call[name[os].makedirs, parameter[name[path]]]
if compare[name[environ] is constant[None]] begin[:]
variable[environ] assign[=] dictionary[[], []]
variable[keys] assign[=] tuple[[<ast.Constant object at 0x7da1b0eff850>, <ast.Constant object at 0x7da1b0eff4f0>]]
for taget[name[key]] in starred[name[keys]] begin[:]
if compare[name[key] in call[name[os].environ.keys, parameter[]]] begin[:]
call[name[environ]][name[key]] assign[=] call[name[os].environ][name[key]]
if <ast.BoolOp object at 0x7da1b0d00e20> begin[:]
call[name[environ]][constant[PYTHONPATH]] assign[=] call[constant[{}:{}].format, parameter[call[name[os].getcwd, parameter[]], call[name[environ]][constant[PYTHONPATH]]]]
variable[cmds] assign[=] list[[]]
variable[pickleins] assign[=] list[[]]
variable[pickleouts] assign[=] list[[]]
variable[scripts] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0d018d0>, <ast.Name object at 0x7da1b0d02440>]]] in starred[call[name[enumerate], parameter[name[jobs]]]] begin[:]
<ast.Tuple object at 0x7da1b0d01900> assign[=] call[name[tempfile].mkstemp, parameter[]]
with call[name[os].fdopen, parameter[name[fd], constant[wb]]] begin[:]
call[name[pickle].dump, parameter[name[job], name[fout]]]
call[name[pickleins].append, parameter[name[picklein]]]
call[name[pickleouts].append, parameter[list[[]]]]
for taget[name[j]] in starred[call[name[range], parameter[name[n]]]] begin[:]
<ast.Tuple object at 0x7da1b0d02d40> assign[=] call[name[tempfile].mkstemp, parameter[]]
call[name[os].close, parameter[name[fd]]]
call[call[name[pickleouts]][<ast.UnaryOp object at 0x7da1b0ec0220>].append, parameter[name[pickleout]]]
variable[code] assign[=] constant[import sys
]
<ast.AugAssign object at 0x7da1b0ec06d0>
<ast.AugAssign object at 0x7da1b0ec0820>
<ast.AugAssign object at 0x7da1b0ec0580>
<ast.AugAssign object at 0x7da1b0ec04c0>
<ast.AugAssign object at 0x7da1b0ec0400>
for taget[name[m]] in starred[name[modules]] begin[:]
<ast.AugAssign object at 0x7da1b0ec0070>
<ast.AugAssign object at 0x7da1b0ec0790>
<ast.AugAssign object at 0x7da1b0ec01c0>
<ast.AugAssign object at 0x7da1b0ec0760>
<ast.AugAssign object at 0x7da1b0eff9d0>
<ast.AugAssign object at 0x7da1b0effc40>
<ast.Tuple object at 0x7da1b0effe20> assign[=] call[name[tempfile].mkstemp, parameter[]]
with call[name[os].fdopen, parameter[name[fd], constant[w]]] begin[:]
call[name[fout].write, parameter[name[code]]]
call[name[scripts].append, parameter[name[script]]]
variable[cmd] assign[=] constant[#!/bin/bash
]
for taget[tuple[[<ast.Name object at 0x7da1b0efda80>, <ast.Name object at 0x7da1b0efc490>]]] in starred[call[name[environ].items, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b0efd810>
<ast.AugAssign object at 0x7da1b0efd9c0>
call[name[cmds].append, parameter[name[cmd]]]
if call[name[isinstance], parameter[name[wait], name[bool]]] begin[:]
variable[sync] assign[=] <ast.IfExp object at 0x7da1b0efc070>
variable[jobids] assign[=] call[name[sge].run, parameter[name[cmds]]]
if <ast.UnaryOp object at 0x7da1b0efdf00> begin[:]
return[constant[None]]
for taget[tuple[[<ast.Name object at 0x7da1b0d6bf40>, <ast.Name object at 0x7da1b0d6bf10>]]] in starred[name[jobids]] begin[:]
variable[outputs] assign[=] call[name[sge].collect, parameter[name[jobid], name[name]]]
for taget[name[output]] in starred[name[outputs]] begin[:]
call[name[print], parameter[name[output]]]
variable[retval] assign[=] <ast.ListComp object at 0x7da1b0d6ba00>
if name[delete] begin[:]
for taget[name[tmpname]] in starred[call[name[itertools].chain, parameter[name[pickleins], name[scripts], <ast.Starred object at 0x7da1b0d6b520>]]] begin[:]
call[name[os].remove, parameter[name[tmpname]]]
return[name[retval]] | keyword[def] identifier[run_sge] ( identifier[target] , identifier[jobs] , identifier[n] = literal[int] , identifier[nproc] = keyword[None] , identifier[path] = literal[string] , identifier[delete] = keyword[True] , identifier[wait] = keyword[True] , identifier[environ] = keyword[None] , identifier[modules] =(),** identifier[kwargs] ):
literal[string]
identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[logging] . identifier[DEBUG] )
keyword[if] identifier[isinstance] ( identifier[target] , identifier[types] . identifier[LambdaType] ) keyword[and] identifier[target] . identifier[__name__] == literal[string] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[src] = identifier[textwrap] . identifier[dedent] ( identifier[inspect] . identifier[getsource] ( identifier[target] )). identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[src] . identifier[split] ( literal[string] )[ literal[int] ]) keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[RuntimeError] (
literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
identifier[os] . identifier[makedirs] ( identifier[path] )
keyword[if] identifier[environ] keyword[is] keyword[None] :
identifier[environ] ={}
identifier[keys] =( literal[string] , literal[string] )
keyword[for] identifier[key] keyword[in] identifier[keys] :
keyword[if] identifier[key] keyword[in] identifier[os] . identifier[environ] . identifier[keys] ():
identifier[environ] [ identifier[key] ]= identifier[os] . identifier[environ] [ identifier[key] ]
keyword[if] literal[string] keyword[in] identifier[environ] . identifier[keys] () keyword[and] identifier[environ] [ literal[string] ]. identifier[strip] ()!= literal[string] :
identifier[environ] [ literal[string] ]= literal[string] . identifier[format] ( identifier[os] . identifier[getcwd] (), identifier[environ] [ literal[string] ])
keyword[else] :
identifier[environ] [ literal[string] ]= identifier[os] . identifier[getcwd] ()
identifier[cmds] =[]
identifier[pickleins] =[]
identifier[pickleouts] =[]
identifier[scripts] =[]
keyword[for] identifier[i] , identifier[job] keyword[in] identifier[enumerate] ( identifier[jobs] ):
( identifier[fd] , identifier[picklein] )= identifier[tempfile] . identifier[mkstemp] ( identifier[suffix] = literal[string] , identifier[prefix] = literal[string] , identifier[dir] = identifier[path] )
keyword[with] identifier[os] . identifier[fdopen] ( identifier[fd] , literal[string] ) keyword[as] identifier[fout] :
identifier[pickle] . identifier[dump] ( identifier[job] , identifier[fout] )
identifier[pickleins] . identifier[append] ( identifier[picklein] )
identifier[pickleouts] . identifier[append] ([])
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[n] ):
identifier[fd] , identifier[pickleout] = identifier[tempfile] . identifier[mkstemp] ( identifier[suffix] = literal[string] , identifier[prefix] = literal[string] , identifier[dir] = identifier[path] )
identifier[os] . identifier[close] ( identifier[fd] )
identifier[pickleouts] [- literal[int] ]. identifier[append] ( identifier[pickleout] )
identifier[code] = literal[string]
identifier[code] += literal[string]
identifier[code] += literal[string]
identifier[code] += literal[string] . identifier[format] ( identifier[picklein] )
identifier[code] += literal[string]
identifier[code] += literal[string]
keyword[for] identifier[m] keyword[in] identifier[modules] :
identifier[code] += literal[string] . identifier[format] ( identifier[m] )
identifier[code] += identifier[src]
identifier[code] += literal[string]
identifier[code] += literal[string] . identifier[format] ( identifier[target] . identifier[__name__] , identifier[i] + literal[int] )
identifier[code] += literal[string] . identifier[format] ( identifier[str] ( identifier[pickleouts] [- literal[int] ]))
identifier[code] += literal[string]
( identifier[fd] , identifier[script] )= identifier[tempfile] . identifier[mkstemp] ( identifier[suffix] = literal[string] , identifier[prefix] = literal[string] , identifier[dir] = identifier[path] , identifier[text] = keyword[True] )
keyword[with] identifier[os] . identifier[fdopen] ( identifier[fd] , literal[string] ) keyword[as] identifier[fout] :
identifier[fout] . identifier[write] ( identifier[code] )
identifier[scripts] . identifier[append] ( identifier[script] )
identifier[cmd] = literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[environ] . identifier[items] ():
identifier[cmd] += literal[string] . identifier[format] ( identifier[key] , identifier[value] )
identifier[cmd] += literal[string] . identifier[format] ( identifier[script] )
identifier[cmds] . identifier[append] ( identifier[cmd] )
keyword[if] identifier[isinstance] ( identifier[wait] , identifier[bool] ):
identifier[sync] = literal[int] keyword[if] keyword[not] identifier[wait] keyword[else] literal[int]
keyword[elif] identifier[isinstance] ( identifier[wait] , identifier[int] ):
identifier[sync] = identifier[wait]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[jobids] = identifier[sge] . identifier[run] ( identifier[cmds] , identifier[n] = identifier[n] , identifier[path] = identifier[path] , identifier[delete] = identifier[delete] , identifier[sync] = identifier[sync] , identifier[max_running_tasks] = identifier[nproc] ,** identifier[kwargs] )
keyword[if] keyword[not] ( identifier[sync] > literal[int] ):
keyword[return] keyword[None]
keyword[for] identifier[jobid] , identifier[name] keyword[in] identifier[jobids] :
identifier[outputs] = identifier[sge] . identifier[collect] ( identifier[jobid] , identifier[name] , identifier[n] = identifier[n] , identifier[path] = identifier[path] , identifier[delete] = identifier[delete] )
keyword[for] identifier[output] keyword[in] identifier[outputs] :
identifier[print] ( identifier[output] , identifier[end] = literal[string] )
identifier[retval] =[[ identifier[pickle] . identifier[load] ( identifier[open] ( identifier[pickleout] , literal[string] )) keyword[for] identifier[pickleout] keyword[in] identifier[tasks] ]
keyword[for] identifier[tasks] keyword[in] identifier[pickleouts] ]
keyword[if] identifier[delete] :
keyword[for] identifier[tmpname] keyword[in] identifier[itertools] . identifier[chain] ( identifier[pickleins] , identifier[scripts] ,* identifier[pickleouts] ):
identifier[os] . identifier[remove] ( identifier[tmpname] )
keyword[return] identifier[retval] | def run_sge(target, jobs, n=1, nproc=None, path='.', delete=True, wait=True, environ=None, modules=(), **kwargs):
"""
Evaluate the given function with each set of arguments, and return a list of results.
This function does in parallel on the Sun Grid Engine einvironment.
Parameters
----------
target : function
A function to be evaluated. The function must accepts three arguments,
which are a list of arguments given as `jobs`, a job and task id (int).
This function can not be a lambda.
jobs : list
A list of arguments passed to the function.
All the argument must be picklable.
n : int, optional
A number of tasks. Repeat the evaluation `n` times for each job.
1 for default.
nproc : int, optional
A number of cores available once.
If nothing is given, it runs with no limit.
path : str, optional
A path for temporary files to be saved. The path is created if not exists.
The current directory is used as its default.
delete : bool, optional
Whether it removes temporary files after the successful execution.
True for default.
wait : bool, optional
Whether it waits until all jobs are finished. If False, it just submits jobs.
True for default.
environ : dict, optional
An environment variables used when running jobs.
"PYTHONPATH" and "LD_LIBRARY_PATH" is inherited when no `environ` is given.
modules : list, optional
A list of module names imported before evaluating the given function.
The modules are loaded as: `from [module] import *`.
Returns
-------
results : list
A list of results. Each element is a list containing `n` results.
Examples
--------
>>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs'))
>>> def target(args, job_id, task_id):
... return (args[1] * args[0])
...
>>> run_sge(target, jobs, nproc=2, path='.tmp')
[['spam'], ['hamham'], ['eggseggseggs']]
>>> def target(args, job_id, task_id):
... return "{:d} {}".format(task_id, args[1] * args[0])
...
>>> run_sge(target, jobs, n=2, nproc=2, path='.tmp')
[['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']]
See Also
--------
ecell4.extra.ensemble.run_serial
ecell4.extra.ensemble.run_sge
ecell4.extra.ensemble.run_slurm
ecell4.extra.ensemble.run_multiprocessing
ecell4.extra.ensemble.run_azure
"""
logging.basicConfig(level=logging.DEBUG)
if isinstance(target, types.LambdaType) and target.__name__ == '<lambda>':
raise RuntimeError('A lambda function is not accepted') # depends on [control=['if'], data=[]]
# src = textwrap.dedent(inspect.getsource(singlerun)).replace(r'"', r'\"')
src = textwrap.dedent(inspect.getsource(target)).replace('"', '\\"')
if re.match('[\\s\t]+', src.split('\n')[0]) is not None:
raise RuntimeError('Wrong indentation was found in the source translated') # depends on [control=['if'], data=[]]
if not os.path.isdir(path):
os.makedirs(path) #XXX: MYOB # depends on [control=['if'], data=[]]
if environ is None:
environ = {}
keys = ('LD_LIBRARY_PATH', 'PYTHONPATH')
for key in keys:
if key in os.environ.keys():
environ[key] = os.environ[key] # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
if 'PYTHONPATH' in environ.keys() and environ['PYTHONPATH'].strip() != '':
environ['PYTHONPATH'] = '{}:{}'.format(os.getcwd(), environ['PYTHONPATH']) # depends on [control=['if'], data=[]]
else:
environ['PYTHONPATH'] = os.getcwd() # depends on [control=['if'], data=['environ']]
cmds = []
pickleins = []
pickleouts = []
scripts = []
for (i, job) in enumerate(jobs):
(fd, picklein) = tempfile.mkstemp(suffix='.pickle', prefix='sge-', dir=path)
with os.fdopen(fd, 'wb') as fout:
pickle.dump(job, fout) # depends on [control=['with'], data=['fout']]
pickleins.append(picklein)
pickleouts.append([])
for j in range(n):
(fd, pickleout) = tempfile.mkstemp(suffix='.pickle', prefix='sge-', dir=path)
os.close(fd)
pickleouts[-1].append(pickleout) # depends on [control=['for'], data=[]]
# pickleouts.append(
# [tempfile.mkstemp(suffix='.pickle', prefix='sge-', dir=path)[1]
# for j in range(n)])
code = 'import sys\n'
code += 'import os\n'
code += 'import pickle\n'
code += "with open('{}', 'rb') as fin:\n".format(picklein)
code += ' job = pickle.load(fin)\n'
code += 'pass\n'
for m in modules:
code += 'from {} import *\n'.format(m) # depends on [control=['for'], data=['m']]
code += src
code += "\ntid = int(os.environ['SGE_TASK_ID'])"
code += '\nretval = {:s}(job, {:d}, tid)'.format(target.__name__, i + 1)
code += '\nfilenames = {:s}'.format(str(pickleouts[-1]))
code += "\npickle.dump(retval, open(filenames[tid - 1], 'wb'))\n"
(fd, script) = tempfile.mkstemp(suffix='.py', prefix='sge-', dir=path, text=True)
with os.fdopen(fd, 'w') as fout:
fout.write(code) # depends on [control=['with'], data=['fout']]
scripts.append(script)
cmd = '#!/bin/bash\n'
for (key, value) in environ.items():
cmd += 'export {:s}={:s}\n'.format(key, value) # depends on [control=['for'], data=[]]
cmd += 'python3 {}'.format(script) #XXX: Use the same executer, python
# cmd += 'python3 -c "\n'
# cmd += 'import sys\n'
# cmd += 'import os\n'
# cmd += 'import pickle\n'
# cmd += 'with open(sys.argv[1], \'rb\') as fin:\n'
# cmd += ' job = pickle.load(fin)\n'
# cmd += 'pass\n'
# for m in modules:
# cmd += "from {} import *\n".format(m)
# cmd += src
# cmd += '\ntid = int(os.environ[\'SGE_TASK_ID\'])'
# cmd += '\nretval = {:s}(job, {:d}, tid)'.format(target.__name__, i + 1)
# cmd += '\nfilenames = {:s}'.format(str(pickleouts[-1]))
# cmd += '\npickle.dump(retval, open(filenames[tid - 1], \'wb\'))'
# cmd += '" {:s}\n'.format(picklein)
cmds.append(cmd) # depends on [control=['for'], data=[]]
if isinstance(wait, bool):
sync = 0 if not wait else 10 # depends on [control=['if'], data=[]]
elif isinstance(wait, int):
sync = wait # depends on [control=['if'], data=[]]
else:
raise ValueError("'wait' must be either 'int' or 'bool'.")
jobids = sge.run(cmds, n=n, path=path, delete=delete, sync=sync, max_running_tasks=nproc, **kwargs)
if not sync > 0:
return None # depends on [control=['if'], data=[]]
for (jobid, name) in jobids:
outputs = sge.collect(jobid, name, n=n, path=path, delete=delete)
for output in outputs:
print(output, end='') # depends on [control=['for'], data=['output']] # depends on [control=['for'], data=[]]
retval = [[pickle.load(open(pickleout, 'rb')) for pickleout in tasks] for tasks in pickleouts]
if delete:
for tmpname in itertools.chain(pickleins, scripts, *pickleouts):
os.remove(tmpname) # depends on [control=['for'], data=['tmpname']] # depends on [control=['if'], data=[]]
return retval |
def _getAuth(self, auth):
"""Create the authorization/identification portion of a request."""
if type(auth) is dict:
return auth
else:
# auth is string
if None != self._clientid:
return {"cik": auth, "client_id": self._clientid}
elif None != self._resourceid:
return {"cik": auth, "resource_id": self._resourceid}
return {"cik": auth} | def function[_getAuth, parameter[self, auth]]:
constant[Create the authorization/identification portion of a request.]
if compare[call[name[type], parameter[name[auth]]] is name[dict]] begin[:]
return[name[auth]] | keyword[def] identifier[_getAuth] ( identifier[self] , identifier[auth] ):
literal[string]
keyword[if] identifier[type] ( identifier[auth] ) keyword[is] identifier[dict] :
keyword[return] identifier[auth]
keyword[else] :
keyword[if] keyword[None] != identifier[self] . identifier[_clientid] :
keyword[return] { literal[string] : identifier[auth] , literal[string] : identifier[self] . identifier[_clientid] }
keyword[elif] keyword[None] != identifier[self] . identifier[_resourceid] :
keyword[return] { literal[string] : identifier[auth] , literal[string] : identifier[self] . identifier[_resourceid] }
keyword[return] { literal[string] : identifier[auth] } | def _getAuth(self, auth):
"""Create the authorization/identification portion of a request."""
if type(auth) is dict:
return auth # depends on [control=['if'], data=[]]
else:
# auth is string
if None != self._clientid:
return {'cik': auth, 'client_id': self._clientid} # depends on [control=['if'], data=[]]
elif None != self._resourceid:
return {'cik': auth, 'resource_id': self._resourceid} # depends on [control=['if'], data=[]]
return {'cik': auth} |
def transform(self, fn, dtype=None, *args, **kwargs):
"""Equivalent to map, compatibility purpose only.
Column parameter ignored.
"""
rdd = self._rdd.map(fn)
if dtype is None:
return self.__class__(rdd, noblock=True, **self.get_params())
if dtype is np.ndarray:
return ArrayRDD(rdd, bsize=self.bsize, noblock=True)
elif dtype is sp.spmatrix:
return SparseRDD(rdd, bsize=self.bsize, noblock=True)
else:
return BlockRDD(rdd, bsize=self.bsize, dtype=dtype, noblock=True) | def function[transform, parameter[self, fn, dtype]]:
constant[Equivalent to map, compatibility purpose only.
Column parameter ignored.
]
variable[rdd] assign[=] call[name[self]._rdd.map, parameter[name[fn]]]
if compare[name[dtype] is constant[None]] begin[:]
return[call[name[self].__class__, parameter[name[rdd]]]]
if compare[name[dtype] is name[np].ndarray] begin[:]
return[call[name[ArrayRDD], parameter[name[rdd]]]] | keyword[def] identifier[transform] ( identifier[self] , identifier[fn] , identifier[dtype] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[rdd] = identifier[self] . identifier[_rdd] . identifier[map] ( identifier[fn] )
keyword[if] identifier[dtype] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[__class__] ( identifier[rdd] , identifier[noblock] = keyword[True] ,** identifier[self] . identifier[get_params] ())
keyword[if] identifier[dtype] keyword[is] identifier[np] . identifier[ndarray] :
keyword[return] identifier[ArrayRDD] ( identifier[rdd] , identifier[bsize] = identifier[self] . identifier[bsize] , identifier[noblock] = keyword[True] )
keyword[elif] identifier[dtype] keyword[is] identifier[sp] . identifier[spmatrix] :
keyword[return] identifier[SparseRDD] ( identifier[rdd] , identifier[bsize] = identifier[self] . identifier[bsize] , identifier[noblock] = keyword[True] )
keyword[else] :
keyword[return] identifier[BlockRDD] ( identifier[rdd] , identifier[bsize] = identifier[self] . identifier[bsize] , identifier[dtype] = identifier[dtype] , identifier[noblock] = keyword[True] ) | def transform(self, fn, dtype=None, *args, **kwargs):
"""Equivalent to map, compatibility purpose only.
Column parameter ignored.
"""
rdd = self._rdd.map(fn)
if dtype is None:
return self.__class__(rdd, noblock=True, **self.get_params()) # depends on [control=['if'], data=[]]
if dtype is np.ndarray:
return ArrayRDD(rdd, bsize=self.bsize, noblock=True) # depends on [control=['if'], data=[]]
elif dtype is sp.spmatrix:
return SparseRDD(rdd, bsize=self.bsize, noblock=True) # depends on [control=['if'], data=[]]
else:
return BlockRDD(rdd, bsize=self.bsize, dtype=dtype, noblock=True) |
def maintained_selection():
"""Maintain selection during context
Example:
>>> with maintained_selection():
... # Modify selection
... cmds.select('node', replace=True)
>>> # Selection restored
"""
previous_selection = cmds.ls(selection=True)
try:
yield
finally:
if previous_selection:
cmds.select(previous_selection,
replace=True,
noExpand=True)
else:
cmds.select(deselect=True,
noExpand=True) | def function[maintained_selection, parameter[]]:
constant[Maintain selection during context
Example:
>>> with maintained_selection():
... # Modify selection
... cmds.select('node', replace=True)
>>> # Selection restored
]
variable[previous_selection] assign[=] call[name[cmds].ls, parameter[]]
<ast.Try object at 0x7da1b02c2800> | keyword[def] identifier[maintained_selection] ():
literal[string]
identifier[previous_selection] = identifier[cmds] . identifier[ls] ( identifier[selection] = keyword[True] )
keyword[try] :
keyword[yield]
keyword[finally] :
keyword[if] identifier[previous_selection] :
identifier[cmds] . identifier[select] ( identifier[previous_selection] ,
identifier[replace] = keyword[True] ,
identifier[noExpand] = keyword[True] )
keyword[else] :
identifier[cmds] . identifier[select] ( identifier[deselect] = keyword[True] ,
identifier[noExpand] = keyword[True] ) | def maintained_selection():
"""Maintain selection during context
Example:
>>> with maintained_selection():
... # Modify selection
... cmds.select('node', replace=True)
>>> # Selection restored
"""
previous_selection = cmds.ls(selection=True)
try:
yield # depends on [control=['try'], data=[]]
finally:
if previous_selection:
cmds.select(previous_selection, replace=True, noExpand=True) # depends on [control=['if'], data=[]]
else:
cmds.select(deselect=True, noExpand=True) |
def discover(app, module_name=None):
"""
Automatically apply the permission logics written in the specified
module.
Examples
--------
Assume if you have a ``perms.py`` in ``your_app`` as::
from permission.logics import AuthorPermissionLogic
PERMISSION_LOGICS = (
('your_app.your_model', AuthorPermissionLogic),
)
Use this method to apply the permission logics enumerated in
``PERMISSION_LOGICS`` variable like:
>>> discover('your_app')
"""
from permission.compat import import_module
from permission.compat import get_model
from permission.conf import settings
from permission.utils.logics import add_permission_logic
variable_name = settings.PERMISSION_AUTODISCOVER_VARIABLE_NAME
module_name = module_name or settings.PERMISSION_AUTODISCOVER_MODULE_NAME
# import the module
m = import_module('%s.%s' % (app, module_name))
# check if the module have PERMISSION_LOGICS variable
if hasattr(m, variable_name):
# apply permission logics automatically
permission_logic_set = getattr(m, variable_name)
for model, permission_logic in permission_logic_set:
if isinstance(model, six.string_types):
# convert model string to model instance
model = get_model(*model.split('.', 1))
add_permission_logic(model, permission_logic) | def function[discover, parameter[app, module_name]]:
constant[
Automatically apply the permission logics written in the specified
module.
Examples
--------
Assume if you have a ``perms.py`` in ``your_app`` as::
from permission.logics import AuthorPermissionLogic
PERMISSION_LOGICS = (
('your_app.your_model', AuthorPermissionLogic),
)
Use this method to apply the permission logics enumerated in
``PERMISSION_LOGICS`` variable like:
>>> discover('your_app')
]
from relative_module[permission.compat] import module[import_module]
from relative_module[permission.compat] import module[get_model]
from relative_module[permission.conf] import module[settings]
from relative_module[permission.utils.logics] import module[add_permission_logic]
variable[variable_name] assign[=] name[settings].PERMISSION_AUTODISCOVER_VARIABLE_NAME
variable[module_name] assign[=] <ast.BoolOp object at 0x7da1b0609c90>
variable[m] assign[=] call[name[import_module], parameter[binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b06bde70>, <ast.Name object at 0x7da1b06bd8d0>]]]]]
if call[name[hasattr], parameter[name[m], name[variable_name]]] begin[:]
variable[permission_logic_set] assign[=] call[name[getattr], parameter[name[m], name[variable_name]]]
for taget[tuple[[<ast.Name object at 0x7da1b06be440>, <ast.Name object at 0x7da1b06befb0>]]] in starred[name[permission_logic_set]] begin[:]
if call[name[isinstance], parameter[name[model], name[six].string_types]] begin[:]
variable[model] assign[=] call[name[get_model], parameter[<ast.Starred object at 0x7da1b06be6e0>]]
call[name[add_permission_logic], parameter[name[model], name[permission_logic]]] | keyword[def] identifier[discover] ( identifier[app] , identifier[module_name] = keyword[None] ):
literal[string]
keyword[from] identifier[permission] . identifier[compat] keyword[import] identifier[import_module]
keyword[from] identifier[permission] . identifier[compat] keyword[import] identifier[get_model]
keyword[from] identifier[permission] . identifier[conf] keyword[import] identifier[settings]
keyword[from] identifier[permission] . identifier[utils] . identifier[logics] keyword[import] identifier[add_permission_logic]
identifier[variable_name] = identifier[settings] . identifier[PERMISSION_AUTODISCOVER_VARIABLE_NAME]
identifier[module_name] = identifier[module_name] keyword[or] identifier[settings] . identifier[PERMISSION_AUTODISCOVER_MODULE_NAME]
identifier[m] = identifier[import_module] ( literal[string] %( identifier[app] , identifier[module_name] ))
keyword[if] identifier[hasattr] ( identifier[m] , identifier[variable_name] ):
identifier[permission_logic_set] = identifier[getattr] ( identifier[m] , identifier[variable_name] )
keyword[for] identifier[model] , identifier[permission_logic] keyword[in] identifier[permission_logic_set] :
keyword[if] identifier[isinstance] ( identifier[model] , identifier[six] . identifier[string_types] ):
identifier[model] = identifier[get_model] (* identifier[model] . identifier[split] ( literal[string] , literal[int] ))
identifier[add_permission_logic] ( identifier[model] , identifier[permission_logic] ) | def discover(app, module_name=None):
"""
Automatically apply the permission logics written in the specified
module.
Examples
--------
Assume if you have a ``perms.py`` in ``your_app`` as::
from permission.logics import AuthorPermissionLogic
PERMISSION_LOGICS = (
('your_app.your_model', AuthorPermissionLogic),
)
Use this method to apply the permission logics enumerated in
``PERMISSION_LOGICS`` variable like:
>>> discover('your_app')
"""
from permission.compat import import_module
from permission.compat import get_model
from permission.conf import settings
from permission.utils.logics import add_permission_logic
variable_name = settings.PERMISSION_AUTODISCOVER_VARIABLE_NAME
module_name = module_name or settings.PERMISSION_AUTODISCOVER_MODULE_NAME
# import the module
m = import_module('%s.%s' % (app, module_name))
# check if the module have PERMISSION_LOGICS variable
if hasattr(m, variable_name):
# apply permission logics automatically
permission_logic_set = getattr(m, variable_name)
for (model, permission_logic) in permission_logic_set:
if isinstance(model, six.string_types):
# convert model string to model instance
model = get_model(*model.split('.', 1)) # depends on [control=['if'], data=[]]
add_permission_logic(model, permission_logic) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def runnable(args):
""" Show me what can be run in a given workspace """
w = fapi.get_workspace(args.project, args.workspace)
fapi._check_response_code(w, 200)
workspace_d = w.json()
if args.config and args.namespace and not args.entity:
# See what entities I can run on with this config
r = fapi.validate_config(args.project, args.workspace, args.namespace,
args.config)
fapi._check_response_code(r, 200)
config_d = r.json()
# First validate without any sample sets
errs = sum(_validate_helper(args, config_d, workspace_d, None), [])
if errs:
print("Configuration contains invalid expressions")
return 1
# Now get all the possible entities, and evaluate each
entity_type = config_d['methodConfiguration']['rootEntityType']
ent_r = fapi.get_entities(args.project, args.workspace, entity_type)
fapi._check_response_code(r, 200)
entities = ent_r.json()
can_run_on = []
cannot_run_on = []
# Validate every entity
for entity_d in entities:
# If there are errors in the validation
if sum(_validate_helper(args, config_d, workspace_d, entity_d), []):
cannot_run_on.append(entity_d['name'])
else:
can_run_on.append(entity_d['name'])
# Print what can be run
if can_run_on:
print("{0} CAN be run on {1} {2}(s):".format(args.config, len(can_run_on), entity_type))
print("\n".join(can_run_on)+"\n")
print("{0} CANNOT be run on {1} {2}(s)".format(args.config, len(cannot_run_on), entity_type))
#print("\n".join(cannot_run_on))
# See what method configs are possible for the given sample set
elif args.entity and args.entity_type and not args.config:
entity_r = fapi.get_entity(args.project, args.workspace,
args.entity_type, args.entity)
fapi._check_response_code(entity_r, [200,404])
if entity_r.status_code == 404:
print("Error: No {0} named '{1}'".format(args.entity_type, args.entity))
return 2
entity_d = entity_r.json()
# Now get all the method configs in the workspace
conf_r = fapi.list_workspace_configs(args.project, args.workspace)
fapi._check_response_code(conf_r, 200)
# Iterate over configs in the workspace, and validate against them
for cfg in conf_r.json():
# If we limit search to a particular namespace, skip ones that don't match
if args.namespace and cfg['namespace'] != args.namespace:
continue
# But we have to get the full description
r = fapi.validate_config(args.project, args.workspace,
cfg['namespace'], cfg['name'])
fapi._check_response_code(r, [200, 404])
if r.status_code == 404:
# Permission error, continue
continue
config_d = r.json()
errs = sum(_validate_helper(args, config_d, workspace_d, entity_d),[])
if not errs:
print(cfg['namespace'] + "/" + cfg['name'])
elif args.entity_type:
# Last mode, build a matrix of everything based on the entity type
# Get all of the entity_type
ent_r = fapi.get_entities(args.project, args.workspace, args.entity_type)
fapi._check_response_code(ent_r, 200)
entities = ent_r.json()
entity_names = sorted(e['name'] for e in entities)
conf_r = fapi.list_workspace_configs(args.project, args.workspace)
fapi._check_response_code(conf_r, 200)
conf_list = conf_r.json()
config_names = sorted(c['namespace'] + '/' + c['name'] for c in conf_list)
mat = {c:dict() for c in config_names}
# Now iterate over configs, building up the matrix
# Iterate over configs in the workspace, and validate against them
for cfg in conf_list:
# If we limit search to a particular namespace, skip ones that don't match
if args.namespace and cfg['namespace'] != args.namespace:
continue
# But we have to get the full description
r = fapi.validate_config(args.project, args.workspace,
cfg['namespace'], cfg['name'])
fapi._check_response_code(r, [200, 404])
if r.status_code == 404:
# Permission error, continue
continue
config_d = r.json()
# Validate against every entity
for entity_d in entities:
errs = sum(_validate_helper(args, config_d, workspace_d, entity_d),[])
#TODO: True/False? Y/N?
symbol = "X" if not errs else ""
cfg_name = cfg['namespace'] + '/' + cfg['name']
mat[cfg_name][entity_d['name']] = symbol
# Now print the validation matrix
# headers
print("Namespace/Method Config\t" + "\t".join(entity_names))
for conf in config_names:
print(conf + "\t" + "\t".join(mat[conf][e] for e in entity_names))
else:
print("runnable requires a namespace+configuration or entity type")
return 1 | def function[runnable, parameter[args]]:
constant[ Show me what can be run in a given workspace ]
variable[w] assign[=] call[name[fapi].get_workspace, parameter[name[args].project, name[args].workspace]]
call[name[fapi]._check_response_code, parameter[name[w], constant[200]]]
variable[workspace_d] assign[=] call[name[w].json, parameter[]]
if <ast.BoolOp object at 0x7da1b1a8f3d0> begin[:]
variable[r] assign[=] call[name[fapi].validate_config, parameter[name[args].project, name[args].workspace, name[args].namespace, name[args].config]]
call[name[fapi]._check_response_code, parameter[name[r], constant[200]]]
variable[config_d] assign[=] call[name[r].json, parameter[]]
variable[errs] assign[=] call[name[sum], parameter[call[name[_validate_helper], parameter[name[args], name[config_d], name[workspace_d], constant[None]]], list[[]]]]
if name[errs] begin[:]
call[name[print], parameter[constant[Configuration contains invalid expressions]]]
return[constant[1]]
variable[entity_type] assign[=] call[call[name[config_d]][constant[methodConfiguration]]][constant[rootEntityType]]
variable[ent_r] assign[=] call[name[fapi].get_entities, parameter[name[args].project, name[args].workspace, name[entity_type]]]
call[name[fapi]._check_response_code, parameter[name[r], constant[200]]]
variable[entities] assign[=] call[name[ent_r].json, parameter[]]
variable[can_run_on] assign[=] list[[]]
variable[cannot_run_on] assign[=] list[[]]
for taget[name[entity_d]] in starred[name[entities]] begin[:]
if call[name[sum], parameter[call[name[_validate_helper], parameter[name[args], name[config_d], name[workspace_d], name[entity_d]]], list[[]]]] begin[:]
call[name[cannot_run_on].append, parameter[call[name[entity_d]][constant[name]]]]
if name[can_run_on] begin[:]
call[name[print], parameter[call[constant[{0} CAN be run on {1} {2}(s):].format, parameter[name[args].config, call[name[len], parameter[name[can_run_on]]], name[entity_type]]]]]
call[name[print], parameter[binary_operation[call[constant[
].join, parameter[name[can_run_on]]] + constant[
]]]]
call[name[print], parameter[call[constant[{0} CANNOT be run on {1} {2}(s)].format, parameter[name[args].config, call[name[len], parameter[name[cannot_run_on]]], name[entity_type]]]]] | keyword[def] identifier[runnable] ( identifier[args] ):
literal[string]
identifier[w] = identifier[fapi] . identifier[get_workspace] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] )
identifier[fapi] . identifier[_check_response_code] ( identifier[w] , literal[int] )
identifier[workspace_d] = identifier[w] . identifier[json] ()
keyword[if] identifier[args] . identifier[config] keyword[and] identifier[args] . identifier[namespace] keyword[and] keyword[not] identifier[args] . identifier[entity] :
identifier[r] = identifier[fapi] . identifier[validate_config] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] , identifier[args] . identifier[namespace] ,
identifier[args] . identifier[config] )
identifier[fapi] . identifier[_check_response_code] ( identifier[r] , literal[int] )
identifier[config_d] = identifier[r] . identifier[json] ()
identifier[errs] = identifier[sum] ( identifier[_validate_helper] ( identifier[args] , identifier[config_d] , identifier[workspace_d] , keyword[None] ),[])
keyword[if] identifier[errs] :
identifier[print] ( literal[string] )
keyword[return] literal[int]
identifier[entity_type] = identifier[config_d] [ literal[string] ][ literal[string] ]
identifier[ent_r] = identifier[fapi] . identifier[get_entities] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] , identifier[entity_type] )
identifier[fapi] . identifier[_check_response_code] ( identifier[r] , literal[int] )
identifier[entities] = identifier[ent_r] . identifier[json] ()
identifier[can_run_on] =[]
identifier[cannot_run_on] =[]
keyword[for] identifier[entity_d] keyword[in] identifier[entities] :
keyword[if] identifier[sum] ( identifier[_validate_helper] ( identifier[args] , identifier[config_d] , identifier[workspace_d] , identifier[entity_d] ),[]):
identifier[cannot_run_on] . identifier[append] ( identifier[entity_d] [ literal[string] ])
keyword[else] :
identifier[can_run_on] . identifier[append] ( identifier[entity_d] [ literal[string] ])
keyword[if] identifier[can_run_on] :
identifier[print] ( literal[string] . identifier[format] ( identifier[args] . identifier[config] , identifier[len] ( identifier[can_run_on] ), identifier[entity_type] ))
identifier[print] ( literal[string] . identifier[join] ( identifier[can_run_on] )+ literal[string] )
identifier[print] ( literal[string] . identifier[format] ( identifier[args] . identifier[config] , identifier[len] ( identifier[cannot_run_on] ), identifier[entity_type] ))
keyword[elif] identifier[args] . identifier[entity] keyword[and] identifier[args] . identifier[entity_type] keyword[and] keyword[not] identifier[args] . identifier[config] :
identifier[entity_r] = identifier[fapi] . identifier[get_entity] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] ,
identifier[args] . identifier[entity_type] , identifier[args] . identifier[entity] )
identifier[fapi] . identifier[_check_response_code] ( identifier[entity_r] ,[ literal[int] , literal[int] ])
keyword[if] identifier[entity_r] . identifier[status_code] == literal[int] :
identifier[print] ( literal[string] . identifier[format] ( identifier[args] . identifier[entity_type] , identifier[args] . identifier[entity] ))
keyword[return] literal[int]
identifier[entity_d] = identifier[entity_r] . identifier[json] ()
identifier[conf_r] = identifier[fapi] . identifier[list_workspace_configs] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] )
identifier[fapi] . identifier[_check_response_code] ( identifier[conf_r] , literal[int] )
keyword[for] identifier[cfg] keyword[in] identifier[conf_r] . identifier[json] ():
keyword[if] identifier[args] . identifier[namespace] keyword[and] identifier[cfg] [ literal[string] ]!= identifier[args] . identifier[namespace] :
keyword[continue]
identifier[r] = identifier[fapi] . identifier[validate_config] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] ,
identifier[cfg] [ literal[string] ], identifier[cfg] [ literal[string] ])
identifier[fapi] . identifier[_check_response_code] ( identifier[r] ,[ literal[int] , literal[int] ])
keyword[if] identifier[r] . identifier[status_code] == literal[int] :
keyword[continue]
identifier[config_d] = identifier[r] . identifier[json] ()
identifier[errs] = identifier[sum] ( identifier[_validate_helper] ( identifier[args] , identifier[config_d] , identifier[workspace_d] , identifier[entity_d] ),[])
keyword[if] keyword[not] identifier[errs] :
identifier[print] ( identifier[cfg] [ literal[string] ]+ literal[string] + identifier[cfg] [ literal[string] ])
keyword[elif] identifier[args] . identifier[entity_type] :
identifier[ent_r] = identifier[fapi] . identifier[get_entities] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] , identifier[args] . identifier[entity_type] )
identifier[fapi] . identifier[_check_response_code] ( identifier[ent_r] , literal[int] )
identifier[entities] = identifier[ent_r] . identifier[json] ()
identifier[entity_names] = identifier[sorted] ( identifier[e] [ literal[string] ] keyword[for] identifier[e] keyword[in] identifier[entities] )
identifier[conf_r] = identifier[fapi] . identifier[list_workspace_configs] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] )
identifier[fapi] . identifier[_check_response_code] ( identifier[conf_r] , literal[int] )
identifier[conf_list] = identifier[conf_r] . identifier[json] ()
identifier[config_names] = identifier[sorted] ( identifier[c] [ literal[string] ]+ literal[string] + identifier[c] [ literal[string] ] keyword[for] identifier[c] keyword[in] identifier[conf_list] )
identifier[mat] ={ identifier[c] : identifier[dict] () keyword[for] identifier[c] keyword[in] identifier[config_names] }
keyword[for] identifier[cfg] keyword[in] identifier[conf_list] :
keyword[if] identifier[args] . identifier[namespace] keyword[and] identifier[cfg] [ literal[string] ]!= identifier[args] . identifier[namespace] :
keyword[continue]
identifier[r] = identifier[fapi] . identifier[validate_config] ( identifier[args] . identifier[project] , identifier[args] . identifier[workspace] ,
identifier[cfg] [ literal[string] ], identifier[cfg] [ literal[string] ])
identifier[fapi] . identifier[_check_response_code] ( identifier[r] ,[ literal[int] , literal[int] ])
keyword[if] identifier[r] . identifier[status_code] == literal[int] :
keyword[continue]
identifier[config_d] = identifier[r] . identifier[json] ()
keyword[for] identifier[entity_d] keyword[in] identifier[entities] :
identifier[errs] = identifier[sum] ( identifier[_validate_helper] ( identifier[args] , identifier[config_d] , identifier[workspace_d] , identifier[entity_d] ),[])
identifier[symbol] = literal[string] keyword[if] keyword[not] identifier[errs] keyword[else] literal[string]
identifier[cfg_name] = identifier[cfg] [ literal[string] ]+ literal[string] + identifier[cfg] [ literal[string] ]
identifier[mat] [ identifier[cfg_name] ][ identifier[entity_d] [ literal[string] ]]= identifier[symbol]
identifier[print] ( literal[string] + literal[string] . identifier[join] ( identifier[entity_names] ))
keyword[for] identifier[conf] keyword[in] identifier[config_names] :
identifier[print] ( identifier[conf] + literal[string] + literal[string] . identifier[join] ( identifier[mat] [ identifier[conf] ][ identifier[e] ] keyword[for] identifier[e] keyword[in] identifier[entity_names] ))
keyword[else] :
identifier[print] ( literal[string] )
keyword[return] literal[int] | def runnable(args):
""" Show me what can be run in a given workspace """
w = fapi.get_workspace(args.project, args.workspace)
fapi._check_response_code(w, 200)
workspace_d = w.json()
if args.config and args.namespace and (not args.entity):
# See what entities I can run on with this config
r = fapi.validate_config(args.project, args.workspace, args.namespace, args.config)
fapi._check_response_code(r, 200)
config_d = r.json()
# First validate without any sample sets
errs = sum(_validate_helper(args, config_d, workspace_d, None), [])
if errs:
print('Configuration contains invalid expressions')
return 1 # depends on [control=['if'], data=[]]
# Now get all the possible entities, and evaluate each
entity_type = config_d['methodConfiguration']['rootEntityType']
ent_r = fapi.get_entities(args.project, args.workspace, entity_type)
fapi._check_response_code(r, 200)
entities = ent_r.json()
can_run_on = []
cannot_run_on = []
# Validate every entity
for entity_d in entities:
# If there are errors in the validation
if sum(_validate_helper(args, config_d, workspace_d, entity_d), []):
cannot_run_on.append(entity_d['name']) # depends on [control=['if'], data=[]]
else:
can_run_on.append(entity_d['name']) # depends on [control=['for'], data=['entity_d']]
# Print what can be run
if can_run_on:
print('{0} CAN be run on {1} {2}(s):'.format(args.config, len(can_run_on), entity_type))
print('\n'.join(can_run_on) + '\n') # depends on [control=['if'], data=[]]
print('{0} CANNOT be run on {1} {2}(s)'.format(args.config, len(cannot_run_on), entity_type)) # depends on [control=['if'], data=[]]
#print("\n".join(cannot_run_on))
# See what method configs are possible for the given sample set
elif args.entity and args.entity_type and (not args.config):
entity_r = fapi.get_entity(args.project, args.workspace, args.entity_type, args.entity)
fapi._check_response_code(entity_r, [200, 404])
if entity_r.status_code == 404:
print("Error: No {0} named '{1}'".format(args.entity_type, args.entity))
return 2 # depends on [control=['if'], data=[]]
entity_d = entity_r.json()
# Now get all the method configs in the workspace
conf_r = fapi.list_workspace_configs(args.project, args.workspace)
fapi._check_response_code(conf_r, 200)
# Iterate over configs in the workspace, and validate against them
for cfg in conf_r.json():
# If we limit search to a particular namespace, skip ones that don't match
if args.namespace and cfg['namespace'] != args.namespace:
continue # depends on [control=['if'], data=[]]
# But we have to get the full description
r = fapi.validate_config(args.project, args.workspace, cfg['namespace'], cfg['name'])
fapi._check_response_code(r, [200, 404])
if r.status_code == 404:
# Permission error, continue
continue # depends on [control=['if'], data=[]]
config_d = r.json()
errs = sum(_validate_helper(args, config_d, workspace_d, entity_d), [])
if not errs:
print(cfg['namespace'] + '/' + cfg['name']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cfg']] # depends on [control=['if'], data=[]]
elif args.entity_type:
# Last mode, build a matrix of everything based on the entity type
# Get all of the entity_type
ent_r = fapi.get_entities(args.project, args.workspace, args.entity_type)
fapi._check_response_code(ent_r, 200)
entities = ent_r.json()
entity_names = sorted((e['name'] for e in entities))
conf_r = fapi.list_workspace_configs(args.project, args.workspace)
fapi._check_response_code(conf_r, 200)
conf_list = conf_r.json()
config_names = sorted((c['namespace'] + '/' + c['name'] for c in conf_list))
mat = {c: dict() for c in config_names}
# Now iterate over configs, building up the matrix
# Iterate over configs in the workspace, and validate against them
for cfg in conf_list:
# If we limit search to a particular namespace, skip ones that don't match
if args.namespace and cfg['namespace'] != args.namespace:
continue # depends on [control=['if'], data=[]]
# But we have to get the full description
r = fapi.validate_config(args.project, args.workspace, cfg['namespace'], cfg['name'])
fapi._check_response_code(r, [200, 404])
if r.status_code == 404:
# Permission error, continue
continue # depends on [control=['if'], data=[]]
config_d = r.json()
# Validate against every entity
for entity_d in entities:
errs = sum(_validate_helper(args, config_d, workspace_d, entity_d), [])
#TODO: True/False? Y/N?
symbol = 'X' if not errs else ''
cfg_name = cfg['namespace'] + '/' + cfg['name']
mat[cfg_name][entity_d['name']] = symbol # depends on [control=['for'], data=['entity_d']] # depends on [control=['for'], data=['cfg']]
# Now print the validation matrix
# headers
print('Namespace/Method Config\t' + '\t'.join(entity_names))
for conf in config_names:
print(conf + '\t' + '\t'.join((mat[conf][e] for e in entity_names))) # depends on [control=['for'], data=['conf']] # depends on [control=['if'], data=[]]
else:
print('runnable requires a namespace+configuration or entity type')
return 1 |
def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=[]):
    """Asserts that two dictionaries are equal, producing a custom message if they are not.

    The comparison is recursive: nested dicts are checked with the same
    rules, numeric leaves are compared via ``assert_eq`` with the optional
    ``number_tolerance``, and every other leaf must be exactly equal.

    :param expected: reference dict.
    :param actual: dict under test.
    :param number_tolerance: optional tolerance forwarded to ``assert_eq``
        for numeric leaf values.
    :param dict_path: key path from the root dict down to this level; used
        only to build readable failure messages.
        NOTE(review): the mutable default ``[]`` is only safe because the
        list is never mutated in place (``dict_path + [k]`` allocates a new
        list each iteration) -- confirm before refactoring.
    """
    assert_is_instance(expected, dict)
    assert_is_instance(actual, dict)
    # The key sets must match exactly; missing and extra keys are reported
    # in separate, specific messages.
    expected_keys = set(expected.keys())
    actual_keys = set(actual.keys())
    assert expected_keys <= actual_keys, "Actual dict at %s is missing keys: %r" % (
        _dict_path_string(dict_path),
        expected_keys - actual_keys,
    )
    assert actual_keys <= expected_keys, "Actual dict at %s has extra keys: %r" % (
        _dict_path_string(dict_path),
        actual_keys - expected_keys,
    )
    for k in expected_keys:
        key_path = dict_path + [k]
        # Type compatibility is required in both directions (each value must
        # be an instance of the other's type), which rules out one-sided
        # subclass relationships.
        assert_is_instance(
            actual[k],
            type(expected[k]),
            extra="Types don't match for %s" % _dict_path_string(key_path),
        )
        assert_is_instance(
            expected[k],
            type(actual[k]),
            extra="Types don't match for %s" % _dict_path_string(key_path),
        )
        if isinstance(actual[k], dict):
            # Recurse into nested dicts, extending the reported key path.
            assert_dict_eq(
                expected[k],
                actual[k],
                number_tolerance=number_tolerance,
                dict_path=key_path,
            )
        elif isinstance(actual[k], _number_types):
            # Numeric leaves honour the caller-supplied tolerance.
            assert_eq(
                expected[k],
                actual[k],
                extra="Value doesn't match for %s" % _dict_path_string(key_path),
                tolerance=number_tolerance,
            )
        else:
            # Any other leaf type must compare exactly equal.
            assert_eq(
                expected[k],
                actual[k],
                extra="Value doesn't match for %s" % _dict_path_string(key_path),
            ) | def function[assert_dict_eq, parameter[expected, actual, number_tolerance, dict_path]]:
constant[Asserts that two dictionaries are equal, producing a custom message if they are not.]
call[name[assert_is_instance], parameter[name[expected], name[dict]]]
call[name[assert_is_instance], parameter[name[actual], name[dict]]]
variable[expected_keys] assign[=] call[name[set], parameter[call[name[expected].keys, parameter[]]]]
variable[actual_keys] assign[=] call[name[set], parameter[call[name[actual].keys, parameter[]]]]
assert[compare[name[expected_keys] less_or_equal[<=] name[actual_keys]]]
assert[compare[name[actual_keys] less_or_equal[<=] name[expected_keys]]]
for taget[name[k]] in starred[name[expected_keys]] begin[:]
variable[key_path] assign[=] binary_operation[name[dict_path] + list[[<ast.Name object at 0x7da1b0fc60e0>]]]
call[name[assert_is_instance], parameter[call[name[actual]][name[k]], call[name[type], parameter[call[name[expected]][name[k]]]]]]
call[name[assert_is_instance], parameter[call[name[expected]][name[k]], call[name[type], parameter[call[name[actual]][name[k]]]]]]
if call[name[isinstance], parameter[call[name[actual]][name[k]], name[dict]]] begin[:]
call[name[assert_dict_eq], parameter[call[name[expected]][name[k]], call[name[actual]][name[k]]]] | keyword[def] identifier[assert_dict_eq] ( identifier[expected] , identifier[actual] , identifier[number_tolerance] = keyword[None] , identifier[dict_path] =[]):
literal[string]
identifier[assert_is_instance] ( identifier[expected] , identifier[dict] )
identifier[assert_is_instance] ( identifier[actual] , identifier[dict] )
identifier[expected_keys] = identifier[set] ( identifier[expected] . identifier[keys] ())
identifier[actual_keys] = identifier[set] ( identifier[actual] . identifier[keys] ())
keyword[assert] identifier[expected_keys] <= identifier[actual_keys] , literal[string] %(
identifier[_dict_path_string] ( identifier[dict_path] ),
identifier[expected_keys] - identifier[actual_keys] ,
)
keyword[assert] identifier[actual_keys] <= identifier[expected_keys] , literal[string] %(
identifier[_dict_path_string] ( identifier[dict_path] ),
identifier[actual_keys] - identifier[expected_keys] ,
)
keyword[for] identifier[k] keyword[in] identifier[expected_keys] :
identifier[key_path] = identifier[dict_path] +[ identifier[k] ]
identifier[assert_is_instance] (
identifier[actual] [ identifier[k] ],
identifier[type] ( identifier[expected] [ identifier[k] ]),
identifier[extra] = literal[string] % identifier[_dict_path_string] ( identifier[key_path] ),
)
identifier[assert_is_instance] (
identifier[expected] [ identifier[k] ],
identifier[type] ( identifier[actual] [ identifier[k] ]),
identifier[extra] = literal[string] % identifier[_dict_path_string] ( identifier[key_path] ),
)
keyword[if] identifier[isinstance] ( identifier[actual] [ identifier[k] ], identifier[dict] ):
identifier[assert_dict_eq] (
identifier[expected] [ identifier[k] ],
identifier[actual] [ identifier[k] ],
identifier[number_tolerance] = identifier[number_tolerance] ,
identifier[dict_path] = identifier[key_path] ,
)
keyword[elif] identifier[isinstance] ( identifier[actual] [ identifier[k] ], identifier[_number_types] ):
identifier[assert_eq] (
identifier[expected] [ identifier[k] ],
identifier[actual] [ identifier[k] ],
identifier[extra] = literal[string] % identifier[_dict_path_string] ( identifier[key_path] ),
identifier[tolerance] = identifier[number_tolerance] ,
)
keyword[else] :
identifier[assert_eq] (
identifier[expected] [ identifier[k] ],
identifier[actual] [ identifier[k] ],
identifier[extra] = literal[string] % identifier[_dict_path_string] ( identifier[key_path] ),
) | def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=[]):
"""Asserts that two dictionaries are equal, producing a custom message if they are not."""
assert_is_instance(expected, dict)
assert_is_instance(actual, dict)
expected_keys = set(expected.keys())
actual_keys = set(actual.keys())
assert expected_keys <= actual_keys, 'Actual dict at %s is missing keys: %r' % (_dict_path_string(dict_path), expected_keys - actual_keys)
assert actual_keys <= expected_keys, 'Actual dict at %s has extra keys: %r' % (_dict_path_string(dict_path), actual_keys - expected_keys)
for k in expected_keys:
key_path = dict_path + [k]
assert_is_instance(actual[k], type(expected[k]), extra="Types don't match for %s" % _dict_path_string(key_path))
assert_is_instance(expected[k], type(actual[k]), extra="Types don't match for %s" % _dict_path_string(key_path))
if isinstance(actual[k], dict):
assert_dict_eq(expected[k], actual[k], number_tolerance=number_tolerance, dict_path=key_path) # depends on [control=['if'], data=[]]
elif isinstance(actual[k], _number_types):
assert_eq(expected[k], actual[k], extra="Value doesn't match for %s" % _dict_path_string(key_path), tolerance=number_tolerance) # depends on [control=['if'], data=[]]
else:
assert_eq(expected[k], actual[k], extra="Value doesn't match for %s" % _dict_path_string(key_path)) # depends on [control=['for'], data=['k']] |
def QTabCapsulate(self, name, widget_list, blocking = False):
    """Helper function that encapsulates QWidget into a QMainWindow

    :param name: window title of the created QMainWindow.
    :param widget_list: List of tuples : [(widget,"name"), (widget,"name"), ..]
    :param blocking: if True, the window is made application-modal.
    :return: the created window (a ``QuickWindow`` QMainWindow) whose central
        widget is a QTabWidget holding the given widgets as tabs.
    """
    class QuickWindow(QtWidgets.QMainWindow):
        # Thin QMainWindow subclass that re-emits close/show events as Qt
        # signals so the caller can react to window lifecycle changes.
        class Signals(QtCore.QObject):
            close = QtCore.Signal()
            show = QtCore.Signal()
        def __init__(self, blocking = False, parent = None):
            super().__init__(parent)
            self.propagate = True # send signals or not
            self.setStyleSheet(style.main_gui)
            if (blocking):
                # Modal: blocks input to all other windows of the application.
                self.setWindowModality(QtCore.Qt.ApplicationModal)
            self.signals = self.Signals()
            self.tab = QtWidgets.QTabWidget()
            self.setCentralWidget(self.tab)
            self.setLayout(QtWidgets.QHBoxLayout())
        def closeEvent(self, e):
            # Forward the close event as a signal unless propagation is off.
            if (self.propagate):
                self.signals.close.emit()
            e.accept()
        def showEvent(self, e):
            # Forward the show event as a signal unless propagation is off.
            if (self.propagate):
                self.signals.show.emit()
            e.accept()
        def setPropagate(self):
            # Enable signal emission on close/show events.
            self.propagate = True
        def unSetPropagate(self):
            # Disable signal emission on close/show events.
            self.propagate = False
    win = QuickWindow(blocking = blocking)
    win.setWindowTitle(name)
    # Add each (widget, tab label) pair as a tab.
    for w in widget_list:
        win.tab.addTab(w[0], w[1])
    return win | def function[QTabCapsulate, parameter[self, name, widget_list, blocking]]:
constant[Helper function that encapsulates QWidget into a QMainWindow
:param widget_list: List of tuples : [(widget,"name"), (widget,"name"), ..]
]
class class[QuickWindow, parameter[]] begin[:]
class class[Signals, parameter[]] begin[:]
variable[close] assign[=] call[name[QtCore].Signal, parameter[]]
variable[show] assign[=] call[name[QtCore].Signal, parameter[]]
def function[__init__, parameter[self, blocking, parent]]:
call[call[name[super], parameter[]].__init__, parameter[name[parent]]]
name[self].propagate assign[=] constant[True]
call[name[self].setStyleSheet, parameter[name[style].main_gui]]
if name[blocking] begin[:]
call[name[self].setWindowModality, parameter[name[QtCore].Qt.ApplicationModal]]
name[self].signals assign[=] call[name[self].Signals, parameter[]]
name[self].tab assign[=] call[name[QtWidgets].QTabWidget, parameter[]]
call[name[self].setCentralWidget, parameter[name[self].tab]]
call[name[self].setLayout, parameter[call[name[QtWidgets].QHBoxLayout, parameter[]]]]
def function[closeEvent, parameter[self, e]]:
if name[self].propagate begin[:]
call[name[self].signals.close.emit, parameter[]]
call[name[e].accept, parameter[]]
def function[showEvent, parameter[self, e]]:
if name[self].propagate begin[:]
call[name[self].signals.show.emit, parameter[]]
call[name[e].accept, parameter[]]
def function[setPropagate, parameter[self]]:
name[self].propagate assign[=] constant[True]
def function[unSetPropagate, parameter[self]]:
name[self].propagate assign[=] constant[False]
variable[win] assign[=] call[name[QuickWindow], parameter[]]
call[name[win].setWindowTitle, parameter[name[name]]]
for taget[name[w]] in starred[name[widget_list]] begin[:]
call[name[win].tab.addTab, parameter[call[name[w]][constant[0]], call[name[w]][constant[1]]]]
return[name[win]] | keyword[def] identifier[QTabCapsulate] ( identifier[self] , identifier[name] , identifier[widget_list] , identifier[blocking] = keyword[False] ):
literal[string]
keyword[class] identifier[QuickWindow] ( identifier[QtWidgets] . identifier[QMainWindow] ):
keyword[class] identifier[Signals] ( identifier[QtCore] . identifier[QObject] ):
identifier[close] = identifier[QtCore] . identifier[Signal] ()
identifier[show] = identifier[QtCore] . identifier[Signal] ()
keyword[def] identifier[__init__] ( identifier[self] , identifier[blocking] = keyword[False] , identifier[parent] = keyword[None] ):
identifier[super] (). identifier[__init__] ( identifier[parent] )
identifier[self] . identifier[propagate] = keyword[True]
identifier[self] . identifier[setStyleSheet] ( identifier[style] . identifier[main_gui] )
keyword[if] ( identifier[blocking] ):
identifier[self] . identifier[setWindowModality] ( identifier[QtCore] . identifier[Qt] . identifier[ApplicationModal] )
identifier[self] . identifier[signals] = identifier[self] . identifier[Signals] ()
identifier[self] . identifier[tab] = identifier[QtWidgets] . identifier[QTabWidget] ()
identifier[self] . identifier[setCentralWidget] ( identifier[self] . identifier[tab] )
identifier[self] . identifier[setLayout] ( identifier[QtWidgets] . identifier[QHBoxLayout] ())
keyword[def] identifier[closeEvent] ( identifier[self] , identifier[e] ):
keyword[if] ( identifier[self] . identifier[propagate] ):
identifier[self] . identifier[signals] . identifier[close] . identifier[emit] ()
identifier[e] . identifier[accept] ()
keyword[def] identifier[showEvent] ( identifier[self] , identifier[e] ):
keyword[if] ( identifier[self] . identifier[propagate] ):
identifier[self] . identifier[signals] . identifier[show] . identifier[emit] ()
identifier[e] . identifier[accept] ()
keyword[def] identifier[setPropagate] ( identifier[self] ):
identifier[self] . identifier[propagate] = keyword[True]
keyword[def] identifier[unSetPropagate] ( identifier[self] ):
identifier[self] . identifier[propagate] = keyword[False]
identifier[win] = identifier[QuickWindow] ( identifier[blocking] = identifier[blocking] )
identifier[win] . identifier[setWindowTitle] ( identifier[name] )
keyword[for] identifier[w] keyword[in] identifier[widget_list] :
identifier[win] . identifier[tab] . identifier[addTab] ( identifier[w] [ literal[int] ], identifier[w] [ literal[int] ])
keyword[return] identifier[win] | def QTabCapsulate(self, name, widget_list, blocking=False):
"""Helper function that encapsulates QWidget into a QMainWindow
:param widget_list: List of tuples : [(widget,"name"), (widget,"name"), ..]
"""
class QuickWindow(QtWidgets.QMainWindow):
class Signals(QtCore.QObject):
close = QtCore.Signal()
show = QtCore.Signal()
def __init__(self, blocking=False, parent=None):
super().__init__(parent)
self.propagate = True # send signals or not
self.setStyleSheet(style.main_gui)
if blocking:
self.setWindowModality(QtCore.Qt.ApplicationModal) # depends on [control=['if'], data=[]]
self.signals = self.Signals()
self.tab = QtWidgets.QTabWidget()
self.setCentralWidget(self.tab)
self.setLayout(QtWidgets.QHBoxLayout())
def closeEvent(self, e):
if self.propagate:
self.signals.close.emit() # depends on [control=['if'], data=[]]
e.accept()
def showEvent(self, e):
if self.propagate:
self.signals.show.emit() # depends on [control=['if'], data=[]]
e.accept()
def setPropagate(self):
self.propagate = True
def unSetPropagate(self):
self.propagate = False
win = QuickWindow(blocking=blocking)
win.setWindowTitle(name)
for w in widget_list:
win.tab.addTab(w[0], w[1]) # depends on [control=['for'], data=['w']]
return win |
def upload_model(self, path: str, meta: dict, force: bool) -> str:
    """
    Put the given file to the remote storage.

    Abstract: concrete storage backends are expected to override this
    method; this base implementation always raises NotImplementedError.

    :param path: Path to the model file.
    :param meta: Metadata of the model.
    :param force: Overwrite an existing model.
    :return: URL of the uploaded model.
    :raises BackendRequiredError: If supplied bucket is unusable.
    :raises ModelAlreadyExistsError: If model already exists and no forcing.
    """
    raise NotImplementedError | def function[upload_model, parameter[self, path, meta, force]]:
constant[
Put the given file to the remote storage.
:param path: Path to the model file.
:param meta: Metadata of the model.
:param force: Overwrite an existing model.
:return: URL of the uploaded model.
:raises BackendRequiredError: If supplied bucket is unusable.
:raises ModelAlreadyExistsError: If model already exists and no forcing.
]
<ast.Raise object at 0x7da1b0c8ba90> | keyword[def] identifier[upload_model] ( identifier[self] , identifier[path] : identifier[str] , identifier[meta] : identifier[dict] , identifier[force] : identifier[bool] )-> identifier[str] :
literal[string]
keyword[raise] identifier[NotImplementedError] | def upload_model(self, path: str, meta: dict, force: bool) -> str:
"""
Put the given file to the remote storage.
:param path: Path to the model file.
:param meta: Metadata of the model.
:param force: Overwrite an existing model.
:return: URL of the uploaded model.
:raises BackendRequiredError: If supplied bucket is unusable.
:raises ModelAlreadyExistsError: If model already exists and no forcing.
"""
raise NotImplementedError |
def calc_lfp_layer(self):
    """
    Calculate the LFP from concatenated subpopulations residing in a
    certain layer, e.g all L4E pops are summed, according to the `mapping_Yy`
    attribute of the `hybridLFPy.Population` objects.

    Returns a dict keyed by layer-population name ``Y`` whose values are the
    summed LFP contributions of the member subpopulations ``y``.
    NOTE(review): entries of ``self.mapping_Yy`` appear to be assumed grouped
    by ``Y`` (consecutive rows share the same ``Y``): the first ``y`` of a
    group initialises the entry and subsequent ones accumulate into it --
    confirm against how ``mapping_Yy`` is constructed.
    """
    LFPdict = {}
    lastY = None
    for Y, y in self.mapping_Yy:
        if lastY != Y:
            # First subpopulation of this layer group: start the entry.
            try:
                LFPdict.update({Y : self.LFPdict[y]})
            except KeyError:
                # Subpopulations without a stored LFP are silently skipped.
                pass
        else:
            # Same layer as the previous row: accumulate the contribution.
            try:
                LFPdict[Y] += self.LFPdict[y]
            except KeyError:
                # Subpopulations without a stored LFP are silently skipped.
                pass
        lastY = Y
    return LFPdict | def function[calc_lfp_layer, parameter[self]]:
constant[
Calculate the LFP from concatenated subpopulations residing in a
certain layer, e.g all L4E pops are summed, according to the `mapping_Yy`
attribute of the `hybridLFPy.Population` objects.
]
variable[LFPdict] assign[=] dictionary[[], []]
variable[lastY] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da1b0cfe020>, <ast.Name object at 0x7da1b0cffdc0>]]] in starred[name[self].mapping_Yy] begin[:]
if compare[name[lastY] not_equal[!=] name[Y]] begin[:]
<ast.Try object at 0x7da1b0cfea70>
variable[lastY] assign[=] name[Y]
return[name[LFPdict]] | keyword[def] identifier[calc_lfp_layer] ( identifier[self] ):
literal[string]
identifier[LFPdict] ={}
identifier[lastY] = keyword[None]
keyword[for] identifier[Y] , identifier[y] keyword[in] identifier[self] . identifier[mapping_Yy] :
keyword[if] identifier[lastY] != identifier[Y] :
keyword[try] :
identifier[LFPdict] . identifier[update] ({ identifier[Y] : identifier[self] . identifier[LFPdict] [ identifier[y] ]})
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[else] :
keyword[try] :
identifier[LFPdict] [ identifier[Y] ]+= identifier[self] . identifier[LFPdict] [ identifier[y] ]
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[lastY] = identifier[Y]
keyword[return] identifier[LFPdict] | def calc_lfp_layer(self):
"""
Calculate the LFP from concatenated subpopulations residing in a
certain layer, e.g all L4E pops are summed, according to the `mapping_Yy`
attribute of the `hybridLFPy.Population` objects.
"""
LFPdict = {}
lastY = None
for (Y, y) in self.mapping_Yy:
if lastY != Y:
try:
LFPdict.update({Y: self.LFPdict[y]}) # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['Y']]
else:
try:
LFPdict[Y] += self.LFPdict[y] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
lastY = Y # depends on [control=['for'], data=[]]
return LFPdict |
def orientation(self, theta, B_theta, force=False):
    """
    Return the orientation envelope of the filter.

    A von Mises distribution is used on the orientation:
    - the mean orientation is ``theta`` (in radians),
    - ``B_theta`` is the bandwidth (in radians). It is equal to the standard
      deviation of the Gaussian envelope which approximates the distribution
      for low bandwidths. The Half-Width at Half Height is given by
      approximately np.sqrt(2*B_theta**2*np.log(2)).

    See http://en.wikipedia.org/wiki/Von_Mises_distribution

    Parameters
    ----------
    theta : float
        Mean orientation (radians).
    B_theta : float
        Orientation bandwidth (radians); an infinite bandwidth yields a
        strictly flat envelope.
    force : bool
        If True, bypass the cache and recompute.

    Returns
    -------
    Scalar ``1.`` for an infinite bandwidth, otherwise an array shaped
    like ``self.f_theta``.
    """
    if B_theta == np.inf:  # for large bandwidth, returns a strictly flat envelope
        # Equality (rather than the previous ``is np.inf`` identity test)
        # also matches float('inf') objects passed in by callers.
        enveloppe_orientation = 1.
    elif self.pe.use_cache and not force:
        tag = str(theta) + '_' + str(B_theta)
        try:
            return self.cache['orientation'][tag]
        except KeyError:
            # Cache miss only (a bare except would also hide real errors):
            # compute once with force=True and memoize the result.
            if self.pe.verbose > 50:
                print('doing orientation cache for tag ', tag)
            self.cache['orientation'][tag] = self.orientation(theta, B_theta, force=True)
            return self.cache['orientation'][tag]
    else:  # non pathological case
        # As shown in:
        # http://www.csse.uwa.edu.au/~pk/research/matlabfns/PhaseCongruency/Docs/convexpl.html
        # this single bump allows (without the symmetric) to code both symmetric
        # and anti-symmetric parts in one shot.
        cos_angle = np.cos(self.f_theta - theta)
        enveloppe_orientation = np.exp(cos_angle / B_theta**2)
    return enveloppe_orientation
constant[
Returns the orientation envelope:
We use a von-Mises distribution on the orientation:
- mean orientation is ``theta`` (in radians),
- ``B_theta`` is the bandwidth (in radians). It is equal to the standard deviation of the Gaussian
envelope which approximate the distribution for low bandwidths. The Half-Width at Half Height is
given by approximately np.sqrt(2*B_theta_**2*np.log(2)).
# selecting one direction, theta is the mean direction, B_theta the spread
# we use a von-mises distribution on the orientation
# see http://en.wikipedia.org/wiki/Von_Mises_distribution
]
if compare[name[B_theta] is name[np].inf] begin[:]
variable[enveloppe_orientation] assign[=] constant[1.0]
return[name[enveloppe_orientation]] | keyword[def] identifier[orientation] ( identifier[self] , identifier[theta] , identifier[B_theta] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] identifier[B_theta] keyword[is] identifier[np] . identifier[inf] :
identifier[enveloppe_orientation] = literal[int]
keyword[elif] identifier[self] . identifier[pe] . identifier[use_cache] keyword[and] keyword[not] identifier[force] :
identifier[tag] = identifier[str] ( identifier[theta] )+ literal[string] + identifier[str] ( identifier[B_theta] )
keyword[try] :
keyword[return] identifier[self] . identifier[cache] [ literal[string] ][ identifier[tag] ]
keyword[except] :
keyword[if] identifier[self] . identifier[pe] . identifier[verbose] > literal[int] : identifier[print] ( literal[string] , identifier[tag] )
identifier[self] . identifier[cache] [ literal[string] ][ identifier[tag] ]= identifier[self] . identifier[orientation] ( identifier[theta] , identifier[B_theta] , identifier[force] = keyword[True] )
keyword[return] identifier[self] . identifier[cache] [ literal[string] ][ identifier[tag] ]
keyword[else] :
identifier[cos_angle] = identifier[np] . identifier[cos] ( identifier[self] . identifier[f_theta] - identifier[theta] )
identifier[enveloppe_orientation] = identifier[np] . identifier[exp] ( identifier[cos_angle] / identifier[B_theta] ** literal[int] )
keyword[return] identifier[enveloppe_orientation] | def orientation(self, theta, B_theta, force=False):
"""
Returns the orientation envelope:
We use a von-Mises distribution on the orientation:
- mean orientation is ``theta`` (in radians),
- ``B_theta`` is the bandwidth (in radians). It is equal to the standard deviation of the Gaussian
envelope which approximate the distribution for low bandwidths. The Half-Width at Half Height is
given by approximately np.sqrt(2*B_theta_**2*np.log(2)).
# selecting one direction, theta is the mean direction, B_theta the spread
# we use a von-mises distribution on the orientation
# see http://en.wikipedia.org/wiki/Von_Mises_distribution
"""
if B_theta is np.inf: # for large bandwidth, returns a strictly flat envelope
enveloppe_orientation = 1.0 # depends on [control=['if'], data=[]]
elif self.pe.use_cache and (not force):
tag = str(theta) + '_' + str(B_theta)
try:
return self.cache['orientation'][tag] # depends on [control=['try'], data=[]]
except:
if self.pe.verbose > 50:
print('doing orientation cache for tag ', tag) # depends on [control=['if'], data=[]]
self.cache['orientation'][tag] = self.orientation(theta, B_theta, force=True)
return self.cache['orientation'][tag] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else: # non pathological case
# As shown in:
# http://www.csse.uwa.edu.au/~pk/research/matlabfns/PhaseCongruency/Docs/convexpl.html
# this single bump allows (without the symmetric) to code both symmetric
# and anti-symmetric parts in one shot.
cos_angle = np.cos(self.f_theta - theta)
enveloppe_orientation = np.exp(cos_angle / B_theta ** 2)
return enveloppe_orientation |
def outformat_is_text():
    """
    Report whether the current output format is text.

    Only safe to call within a click context.
    """
    state = click.get_current_context().ensure_object(CommandState)
    return state.outformat_is_text()
constant[
Only safe to call within a click context.
]
variable[ctx] assign[=] call[name[click].get_current_context, parameter[]]
variable[state] assign[=] call[name[ctx].ensure_object, parameter[name[CommandState]]]
return[call[name[state].outformat_is_text, parameter[]]] | keyword[def] identifier[outformat_is_text] ():
literal[string]
identifier[ctx] = identifier[click] . identifier[get_current_context] ()
identifier[state] = identifier[ctx] . identifier[ensure_object] ( identifier[CommandState] )
keyword[return] identifier[state] . identifier[outformat_is_text] () | def outformat_is_text():
"""
Only safe to call within a click context.
"""
ctx = click.get_current_context()
state = ctx.ensure_object(CommandState)
return state.outformat_is_text() |
def scale(self, scale, center=None):
    """
    Scale the matrix about a given origin.

    The scaling is applied *after* the transformations already present
    in the matrix.

    Parameters
    ----------
    scale : array-like
        Scale factors along x, y and z axes.
    center : array-like or None
        The x, y and z coordinates to scale around. If None,
        (0, 0, 0) will be used.
    """
    scale_mat = transforms.scale(as_vec4(scale, default=(1, 1, 1, 1))[0, :3])
    if center is not None:
        origin = as_vec4(center)[0, :3]
        # Conjugate the scaling with translations so the scale happens
        # about ``origin`` rather than (0, 0, 0).
        scale_mat = np.dot(
            np.dot(transforms.translate(-origin), scale_mat),
            transforms.translate(origin))
    self.matrix = np.dot(self.matrix, scale_mat)
constant[
Scale the matrix about a given origin.
The scaling is applied *after* the transformations already present
in the matrix.
Parameters
----------
scale : array-like
Scale factors along x, y and z axes.
center : array-like or None
The x, y and z coordinates to scale around. If None,
(0, 0, 0) will be used.
]
variable[scale] assign[=] call[name[transforms].scale, parameter[call[call[name[as_vec4], parameter[name[scale]]]][tuple[[<ast.Constant object at 0x7da1b10ec9a0>, <ast.Slice object at 0x7da1b10ec880>]]]]]
if compare[name[center] is_not constant[None]] begin[:]
variable[center] assign[=] call[call[name[as_vec4], parameter[name[center]]]][tuple[[<ast.Constant object at 0x7da1b10ece50>, <ast.Slice object at 0x7da1b10ec0a0>]]]
variable[scale] assign[=] call[name[np].dot, parameter[call[name[np].dot, parameter[call[name[transforms].translate, parameter[<ast.UnaryOp object at 0x7da1b10eda50>]], name[scale]]], call[name[transforms].translate, parameter[name[center]]]]]
name[self].matrix assign[=] call[name[np].dot, parameter[name[self].matrix, name[scale]]] | keyword[def] identifier[scale] ( identifier[self] , identifier[scale] , identifier[center] = keyword[None] ):
literal[string]
identifier[scale] = identifier[transforms] . identifier[scale] ( identifier[as_vec4] ( identifier[scale] , identifier[default] =( literal[int] , literal[int] , literal[int] , literal[int] ))[ literal[int] ,: literal[int] ])
keyword[if] identifier[center] keyword[is] keyword[not] keyword[None] :
identifier[center] = identifier[as_vec4] ( identifier[center] )[ literal[int] ,: literal[int] ]
identifier[scale] = identifier[np] . identifier[dot] ( identifier[np] . identifier[dot] ( identifier[transforms] . identifier[translate] (- identifier[center] ), identifier[scale] ),
identifier[transforms] . identifier[translate] ( identifier[center] ))
identifier[self] . identifier[matrix] = identifier[np] . identifier[dot] ( identifier[self] . identifier[matrix] , identifier[scale] ) | def scale(self, scale, center=None):
"""
Scale the matrix about a given origin.
The scaling is applied *after* the transformations already present
in the matrix.
Parameters
----------
scale : array-like
Scale factors along x, y and z axes.
center : array-like or None
The x, y and z coordinates to scale around. If None,
(0, 0, 0) will be used.
"""
scale = transforms.scale(as_vec4(scale, default=(1, 1, 1, 1))[0, :3])
if center is not None:
center = as_vec4(center)[0, :3]
scale = np.dot(np.dot(transforms.translate(-center), scale), transforms.translate(center)) # depends on [control=['if'], data=['center']]
self.matrix = np.dot(self.matrix, scale) |
def process_account(account_info):
    """Scan all buckets in an account and schedule processing"""
    log = logging.getLogger('salactus.bucket-iterator')
    log.info("processing account %s", account_info)
    session = get_session(account_info)
    s3 = session.client('s3', config=s3config)
    bucket_records = s3.list_buckets()['Buckets']
    connection.hset(
        'bucket-accounts', account_info['name'], json.dumps(account_info))
    # Record each bucket's creation time for later reporting.
    for record in bucket_records:
        connection.hset(
            'bucket-ages', bucket_id(account_info, record['Name']),
            record['CreationDate'].isoformat())
    names = [record['Name'] for record in bucket_records]
    # Optional allow-list, then optional deny-list, from the account config.
    include = account_info.pop('buckets', None)
    if include:
        names = [n for n in names if n in include]
    exclude = account_info.pop('not-buckets', None)
    if exclude:
        names = [n for n in names if n not in exclude]
    log.info("processing %d buckets in account %s",
             len(names), account_info['name'])
    for bucket_set in chunks(names, 50):
        invoke(process_bucket_set, account_info, bucket_set)
constant[Scan all buckets in an account and schedule processing]
variable[log] assign[=] call[name[logging].getLogger, parameter[constant[salactus.bucket-iterator]]]
call[name[log].info, parameter[constant[processing account %s], name[account_info]]]
variable[session] assign[=] call[name[get_session], parameter[name[account_info]]]
variable[client] assign[=] call[name[session].client, parameter[constant[s3]]]
variable[buckets] assign[=] call[call[name[client].list_buckets, parameter[]]][constant[Buckets]]
call[name[connection].hset, parameter[constant[bucket-accounts], call[name[account_info]][constant[name]], call[name[json].dumps, parameter[name[account_info]]]]]
for taget[name[b]] in starred[name[buckets]] begin[:]
call[name[connection].hset, parameter[constant[bucket-ages], call[name[bucket_id], parameter[name[account_info], call[name[b]][constant[Name]]]], call[call[name[b]][constant[CreationDate]].isoformat, parameter[]]]]
variable[account_buckets] assign[=] call[name[account_info].pop, parameter[constant[buckets], constant[None]]]
variable[buckets] assign[=] <ast.ListComp object at 0x7da1b1c3d150>
variable[account_not_buckets] assign[=] call[name[account_info].pop, parameter[constant[not-buckets], constant[None]]]
variable[buckets] assign[=] <ast.ListComp object at 0x7da1b1c3dd50>
call[name[log].info, parameter[constant[processing %d buckets in account %s], call[name[len], parameter[name[buckets]]], call[name[account_info]][constant[name]]]]
for taget[name[bucket_set]] in starred[call[name[chunks], parameter[name[buckets], constant[50]]]] begin[:]
call[name[invoke], parameter[name[process_bucket_set], name[account_info], name[bucket_set]]] | keyword[def] identifier[process_account] ( identifier[account_info] ):
literal[string]
identifier[log] = identifier[logging] . identifier[getLogger] ( literal[string] )
identifier[log] . identifier[info] ( literal[string] , identifier[account_info] )
identifier[session] = identifier[get_session] ( identifier[account_info] )
identifier[client] = identifier[session] . identifier[client] ( literal[string] , identifier[config] = identifier[s3config] )
identifier[buckets] = identifier[client] . identifier[list_buckets] ()[ literal[string] ]
identifier[connection] . identifier[hset] (
literal[string] , identifier[account_info] [ literal[string] ], identifier[json] . identifier[dumps] ( identifier[account_info] ))
keyword[for] identifier[b] keyword[in] identifier[buckets] :
identifier[connection] . identifier[hset] (
literal[string] , identifier[bucket_id] ( identifier[account_info] , identifier[b] [ literal[string] ]),
identifier[b] [ literal[string] ]. identifier[isoformat] ())
identifier[account_buckets] = identifier[account_info] . identifier[pop] ( literal[string] , keyword[None] )
identifier[buckets] =[ identifier[n] [ literal[string] ] keyword[for] identifier[n] keyword[in] identifier[buckets]
keyword[if] keyword[not] identifier[account_buckets] keyword[or]
identifier[n] [ literal[string] ] keyword[in] identifier[account_buckets] ]
identifier[account_not_buckets] = identifier[account_info] . identifier[pop] ( literal[string] , keyword[None] )
identifier[buckets] =[ identifier[n] keyword[for] identifier[n] keyword[in] identifier[buckets]
keyword[if] keyword[not] identifier[account_not_buckets] keyword[or]
identifier[n] keyword[not] keyword[in] identifier[account_not_buckets] ]
identifier[log] . identifier[info] ( literal[string] ,
identifier[len] ( identifier[buckets] ), identifier[account_info] [ literal[string] ])
keyword[for] identifier[bucket_set] keyword[in] identifier[chunks] ( identifier[buckets] , literal[int] ):
identifier[invoke] ( identifier[process_bucket_set] , identifier[account_info] , identifier[bucket_set] ) | def process_account(account_info):
"""Scan all buckets in an account and schedule processing"""
log = logging.getLogger('salactus.bucket-iterator')
log.info('processing account %s', account_info)
session = get_session(account_info)
client = session.client('s3', config=s3config)
buckets = client.list_buckets()['Buckets']
connection.hset('bucket-accounts', account_info['name'], json.dumps(account_info))
for b in buckets:
connection.hset('bucket-ages', bucket_id(account_info, b['Name']), b['CreationDate'].isoformat()) # depends on [control=['for'], data=['b']]
account_buckets = account_info.pop('buckets', None)
buckets = [n['Name'] for n in buckets if not account_buckets or n['Name'] in account_buckets]
account_not_buckets = account_info.pop('not-buckets', None)
buckets = [n for n in buckets if not account_not_buckets or n not in account_not_buckets]
log.info('processing %d buckets in account %s', len(buckets), account_info['name'])
for bucket_set in chunks(buckets, 50):
invoke(process_bucket_set, account_info, bucket_set) # depends on [control=['for'], data=['bucket_set']] |
def can_update_activities(self):
    """Tests if this user can update Activities.
    A return of true does not guarantee successful authorization. A
    return of false indicates that it is known updating an Activity
    will result in a PermissionDenied. This is intended as a hint
    to an application that may opt not to offer update operations to
    an unauthorized user.
    return: (boolean) - false if activity modification is not
    authorized, true otherwise
    compliance: mandatory - This method must be implemented.
    """
    url_path = construct_url('authorization',
                             bank_id=self._catalog_idstr)
    # The authorization endpoint reports per-capability hints.
    hints = self._get_request(url_path)['activityHints']
    return hints['canUpdate']
constant[Tests if this user can update Activities.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating an Activity
will result in a PermissionDenied. This is intended as a hint
to an application that may opt not to offer update operations to
an unauthorized user.
return: (boolean) - false if activity modification is not
authorized, true otherwise
compliance: mandatory - This method must be implemented.
]
variable[url_path] assign[=] call[name[construct_url], parameter[constant[authorization]]]
return[call[call[call[name[self]._get_request, parameter[name[url_path]]]][constant[activityHints]]][constant[canUpdate]]] | keyword[def] identifier[can_update_activities] ( identifier[self] ):
literal[string]
identifier[url_path] = identifier[construct_url] ( literal[string] ,
identifier[bank_id] = identifier[self] . identifier[_catalog_idstr] )
keyword[return] identifier[self] . identifier[_get_request] ( identifier[url_path] )[ literal[string] ][ literal[string] ] | def can_update_activities(self):
"""Tests if this user can update Activities.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating an Activity
will result in a PermissionDenied. This is intended as a hint
to an application that may opt not to offer update operations to
an unauthorized user.
return: (boolean) - false if activity modification is not
authorized, true otherwise
compliance: mandatory - This method must be implemented.
"""
url_path = construct_url('authorization', bank_id=self._catalog_idstr)
return self._get_request(url_path)['activityHints']['canUpdate'] |
def CaseGroups(unicode_dir=_UNICODE_DIR):
  """Returns Unicode code groups equivalent under case folding.

  Each group is a sorted list of code points,
  and the list of groups is sorted by first code point
  in the group.

  Args:
    unicode_dir: Unicode data directory

  Returns:
    (togroup, groups) pair: a dict mapping each lowercase code point to
    its fold-equivalent group, and the sorted list of those groups.
  """
  # Dict mapping lowercase code point to fold-equivalent group.
  togroup = {}

  def DoLine(codes, fields):
    """Process single CaseFolding.txt line, updating togroup."""
    (_, foldtype, lower, _) = fields
    # Only common (C) and simple (S) foldings map code points one-to-one.
    if foldtype not in ("C", "S"):
      return
    lower = _UInt(lower)
    togroup.setdefault(lower, [lower]).extend(codes)

  ReadUnicodeTable(unicode_dir + "/CaseFolding.txt", 4, DoLine)
  # list() is required on Python 3, where dict.values() is a view object
  # that has no sort() method; in-place sorting of each group still
  # mutates the lists shared with togroup, as before.
  groups = list(togroup.values())
  for g in groups:
    g.sort()
  groups.sort()
  return togroup, groups
constant[Returns list of Unicode code groups equivalent under case folding.
Each group is a sorted list of code points,
and the list of groups is sorted by first code point
in the group.
Args:
unicode_dir: Unicode data directory
Returns:
list of Unicode code groups
]
variable[togroup] assign[=] dictionary[[], []]
def function[DoLine, parameter[codes, fields]]:
constant[Process single CaseFolding.txt line, updating togroup.]
<ast.Tuple object at 0x7da2041d9c00> assign[=] name[fields]
if compare[name[foldtype] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da2041da050>, <ast.Constant object at 0x7da2041dbc70>]]] begin[:]
return[None]
variable[lower] assign[=] call[name[_UInt], parameter[name[lower]]]
call[call[name[togroup].setdefault, parameter[name[lower], list[[<ast.Name object at 0x7da2041d96f0>]]]].extend, parameter[name[codes]]]
call[name[ReadUnicodeTable], parameter[binary_operation[name[unicode_dir] + constant[/CaseFolding.txt]], constant[4], name[DoLine]]]
variable[groups] assign[=] call[name[togroup].values, parameter[]]
for taget[name[g]] in starred[name[groups]] begin[:]
call[name[g].sort, parameter[]]
call[name[groups].sort, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b26adbd0>, <ast.Name object at 0x7da1b26ae9e0>]]] | keyword[def] identifier[CaseGroups] ( identifier[unicode_dir] = identifier[_UNICODE_DIR] ):
literal[string]
identifier[togroup] ={}
keyword[def] identifier[DoLine] ( identifier[codes] , identifier[fields] ):
literal[string]
( identifier[_] , identifier[foldtype] , identifier[lower] , identifier[_] )= identifier[fields]
keyword[if] identifier[foldtype] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[return]
identifier[lower] = identifier[_UInt] ( identifier[lower] )
identifier[togroup] . identifier[setdefault] ( identifier[lower] ,[ identifier[lower] ]). identifier[extend] ( identifier[codes] )
identifier[ReadUnicodeTable] ( identifier[unicode_dir] + literal[string] , literal[int] , identifier[DoLine] )
identifier[groups] = identifier[togroup] . identifier[values] ()
keyword[for] identifier[g] keyword[in] identifier[groups] :
identifier[g] . identifier[sort] ()
identifier[groups] . identifier[sort] ()
keyword[return] identifier[togroup] , identifier[groups] | def CaseGroups(unicode_dir=_UNICODE_DIR):
"""Returns list of Unicode code groups equivalent under case folding.
Each group is a sorted list of code points,
and the list of groups is sorted by first code point
in the group.
Args:
unicode_dir: Unicode data directory
Returns:
list of Unicode code groups
"""
# Dict mapping lowercase code point to fold-equivalent group.
togroup = {}
def DoLine(codes, fields):
"""Process single CaseFolding.txt line, updating togroup."""
(_, foldtype, lower, _) = fields
if foldtype not in ('C', 'S'):
return # depends on [control=['if'], data=[]]
lower = _UInt(lower)
togroup.setdefault(lower, [lower]).extend(codes)
ReadUnicodeTable(unicode_dir + '/CaseFolding.txt', 4, DoLine)
groups = togroup.values()
for g in groups:
g.sort() # depends on [control=['for'], data=['g']]
groups.sort()
return (togroup, groups) |
def jump(self, relcode, opposite, label):
    """Generates a jump instruction
    relcode - relational operator code
    opposite - generate normal or opposite jump
    label - jump label
    """
    # Select the mnemonic table, then emit "<mnemonic>\t<label>".
    table = self.OPPOSITE_JUMPS if opposite else self.CONDITIONAL_JUMPS
    mnemonic = table[relcode]
    self.newline_text("{0}\t{1}".format(mnemonic, label), True)
constant[Generates a jump instruction
relcode - relational operator code
opposite - generate normal or opposite jump
label - jump label
]
variable[jump] assign[=] <ast.IfExp object at 0x7da18c4ccfd0>
call[name[self].newline_text, parameter[call[constant[{0} {1}].format, parameter[name[jump], name[label]]], constant[True]]] | keyword[def] identifier[jump] ( identifier[self] , identifier[relcode] , identifier[opposite] , identifier[label] ):
literal[string]
identifier[jump] = identifier[self] . identifier[OPPOSITE_JUMPS] [ identifier[relcode] ] keyword[if] identifier[opposite] keyword[else] identifier[self] . identifier[CONDITIONAL_JUMPS] [ identifier[relcode] ]
identifier[self] . identifier[newline_text] ( literal[string] . identifier[format] ( identifier[jump] , identifier[label] ), keyword[True] ) | def jump(self, relcode, opposite, label):
"""Generates a jump instruction
relcode - relational operator code
opposite - generate normal or opposite jump
label - jump label
"""
jump = self.OPPOSITE_JUMPS[relcode] if opposite else self.CONDITIONAL_JUMPS[relcode]
self.newline_text('{0}\t{1}'.format(jump, label), True) |
def maps_get_rules_output_rules_monitor(self, **kwargs):
    """Build the maps_get_rules request with output/rules/monitor set.

    Pops 'monitor' (required) and 'callback' (optional, defaults to
    self._callback) from kwargs, then hands the XML tree to the callback.
    """
    # Element tree: maps_get_rules > output > rules > monitor
    config = ET.Element("maps_get_rules")
    output_node = ET.SubElement(config, "output")
    rules_node = ET.SubElement(output_node, "rules")
    monitor_node = ET.SubElement(rules_node, "monitor")
    monitor_node.text = kwargs.pop('monitor')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[maps_get_rules] assign[=] call[name[ET].Element, parameter[constant[maps_get_rules]]]
variable[config] assign[=] name[maps_get_rules]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[maps_get_rules], constant[output]]]
variable[rules] assign[=] call[name[ET].SubElement, parameter[name[output], constant[rules]]]
variable[monitor] assign[=] call[name[ET].SubElement, parameter[name[rules], constant[monitor]]]
name[monitor].text assign[=] call[name[kwargs].pop, parameter[constant[monitor]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[maps_get_rules_output_rules_monitor] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[maps_get_rules] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[maps_get_rules]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[maps_get_rules] , literal[string] )
identifier[rules] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[monitor] = identifier[ET] . identifier[SubElement] ( identifier[rules] , literal[string] )
identifier[monitor] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def maps_get_rules_output_rules_monitor(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
maps_get_rules = ET.Element('maps_get_rules')
config = maps_get_rules
output = ET.SubElement(maps_get_rules, 'output')
rules = ET.SubElement(output, 'rules')
monitor = ET.SubElement(rules, 'monitor')
monitor.text = kwargs.pop('monitor')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _input_directory_description(input_identifier, arg_item, input_dir):
    """
    Produces a directory description. A directory description is a dictionary containing the following information.
    - 'path': The resolved path to the specified directory (or None on failure).
    - 'debugInfo': A field to possibly provide debug information.
    - 'found': A boolean that indicates, if the directory exists in the local filesystem.
    - 'listing': A listing that shows which files are in the given directory. This could be None.
    - 'basename': The directory's base name (or None on failure).
    Any failure (including a missing or non-directory path, raised internally
    as DirectoryError) is captured and recorded in 'debugInfo' rather than
    propagated to the caller.
    :param input_identifier: The input identifier in the cwl description file
    :param arg_item: The corresponding job information
    :param input_dir: Base directory used to resolve relative paths (may be falsy)
    :return: A directory description
    """
    description = {
        'path': None,
        'found': False,
        'debugInfo': None,
        'listing': None,
        'basename': None
    }
    try:
        path = location(input_identifier, arg_item)
        # Resolve relative paths against the (user-expanded) input directory.
        if input_dir and not os.path.isabs(path):
            path = os.path.join(os.path.expanduser(input_dir), path)
        description['path'] = path
        if not os.path.exists(path):
            raise DirectoryError('path does not exist')
        if not os.path.isdir(path):
            raise DirectoryError('path is not a directory')
        description['listing'] = arg_item.get('listing')
        description['basename'] = os.path.basename(path)
        description['found'] = True
    except Exception:
        # Deliberate best-effort: record the failure for debugging instead
        # of aborting. Narrowed from a bare ``except:``, which would also
        # swallow KeyboardInterrupt/SystemExit.
        description['debugInfo'] = exception_format()
    return description
constant[
Produces a directory description. A directory description is a dictionary containing the following information.
- 'path': An array containing the paths to the specified directories.
- 'debugInfo': A field to possibly provide debug information.
- 'found': A boolean that indicates, if the directory exists in the local filesystem.
- 'listing': A listing that shows which files are in the given directory. This could be None.
:param input_identifier: The input identifier in the cwl description file
:param arg_item: The corresponding job information
:param input_dir: TODO
:return: A directory description
:raise DirectoryError: If the given directory does not exist or is not a directory.
]
variable[description] assign[=] dictionary[[<ast.Constant object at 0x7da1b10c3e20>, <ast.Constant object at 0x7da1b10c1ae0>, <ast.Constant object at 0x7da1b10c0d00>, <ast.Constant object at 0x7da1b10c0040>, <ast.Constant object at 0x7da1b10c24d0>], [<ast.Constant object at 0x7da1b10c28f0>, <ast.Constant object at 0x7da1b10c2dd0>, <ast.Constant object at 0x7da1b10c3bb0>, <ast.Constant object at 0x7da1b10c3970>, <ast.Constant object at 0x7da1b10c0f10>]]
<ast.Try object at 0x7da1b10c0b50>
return[name[description]] | keyword[def] identifier[_input_directory_description] ( identifier[input_identifier] , identifier[arg_item] , identifier[input_dir] ):
literal[string]
identifier[description] ={
literal[string] : keyword[None] ,
literal[string] : keyword[False] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None]
}
keyword[try] :
identifier[path] = identifier[location] ( identifier[input_identifier] , identifier[arg_item] )
keyword[if] identifier[input_dir] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[isabs] ( identifier[path] ):
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[input_dir] ), identifier[path] )
identifier[description] [ literal[string] ]= identifier[path]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[raise] identifier[DirectoryError] ( literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
keyword[raise] identifier[DirectoryError] ( literal[string] )
identifier[description] [ literal[string] ]= identifier[arg_item] . identifier[get] ( literal[string] )
identifier[description] [ literal[string] ]= identifier[os] . identifier[path] . identifier[basename] ( identifier[path] )
identifier[description] [ literal[string] ]= keyword[True]
keyword[except] :
identifier[description] [ literal[string] ]= identifier[exception_format] ()
keyword[return] identifier[description] | def _input_directory_description(input_identifier, arg_item, input_dir):
"""
Produces a directory description. A directory description is a dictionary containing the following information.
- 'path': An array containing the paths to the specified directories.
- 'debugInfo': A field to possibly provide debug information.
- 'found': A boolean that indicates, if the directory exists in the local filesystem.
- 'listing': A listing that shows which files are in the given directory. This could be None.
:param input_identifier: The input identifier in the cwl description file
:param arg_item: The corresponding job information
:param input_dir: TODO
:return: A directory description
:raise DirectoryError: If the given directory does not exist or is not a directory.
"""
description = {'path': None, 'found': False, 'debugInfo': None, 'listing': None, 'basename': None}
try:
path = location(input_identifier, arg_item)
if input_dir and (not os.path.isabs(path)):
path = os.path.join(os.path.expanduser(input_dir), path) # depends on [control=['if'], data=[]]
description['path'] = path
if not os.path.exists(path):
raise DirectoryError('path does not exist') # depends on [control=['if'], data=[]]
if not os.path.isdir(path):
raise DirectoryError('path is not a directory') # depends on [control=['if'], data=[]]
description['listing'] = arg_item.get('listing')
description['basename'] = os.path.basename(path)
description['found'] = True # depends on [control=['try'], data=[]]
except:
description['debugInfo'] = exception_format() # depends on [control=['except'], data=[]]
return description |
def extend_primary_key(self, new_attributes):
"""
Create a new heading in which the primary key also includes new_attributes.
:param new_attributes: new attributes to be added to the primary key.
"""
try: # check for missing attributes
raise DataJointError('Attribute `%s` is not found' % next(a for a in new_attributes if a not in self.names))
except StopIteration:
return Heading(dict(v.todict(), in_key=v.in_key or v.name in new_attributes)
for v in self.attributes.values()) | def function[extend_primary_key, parameter[self, new_attributes]]:
constant[
Create a new heading in which the primary key also includes new_attributes.
:param new_attributes: new attributes to be added to the primary key.
]
<ast.Try object at 0x7da18fe92890> | keyword[def] identifier[extend_primary_key] ( identifier[self] , identifier[new_attributes] ):
literal[string]
keyword[try] :
keyword[raise] identifier[DataJointError] ( literal[string] % identifier[next] ( identifier[a] keyword[for] identifier[a] keyword[in] identifier[new_attributes] keyword[if] identifier[a] keyword[not] keyword[in] identifier[self] . identifier[names] ))
keyword[except] identifier[StopIteration] :
keyword[return] identifier[Heading] ( identifier[dict] ( identifier[v] . identifier[todict] (), identifier[in_key] = identifier[v] . identifier[in_key] keyword[or] identifier[v] . identifier[name] keyword[in] identifier[new_attributes] )
keyword[for] identifier[v] keyword[in] identifier[self] . identifier[attributes] . identifier[values] ()) | def extend_primary_key(self, new_attributes):
"""
Create a new heading in which the primary key also includes new_attributes.
:param new_attributes: new attributes to be added to the primary key.
"""
try: # check for missing attributes
raise DataJointError('Attribute `%s` is not found' % next((a for a in new_attributes if a not in self.names))) # depends on [control=['try'], data=[]]
except StopIteration:
return Heading((dict(v.todict(), in_key=v.in_key or v.name in new_attributes) for v in self.attributes.values())) # depends on [control=['except'], data=[]] |
def _obj_display(obj, display=''):
"""Returns string representation of an object, either the default or based
on the display template passed in.
"""
result = ''
if not display:
result = str(obj)
else:
template = Template(display)
context = Context({'obj':obj})
result = template.render(context)
return result | def function[_obj_display, parameter[obj, display]]:
constant[Returns string representation of an object, either the default or based
on the display template passed in.
]
variable[result] assign[=] constant[]
if <ast.UnaryOp object at 0x7da1b0048670> begin[:]
variable[result] assign[=] call[name[str], parameter[name[obj]]]
return[name[result]] | keyword[def] identifier[_obj_display] ( identifier[obj] , identifier[display] = literal[string] ):
literal[string]
identifier[result] = literal[string]
keyword[if] keyword[not] identifier[display] :
identifier[result] = identifier[str] ( identifier[obj] )
keyword[else] :
identifier[template] = identifier[Template] ( identifier[display] )
identifier[context] = identifier[Context] ({ literal[string] : identifier[obj] })
identifier[result] = identifier[template] . identifier[render] ( identifier[context] )
keyword[return] identifier[result] | def _obj_display(obj, display=''):
"""Returns string representation of an object, either the default or based
on the display template passed in.
"""
result = ''
if not display:
result = str(obj) # depends on [control=['if'], data=[]]
else:
template = Template(display)
context = Context({'obj': obj})
result = template.render(context)
return result |
def clean(self, text):
"""Remove all unwanted characters from text."""
return ''.join([c for c in text if c in self.alphabet]) | def function[clean, parameter[self, text]]:
constant[Remove all unwanted characters from text.]
return[call[constant[].join, parameter[<ast.ListComp object at 0x7da18f58e590>]]] | keyword[def] identifier[clean] ( identifier[self] , identifier[text] ):
literal[string]
keyword[return] literal[string] . identifier[join] ([ identifier[c] keyword[for] identifier[c] keyword[in] identifier[text] keyword[if] identifier[c] keyword[in] identifier[self] . identifier[alphabet] ]) | def clean(self, text):
"""Remove all unwanted characters from text."""
return ''.join([c for c in text if c in self.alphabet]) |
def is_blacklisted_import(importer, fullname):
"""
Return :data:`True` if `fullname` is part of a blacklisted package, or if
any packages have been whitelisted and `fullname` is not part of one.
NB:
- If a package is on both lists, then it is treated as blacklisted.
- If any package is whitelisted, then all non-whitelisted packages are
treated as blacklisted.
"""
return ((not any(fullname.startswith(s) for s in importer.whitelist)) or
(any(fullname.startswith(s) for s in importer.blacklist))) | def function[is_blacklisted_import, parameter[importer, fullname]]:
constant[
Return :data:`True` if `fullname` is part of a blacklisted package, or if
any packages have been whitelisted and `fullname` is not part of one.
NB:
- If a package is on both lists, then it is treated as blacklisted.
- If any package is whitelisted, then all non-whitelisted packages are
treated as blacklisted.
]
return[<ast.BoolOp object at 0x7da1b1d0c6a0>] | keyword[def] identifier[is_blacklisted_import] ( identifier[importer] , identifier[fullname] ):
literal[string]
keyword[return] (( keyword[not] identifier[any] ( identifier[fullname] . identifier[startswith] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[importer] . identifier[whitelist] )) keyword[or]
( identifier[any] ( identifier[fullname] . identifier[startswith] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[importer] . identifier[blacklist] ))) | def is_blacklisted_import(importer, fullname):
"""
Return :data:`True` if `fullname` is part of a blacklisted package, or if
any packages have been whitelisted and `fullname` is not part of one.
NB:
- If a package is on both lists, then it is treated as blacklisted.
- If any package is whitelisted, then all non-whitelisted packages are
treated as blacklisted.
"""
return not any((fullname.startswith(s) for s in importer.whitelist)) or any((fullname.startswith(s) for s in importer.blacklist)) |
def flatten_dict(self, d, delimiter="-", intermediates=False, parent_key=None):
"""
Flatten a dictionary.
Values that are dictionaries are flattened using delimiter in between
(eg. parent-child)
Values that are lists are flattened using delimiter
followed by the index (eg. parent-0)
example:
.. code-block:: python
{
'fish_facts': {
'sharks': 'Most will drown if they stop moving',
'skates': 'More than 200 species',
},
'fruits': ['apple', 'peach', 'watermelon'],
'number': 52
}
# becomes
{
'fish_facts-sharks': 'Most will drown if they stop moving',
'fish_facts-skates': 'More than 200 species',
'fruits-0': 'apple',
'fruits-1': 'peach',
'fruits-2': 'watermelon',
'number': 52
}
# if intermediates is True then we also get unflattened elements
# as well as the flattened ones.
{
'fish_facts': {
'sharks': 'Most will drown if they stop moving',
'skates': 'More than 200 species',
},
'fish_facts-sharks': 'Most will drown if they stop moving',
'fish_facts-skates': 'More than 200 species',
'fruits': ['apple', 'peach', 'watermelon'],
'fruits-0': 'apple',
'fruits-1': 'peach',
'fruits-2': 'watermelon',
'number': 52
}
"""
items = []
if isinstance(d, list):
d = dict(enumerate(d))
for k, v in d.items():
if parent_key:
k = u"{}{}{}".format(parent_key, delimiter, k)
if intermediates:
items.append((k, v))
if isinstance(v, list):
v = dict(enumerate(v))
if isinstance(v, collections.Mapping):
items.extend(
self.flatten_dict(v, delimiter, intermediates, str(k)).items()
)
else:
items.append((str(k), v))
return dict(items) | def function[flatten_dict, parameter[self, d, delimiter, intermediates, parent_key]]:
constant[
Flatten a dictionary.
Values that are dictionaries are flattened using delimiter in between
(eg. parent-child)
Values that are lists are flattened using delimiter
followed by the index (eg. parent-0)
example:
.. code-block:: python
{
'fish_facts': {
'sharks': 'Most will drown if they stop moving',
'skates': 'More than 200 species',
},
'fruits': ['apple', 'peach', 'watermelon'],
'number': 52
}
# becomes
{
'fish_facts-sharks': 'Most will drown if they stop moving',
'fish_facts-skates': 'More than 200 species',
'fruits-0': 'apple',
'fruits-1': 'peach',
'fruits-2': 'watermelon',
'number': 52
}
# if intermediates is True then we also get unflattened elements
# as well as the flattened ones.
{
'fish_facts': {
'sharks': 'Most will drown if they stop moving',
'skates': 'More than 200 species',
},
'fish_facts-sharks': 'Most will drown if they stop moving',
'fish_facts-skates': 'More than 200 species',
'fruits': ['apple', 'peach', 'watermelon'],
'fruits-0': 'apple',
'fruits-1': 'peach',
'fruits-2': 'watermelon',
'number': 52
}
]
variable[items] assign[=] list[[]]
if call[name[isinstance], parameter[name[d], name[list]]] begin[:]
variable[d] assign[=] call[name[dict], parameter[call[name[enumerate], parameter[name[d]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1d0ece0>, <ast.Name object at 0x7da1b1d0fee0>]]] in starred[call[name[d].items, parameter[]]] begin[:]
if name[parent_key] begin[:]
variable[k] assign[=] call[constant[{}{}{}].format, parameter[name[parent_key], name[delimiter], name[k]]]
if name[intermediates] begin[:]
call[name[items].append, parameter[tuple[[<ast.Name object at 0x7da1b1d0ee00>, <ast.Name object at 0x7da1b1d0d6f0>]]]]
if call[name[isinstance], parameter[name[v], name[list]]] begin[:]
variable[v] assign[=] call[name[dict], parameter[call[name[enumerate], parameter[name[v]]]]]
if call[name[isinstance], parameter[name[v], name[collections].Mapping]] begin[:]
call[name[items].extend, parameter[call[call[name[self].flatten_dict, parameter[name[v], name[delimiter], name[intermediates], call[name[str], parameter[name[k]]]]].items, parameter[]]]]
return[call[name[dict], parameter[name[items]]]] | keyword[def] identifier[flatten_dict] ( identifier[self] , identifier[d] , identifier[delimiter] = literal[string] , identifier[intermediates] = keyword[False] , identifier[parent_key] = keyword[None] ):
literal[string]
identifier[items] =[]
keyword[if] identifier[isinstance] ( identifier[d] , identifier[list] ):
identifier[d] = identifier[dict] ( identifier[enumerate] ( identifier[d] ))
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[items] ():
keyword[if] identifier[parent_key] :
identifier[k] = literal[string] . identifier[format] ( identifier[parent_key] , identifier[delimiter] , identifier[k] )
keyword[if] identifier[intermediates] :
identifier[items] . identifier[append] (( identifier[k] , identifier[v] ))
keyword[if] identifier[isinstance] ( identifier[v] , identifier[list] ):
identifier[v] = identifier[dict] ( identifier[enumerate] ( identifier[v] ))
keyword[if] identifier[isinstance] ( identifier[v] , identifier[collections] . identifier[Mapping] ):
identifier[items] . identifier[extend] (
identifier[self] . identifier[flatten_dict] ( identifier[v] , identifier[delimiter] , identifier[intermediates] , identifier[str] ( identifier[k] )). identifier[items] ()
)
keyword[else] :
identifier[items] . identifier[append] (( identifier[str] ( identifier[k] ), identifier[v] ))
keyword[return] identifier[dict] ( identifier[items] ) | def flatten_dict(self, d, delimiter='-', intermediates=False, parent_key=None):
"""
Flatten a dictionary.
Values that are dictionaries are flattened using delimiter in between
(eg. parent-child)
Values that are lists are flattened using delimiter
followed by the index (eg. parent-0)
example:
.. code-block:: python
{
'fish_facts': {
'sharks': 'Most will drown if they stop moving',
'skates': 'More than 200 species',
},
'fruits': ['apple', 'peach', 'watermelon'],
'number': 52
}
# becomes
{
'fish_facts-sharks': 'Most will drown if they stop moving',
'fish_facts-skates': 'More than 200 species',
'fruits-0': 'apple',
'fruits-1': 'peach',
'fruits-2': 'watermelon',
'number': 52
}
# if intermediates is True then we also get unflattened elements
# as well as the flattened ones.
{
'fish_facts': {
'sharks': 'Most will drown if they stop moving',
'skates': 'More than 200 species',
},
'fish_facts-sharks': 'Most will drown if they stop moving',
'fish_facts-skates': 'More than 200 species',
'fruits': ['apple', 'peach', 'watermelon'],
'fruits-0': 'apple',
'fruits-1': 'peach',
'fruits-2': 'watermelon',
'number': 52
}
"""
items = []
if isinstance(d, list):
d = dict(enumerate(d)) # depends on [control=['if'], data=[]]
for (k, v) in d.items():
if parent_key:
k = u'{}{}{}'.format(parent_key, delimiter, k) # depends on [control=['if'], data=[]]
if intermediates:
items.append((k, v)) # depends on [control=['if'], data=[]]
if isinstance(v, list):
v = dict(enumerate(v)) # depends on [control=['if'], data=[]]
if isinstance(v, collections.Mapping):
items.extend(self.flatten_dict(v, delimiter, intermediates, str(k)).items()) # depends on [control=['if'], data=[]]
else:
items.append((str(k), v)) # depends on [control=['for'], data=[]]
return dict(items) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.