def zeq_magic(meas_file='measurements.txt', spec_file='', crd='s', input_dir_path='.', angle=0,
              n_plots=5, save_plots=True, fmt="svg", interactive=False, specimen="",
              samp_file='samples.txt', contribution=None, fignum=1):
"""
zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.
Parameters
----------
meas_file : str
input measurement file
spec_file : str
input specimen interpretation file
samp_file : str
input sample orientations file
crd : str
coordinate system [s,g,t] for specimen, geographic, tilt corrected
g,t options require a sample file with specimen and bedding orientation
input_dir_path : str
input directory of meas_file, default "."
angle : float
angle of X direction with respect to specimen X
n_plots : int, default 5
maximum number of plots to make
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
specimen : str, default ""
specimen name to plot
samp_file : str, default 'samples.txt'
name of samples file
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
fignum : matplotlib figure number
"""
def plot_interpretations(ZED, spec_container, this_specimen, this_specimen_measurements, datablock):
if cb.is_null(spec_container) or cb.is_null(this_specimen_measurements) or cb.is_null(datablock):
return ZED
if 'method_codes' not in spec_container.df.columns:
return ZED
        prior_spec_data = spec_container.get_records_for_code(
            'LP-DIR', strict_match=False)  # look up all prior directional interpretations
        if not len(prior_spec_data):
            return ZED
        mpars = {"specimen_direction_type": "Error"}
        prior_specimen_interpretations = prior_spec_data[
            prior_spec_data['specimen'].astype(str) == this_specimen]
        if len(prior_specimen_interpretations):
            beg_pcas = pd.to_numeric(
                prior_specimen_interpretations.meas_step_min.values).tolist()
            end_pcas = pd.to_numeric(
                prior_specimen_interpretations.meas_step_max.values).tolist()
            spec_methods = prior_specimen_interpretations.method_codes.tolist()
            # step through all prior interpretations and plot them
            for ind in range(len(beg_pcas)):
                spec_meths = spec_methods[ind].split(':')
                calculation_type = 'DE-BFL'  # default to best fit line
                for m in spec_meths:
                    if 'DE-BFL' in m:
                        calculation_type = 'DE-BFL'  # best fit line
                    if 'DE-BFP' in m:
                        calculation_type = 'DE-BFP'  # best fit plane
                    if 'DE-FM' in m:
                        calculation_type = 'DE-FM'  # fisher mean
                    if 'DE-BFL-A' in m:
                        calculation_type = 'DE-BFL-A'  # anchored best fit line
                treatments = pd.to_numeric(this_specimen_measurements.treatment).tolist()
                try:
                    # get the indices of the starting and ending treatment steps
                    start, end = treatments.index(beg_pcas[ind]), treatments.index(end_pcas[ind])
                    mpars = pmag.domean(datablock, start, end, calculation_type)
                except ValueError:
                    mpars['specimen_direction_type'] = "Error"
                    try:
                        # retry, mapping a recorded step of 0 onto the first (NRM) measurement
                        start = 0 if beg_pcas[ind] == 0 else treatments.index(beg_pcas[ind])
                        end = 0 if end_pcas[ind] == 0 else treatments.index(end_pcas[ind])
                        mpars = pmag.domean(datablock, start, end, calculation_type)
                    except ValueError:
                        mpars['specimen_direction_type'] = "Error"
                # plot the calculated direction/plane
                if mpars["specimen_direction_type"] != "Error":
                    pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
                else:
                    print('\n-W- Specimen {} record contains invalid start/stop bounds:'.format(this_specimen))
                    print(prior_spec_data.loc[this_specimen][['meas_step_min', 'meas_step_max']])
                    print('\n  Measurement records:')
                    cols = list(set(['treat_ac_field', 'treat_temp']).intersection(this_specimen_measurements.columns))
                    print(this_specimen_measurements[cols])
                    print('\n  Data will be plotted without interpretations\n')
return ZED
def make_plots(spec, cnt, meas_df, spec_container, samp_container=None):
# get sample data for orientation
if spec_container:
try:
samps = spec_container.df.loc[spec, 'sample']
except KeyError:
samps = ""
samp_df = []
            if isinstance(samps, (int, float, np.int64)):
                if np.isnan(samps):
                    samp = ""
                    samp_df = []
                else:
                    samp = str(samps)
                    samp_container.df.index = samp_container.df.index.astype(str)
                    samp_df = samp_container.df[samp_container.df.index == samp]
            elif samps is None:
                samp = ""
                samp_df = []
            elif len(samps):
                if isinstance(samps, str):
                    samp = samps
                else:
                    samp = samps.iloc[0]
                samp_df = samp_container.df[samp_container.df.index == samp]
            else:
                samp_df = []
# we can make the figure dictionary that pmagplotlib likes:
ZED = {'eqarea': cnt, 'zijd': cnt+1, 'demag': cnt+2} # make datablock
# get the relevant data
        spec_df = meas_df[meas_df.specimen == spec]
        # remove ARM data
        spec_df = spec_df[~spec_df.method_codes.str.contains(
            r'LP-*[\w]*-ARM')]
# split data into NRM, thermal, and af dataframes
spec_df_nrm = spec_df[spec_df.method_codes.str.contains(
'LT-NO')] # get the NRM data
spec_df_th = spec_df[spec_df.method_codes.str.contains(
'LT-T-Z')] # zero field thermal demag steps
try:
            cond = spec_df.method_codes.str.contains(r'(^|[\s\:])LT-PTRM')
            spec_df_th = spec_df_th[~cond]  # get rid of some pTRM steps
except ValueError:
keep_inds = []
n = 0
for ind, row in spec_df_th.copy().iterrows():
if 'LT-PTRM' in row['method_codes'] and 'ALT-PTRM' not in row['method_codes']:
keep_inds.append(n)
else:
pass
n += 1
if len(keep_inds) < n:
spec_df_th = spec_df_th.iloc[keep_inds]
spec_df_af = spec_df[spec_df.method_codes.str.contains('LT-AF-Z')]
this_spec_meas_df = None
datablock = None
        if len(spec_df_th.index) <= 1 and len(spec_df_af.index) <= 1:
return
if len(spec_df_th.index) > 1: # this is a thermal run
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_th])
# make sure all decs/incs are filled in
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
if n_rows > len(this_spec_meas_df):
                print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(spec, n_rows - len(this_spec_meas_df)))
# geographic transformation
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units = 'K' # units are kelvin
try:
this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
this_spec_meas_df['treat_temp'] = this_spec_meas_df['treat_temp'].astype(float)
            except (ValueError, TypeError):
print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
return
datablock = this_spec_meas_df[['treat_temp', 'dir_dec', 'dir_inc',
'magn_moment', 'blank', 'quality']].values.tolist()
            ZED = pmagplotlib.plot_zed(ZED, datablock, angle, spec, units)
if len(spec_df_af.index) > 1: # this is an af run
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_af])
# make sure all decs/incs are filled in
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
if n_rows > len(this_spec_meas_df):
                print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(spec, n_rows - len(this_spec_meas_df)))
# geographic transformation
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units = 'T' # these are AF data
try:
this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
this_spec_meas_df['treat_ac_field'] = this_spec_meas_df['treat_ac_field'].astype(float)
            except (ValueError, TypeError):
print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
return
datablock = this_spec_meas_df[['treat_ac_field', 'dir_dec', 'dir_inc',
'magn_moment', 'blank', 'quality']].values.tolist()
            ZED = pmagplotlib.plot_zed(ZED, datablock, angle, spec, units)
        return plot_interpretations(ZED, spec_container, spec, this_spec_meas_df, datablock)
if interactive:
save_plots = False
# read in MagIC formatted data if contribution object not provided
if not isinstance(contribution, cb.Contribution):
input_dir_path = os.path.realpath(input_dir_path)
file_path = pmag.resolve_file_name(meas_file, input_dir_path)
# read in magic formatted data
if not os.path.exists(file_path):
print('No such file:', file_path)
return False, []
custom_filenames = {'measurements': file_path, 'specimens': spec_file, 'samples': samp_file}
contribution = cb.Contribution(input_dir_path, custom_filenames=custom_filenames,
read_tables=['measurements', 'specimens',
'contribution', 'samples'])
if pmagplotlib.isServer:
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
except KeyError as ex:
pass
    meas_container = contribution.tables['measurements']
    meas_df = contribution.tables['measurements'].df
    spec_container = contribution.tables.get('specimens', None)
    samp_container = contribution.tables.get('samples', None)
meas_df['blank'] = "" # this is a dummy variable expected by plotZED
if 'treat_ac_field' in meas_df.columns:
# create 'treatment' column.
# uses treat_temp if treat_ac_field is missing OR zero.
# (have to take this into account for plotting later)
if 'treat_temp' in meas_df.columns:
meas_df['treatment'] = meas_df['treat_ac_field'].where(
cond=meas_df['treat_ac_field'].astype(bool), other=meas_df['treat_temp'])
else:
meas_df['treatment'] = meas_df['treat_ac_field']
else:
meas_df['treatment'] = meas_df['treat_temp']
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
specimens = meas_df.specimen.unique() # list of specimen names
if len(specimens) == 0:
print('there are no data for plotting')
return False, []
# check measurement table for req'd fields
missing = []
reqd_cols_present = meas_df.columns.intersection(['dir_dec', 'dir_inc', 'magn_moment'])
for col in ['dir_dec', 'dir_inc', 'magn_moment']:
if col not in reqd_cols_present:
missing.append(col)
if missing:
print('-W- Missing required column(s) {}, cannot run zeq_magic'.format(', '.join(missing)))
return False, []
cnt = fignum
if n_plots != "all":
if len(specimens) > n_plots:
specimens = specimens[:n_plots]
saved = []
if specimen:
specimens = [specimen]
for s in specimens:
ZED = make_plots(s, cnt, meas_df, spec_container, samp_container)
if not ZED:
if pmagplotlib.verbose:
print('No plots could be created for specimen:', s)
continue
titles = {key: s + "_" + key + "." + fmt for key in ZED}
if pmagplotlib.isServer:
titles = {}
titles['eqarea'] = 'Equal Area Plot'
titles['zijd'] = 'Zijderveld Plot'
titles['demag'] = 'Demagnetization Plot'
con_id = ""
if 'contribution' in contribution.tables:
if 'id' in contribution.tables['contribution'].df.columns:
con_id = contribution.tables['contribution'].df['id'].values[0]
pmagplotlib.add_borders(ZED, titles, con_id=con_id)
for title in titles:
# try to get the full hierarchy for plot names
df_slice = meas_container.df[meas_container.df['specimen'] == s]
location = str(meas_container.get_name('location', df_slice))
site = str(meas_container.get_name('site', df_slice))
sample = str(meas_container.get_name('sample', df_slice))
# add coord here!
filename = 'LO:_'+location+'_SI:_'+site+'_SA:_'+sample + \
'_SP:_'+str(s)+'_CO:_' + '_TY:_'+title+'_.png'
titles[title] = filename
if save_plots:
saved.extend(pmagplotlib.save_plots(ZED, titles))
elif interactive:
pmagplotlib.draw_figs(ZED)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
saved.extend(pmagplotlib.save_plots(ZED, titles))
else:
continue
else:
cnt += 3
return True, saved
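# A minimal usage sketch (not part of the original source), assuming this
# function is pmagpy's ipmag.zeq_magic and that a MagIC-format
# measurements.txt (plus optional specimens.txt/samples.txt) sits in "data/".
from pmagpy import ipmag

ok, saved = ipmag.zeq_magic(meas_file='measurements.txt',
                            input_dir_path='data',
                            crd='g',      # geographic coordinates
                            n_plots=3,
                            fmt='png')
if ok:
    print('saved plots:', saved)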
def before_insert(mapper, conn, target):
"""event.listen method for Sqlalchemy to set the sequence for this
object and create an ObjectNumber value for the id_"""
target._set_ids()
if target.name and target.vname and target.cache_key and target.fqname and not target.dataset:
return
Partition.before_update(mapper, conn, target)
def _get_stddev_deep_soil(self, mag, imt):
"""
Calculate and return total standard deviation for deep soil sites.
Implements formulae from the last column of table 4.
"""
        # the footnote from table 4 says that the standard error for
        # magnitudes over 7 is equal to that of magnitude 7
if mag > 7:
mag = 7
C = self.COEFFS_SOIL[imt]
return C['sigma0'] + C['magfactor'] * mag
def get_subgraph(self, name):
"""Retrieved a subgraph from the graph.
Given a subgraph's name the corresponding
Subgraph instance will be returned.
If one or more subgraphs exist with the same name, a list of
Subgraph instances is returned.
An empty list is returned otherwise.
"""
match = list()
if name in self.obj_dict['subgraphs']:
            sgraphs_obj_dict = self.obj_dict['subgraphs'].get(name)
            for obj_dict_list in sgraphs_obj_dict:
                match.append(Subgraph(obj_dict=obj_dict_list))
return match
def txn(self, overwrite=False, lock=True):
"""Context manager for a state modification transaction."""
if lock:
self._lock.acquire()
try:
new_state, existing_generation = self.state_and_generation
new_state = copy.deepcopy(new_state)
yield new_state
if overwrite:
existing_generation = None
self.set_state(new_state, existing_generation=existing_generation)
finally:
if lock:
self._lock.release()
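# Hypothetical usage sketch: txn() is a generator, so presumably it is wrapped
# with contextlib.contextmanager (the decorator is not shown above). "store"
# stands for whatever object defines txn()/state_and_generation/set_state.
# The yielded object is a deep copy; set_state() persists it on a clean exit
# using the generation observed at entry, so a concurrent writer is detected
# unless overwrite=True discards the generation check.
with store.txn() as state:
    state['jobs'] = state.get('jobs', 0) + 1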
def from_env(cls, reactor=None, env=os.environ):
"""
Create a Vault client with configuration from the environment. Supports
a limited number of the available config options:
https://www.vaultproject.io/docs/commands/index.html#environment-variables
https://github.com/hashicorp/vault/blob/v0.11.3/api/client.go#L28-L40
Supported:
- ``VAULT_ADDR``
- ``VAULT_CACERT``
- ``VAULT_CLIENT_CERT``
- ``VAULT_CLIENT_KEY``
- ``VAULT_TLS_SERVER_NAME``
- ``VAULT_TOKEN``
Not currently supported:
- ``VAULT_CAPATH``
- ``VAULT_CLIENT_TIMEOUT``
- ``VAULT_MAX_RETRIES``
- ``VAULT_MFA``
- ``VAULT_RATE_LIMIT``
- ``VAULT_SKIP_VERIFY``
- ``VAULT_WRAP_TTL``
"""
address = env.get('VAULT_ADDR', 'https://127.0.0.1:8200')
# This seems to be what the Vault CLI defaults to
token = env.get('VAULT_TOKEN', 'TEST')
ca_cert = env.get('VAULT_CACERT')
tls_server_name = env.get('VAULT_TLS_SERVER_NAME')
client_cert = env.get('VAULT_CLIENT_CERT')
client_key = env.get('VAULT_CLIENT_KEY')
cf = ClientPolicyForHTTPS.from_pem_files(
caKey=ca_cert, privateKey=client_key, certKey=client_cert,
tls_server_name=tls_server_name
)
client, reactor = default_client(reactor, contextFactory=cf)
return cls(address, token, client=client, reactor=reactor)
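# Usage sketch with hedged assumptions: the enclosing class is not shown
# above (VaultClient below is an illustrative name), and the client is
# Twisted-based, so calls run under a reactor. VAULT_ADDR falls back to
# https://127.0.0.1:8200 when unset.
import os

env = dict(os.environ, VAULT_ADDR='https://vault.example.com:8200')
client = VaultClient.from_env(env=env)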
def io_surface(timestep, time, fid, fld):
"""Output for surface files"""
fid.write("{} {}".format(timestep, time))
fid.writelines(["%10.2e" % item for item in fld[:]])
fid.writelines(["\n"])
def info(self):
"""
Retrieves the design document view information data, returns dictionary
GET databasename/_design/{ddoc}/_info
"""
ddoc_info = self.r_session.get(
'/'.join([self.document_url, '_info']))
ddoc_info.raise_for_status()
return response_to_json_dict(ddoc_info)
def save(self, acl=None, client=None):
"""Save this ACL for the current bucket.
If :attr:`user_project` is set, bills the API request to that project.
:type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list.
:param acl: The ACL object to save. If left blank, this will save
current entries.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent.
"""
if acl is None:
acl = self
save_to_backend = acl.loaded
else:
save_to_backend = True
if save_to_backend:
self._save(acl, None, client)
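# A short usage sketch against the public google-cloud-storage API: reload
# the bucket ACL so it is marked as loaded, grant a user read access, then
# persist with the method above. The bucket name is illustrative.
from google.cloud import storage

client = storage.Client()
acl = client.bucket('my-bucket').acl
acl.reload()
acl.user('alice@example.com').grant_read()
acl.save(client=client)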
def write_input_files(pst):
"""write parameter values to a model input files using a template files with
current parameter values (stored in Pst.parameter_data.parval1).
This is a simple implementation of what PEST does. It does not
handle all the special cases, just a basic function...user beware
Parameters
----------
pst : (pyemu.Pst)
a Pst instance
"""
par = pst.parameter_data
par.loc[:,"parval1_trans"] = (par.parval1 * par.scale) + par.offset
for tpl_file,in_file in zip(pst.template_files,pst.input_files):
write_to_template(pst.parameter_data.parval1_trans,tpl_file,in_file)
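# Usage sketch, assuming a PEST control file "my_model.pst" whose template
# (.tpl) and input file pairs are already listed in it.
import pyemu

pst = pyemu.Pst('my_model.pst')
pst.parameter_data.loc[:, 'parval1'] *= 1.1   # perturb the current values
write_input_files(pst)                        # re-write the model inputs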
def _make_package(binder):
"""Makes an ``.epub.Package`` from a Binder'ish instance."""
package_id = binder.id
if package_id is None:
package_id = hash(binder)
package_name = "{}.opf".format(package_id)
extensions = get_model_extensions(binder)
template_env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True)
# Build the package item list.
items = []
# Build the binder as an item, specifically a navigation item.
navigation_document = bytes(HTMLFormatter(binder, extensions))
navigation_document_name = "{}{}".format(
package_id,
mimetypes.guess_extension('application/xhtml+xml', strict=False))
item = Item(str(navigation_document_name),
io.BytesIO(navigation_document),
'application/xhtml+xml',
is_navigation=True, properties=['nav'])
items.append(item)
resources = {}
# Roll through the model list again, making each one an item.
for model in flatten_model(binder):
for resource in getattr(model, 'resources', []):
resources[resource.id] = resource
with resource.open() as data:
item = Item(resource.id, data, resource.media_type)
items.append(item)
if isinstance(model, (Binder, TranslucentBinder,)):
continue
if isinstance(model, DocumentPointer):
content = bytes(HTMLFormatter(model))
item = Item(''.join([model.ident_hash, extensions[model.id]]),
io.BytesIO(content),
model.media_type)
items.append(item)
continue
for reference in model.references:
if reference.remote_type == INLINE_REFERENCE_TYPE:
# has side effects - converts ref type to INTERNAL w/
# appropriate uri, so need to replicate resource treatment from
# above
resource = _make_resource_from_inline(reference)
model.resources.append(resource)
resources[resource.id] = resource
with resource.open() as data:
item = Item(resource.id, data, resource.media_type)
items.append(item)
reference.bind(resource, '../resources/{}')
elif reference.remote_type == INTERNAL_REFERENCE_TYPE:
filename = os.path.basename(reference.uri)
resource = resources.get(filename)
if resource:
reference.bind(resource, '../resources/{}')
complete_content = bytes(HTMLFormatter(model))
item = Item(''.join([model.ident_hash, extensions[model.id]]),
io.BytesIO(complete_content),
model.media_type)
items.append(item)
# Build the package.
package = Package(package_name, items, binder.metadata)
return package
def check_password_readable(self, section, fields):
"""Check if there is a readable configuration file and print a warning."""
if not fields:
return
# The information which of the configuration files
# included which option is not available. To avoid false positives,
# a warning is only printed if exactly one file has been read.
if len(self.read_ok) != 1:
return
fn = self.read_ok[0]
if fileutil.is_accessable_by_others(fn):
log.warn(LOG_CHECK, "The configuration file %s contains password information (in section [%s] and options %s) and the file is readable by others. Please make the file only readable by you.", fn, section, fields)
if os.name == 'posix':
log.warn(LOG_CHECK, _("For example execute 'chmod go-rw %s'.") % fn)
elif os.name == 'nt':
log.warn(LOG_CHECK, _("See http://support.microsoft.com/kb/308419 for more info on setting file permissions."))
def classify(self, classifier_name, examples, max_labels=None,
goodness_of_fit=False):
"""Usar un clasificador SVM para etiquetar textos nuevos.
Args:
classifier_name (str): Nombre del clasidicador a usar.
examples (list or str): Se espera un ejemplo o una lista de
ejemplos a clasificar en texto plano o en ids.
max_labels (int, optional): Cantidad de etiquetas a devolver para
cada ejemplo. Si se devuelve mas de una el orden corresponde a
la plausibilidad de cada etiqueta. Si es None devuelve todas
las etiquetas posibles.
goodness_of_fit (bool, optional): Indica si devuelve o no una
medida de cuan buenas son las etiquetas.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
Returns:
tuple (array, array): (labels_considerados, puntajes)
labels_considerados: Las etiquetas que se consideraron para
clasificar.
puntajes: Cuanto más alto el puntaje, más probable es que la
etiqueta considerada sea la adecuada.
"""
classifier = getattr(self, classifier_name)
texts_vectors = self._make_text_vectors(examples)
return classifier.classes_, classifier.decision_function(texts_vectors)
def scrnaseq_concatenate_metadata(samples):
"""
    Create a file with the same dimensions as mtx.colnames,
    containing metadata and sample names, to help in the
    creation of the SC object.
"""
barcodes = {}
counts = ""
metadata = {}
has_sample_barcodes = False
for sample in dd.sample_data_iterator(samples):
if dd.get_sample_barcodes(sample):
has_sample_barcodes = True
with open(dd.get_sample_barcodes(sample)) as inh:
for line in inh:
cols = line.strip().split(",")
if len(cols) == 1:
                        # assign a placeholder when the sample name is missing from the barcodes file
cols.append("NaN")
barcodes[(dd.get_sample_name(sample), cols[0])] = cols[1:]
else:
barcodes[(dd.get_sample_name(sample), "NaN")] = [dd.get_sample_name(sample), "NaN"]
counts = dd.get_combined_counts(sample)
meta = map(str, list(sample["metadata"].values()))
meta_cols = list(sample["metadata"].keys())
meta = ["NaN" if not v else v for v in meta]
metadata[dd.get_sample_name(sample)] = meta
metadata_fn = counts + ".metadata"
if file_exists(metadata_fn):
return samples
with file_transaction(metadata_fn) as tx_metadata_fn:
with open(tx_metadata_fn, 'w') as outh:
outh.write(",".join(["sample"] + meta_cols) + '\n')
with open(counts + ".colnames") as inh:
for line in inh:
sample = line.split(":")[0]
if has_sample_barcodes:
barcode = sample.split("-")[1]
else:
barcode = "NaN"
outh.write(",".join(barcodes[(sample, barcode)] + metadata[sample]) + '\n')
return samples
def optimally_align_text(x, y, texts, expand, renderer=None, ax=None,
direction='xy'):
"""
For all text objects find alignment that causes the least overlap with
points and other texts and apply it
"""
if ax is None:
ax = plt.gca()
if renderer is None:
r = ax.get_figure().canvas.get_renderer()
else:
r = renderer
bboxes = get_bboxes(texts, r, expand)
if 'x' not in direction:
ha = ['']
else:
ha = ['left', 'right', 'center']
if 'y' not in direction:
va = ['']
else:
va = ['bottom', 'top', 'center']
alignment = list(product(ha, va))
for i, text in enumerate(texts):
counts = []
for h, v in alignment:
if h:
text.set_ha(h)
if v:
text.set_va(v)
bbox = text.get_window_extent(r).expanded(*expand).\
transformed(ax.transData.inverted())
c = get_points_inside_bbox(x, y, bbox)
counts.append(len(c) + bbox.count_overlaps(bboxes) - 1)
a = np.argmin(counts)
if 'x' in direction:
text.set_ha(alignment[a][0])
if 'y' in direction:
text.set_va(alignment[a][1])
bboxes[i] = text.get_window_extent(r).expanded(*expand).\
transformed(ax.transData.inverted())
return texts
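# Usage sketch (hedged): this helper belongs to a matplotlib text-placement
# module, and its companions get_bboxes/get_points_inside_bbox are assumed
# to be importable alongside it.
import numpy as np
import matplotlib.pyplot as plt

x, y = np.random.rand(20), np.random.rand(20)
plt.scatter(x, y)
texts = [plt.text(xi, yi, 'p{}'.format(i))
         for i, (xi, yi) in enumerate(zip(x, y))]
optimally_align_text(x, y, texts, expand=(1.2, 1.2))
plt.show()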
def _set_helper(self, v, load=False):
"""
Setter method for helper, mapped from YANG variable /rbridge_id/ipv6/router/ospf/graceful_restart/helper (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_helper is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_helper() directly.
YANG Description: Set graceful restart helper options
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=helper.helper, is_container='container', presence=False, yang_name="helper", rest_name="helper", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set graceful restart helper options', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """helper must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=helper.helper, is_container='container', presence=False, yang_name="helper", rest_name="helper", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set graceful restart helper options', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""",
})
self.__helper = t
if hasattr(self, '_set'):
self._set()
def get_supported_types():
"""
    Return a dictionary containing the lists of types supported by the
    namespace browser.
Note:
If you update this list, don't forget to update variablexplorer.rst
in spyder-docs
"""
from datetime import date, timedelta
editable_types = [int, float, complex, list, set, dict, tuple, date,
timedelta] + list(TEXT_TYPES) + list(INT_TYPES)
    try:
        from numpy import ndarray, matrix, generic
        editable_types += [ndarray, matrix, generic]
    except ImportError:
        pass
    try:
        from pandas import DataFrame, Series, DatetimeIndex
        editable_types += [DataFrame, Series, DatetimeIndex]
    except ImportError:
        pass
    picklable_types = editable_types[:]
    try:
        from spyder.pil_patch import Image
        editable_types.append(Image.Image)
    except ImportError:
        pass
return dict(picklable=picklable_types, editable=editable_types)
def forward_selection(self, data, labels, weights, num_features):
"""Iteratively adds features to the model"""
clf = Ridge(alpha=0, fit_intercept=True, random_state=self.random_state)
used_features = []
for _ in range(min(num_features, data.shape[1])):
max_ = -100000000
best = 0
for feature in range(data.shape[1]):
if feature in used_features:
continue
clf.fit(data[:, used_features + [feature]], labels,
sample_weight=weights)
score = clf.score(data[:, used_features + [feature]],
labels,
sample_weight=weights)
if score > max_:
best = feature
max_ = score
used_features.append(best)
return np.array(used_features)
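# Illustrative sketch: forward_selection is a method (note `self`), so
# "explainer" stands for an instance of its defining class (one that carries
# a random_state attribute). Shapes follow the code above: rows are samples,
# columns are candidate features.
import numpy as np

rng = np.random.RandomState(0)
data = rng.rand(100, 10)
labels = 2 * data[:, 3] + data[:, 7]   # only features 3 and 7 are informative
weights = np.ones(100)
chosen = explainer.forward_selection(data, labels, weights, num_features=2)
print(chosen)                           # expected to contain 3 and 7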
def PushSection(self, name, pre_formatters):
"""Given a section name, push it on the top of the stack.
Returns:
The new section, or None if there is no such section.
"""
if name == '@':
value = self.stack[-1].context
else:
value = self.stack[-1].context.get(name)
# Apply pre-formatters
for i, (f, args, formatter_type) in enumerate(pre_formatters):
if formatter_type == ENHANCED_FUNC:
value = f(value, self, args)
elif formatter_type == SIMPLE_FUNC:
value = f(value)
else:
assert False, 'Invalid formatter type %r' % formatter_type
self.stack.append(_Frame(value))
return value
def job_path(cls, project, jobs):
"""Return a fully-qualified job string."""
return google.api_core.path_template.expand(
"projects/{project}/jobs/{jobs}", project=project, jobs=jobs
)
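# Behavior sketch: the expansion is a plain template fill, so for any class
# defining this method the result is deterministic:
import google.api_core.path_template

assert google.api_core.path_template.expand(
    "projects/{project}/jobs/{jobs}", project="my-project", jobs="my-job"
) == "projects/my-project/jobs/my-job"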
def _check_psutil(self, instance):
"""
Gather metrics about connections states and interfaces counters
using psutil facilities
"""
custom_tags = instance.get('tags', [])
if self._collect_cx_state:
self._cx_state_psutil(tags=custom_tags)
self._cx_counters_psutil(tags=custom_tags)
def get_readme(self, repo):
"""
Checks to see if the given repo has a ReadMe. MD means it has a correct
Readme recognized by GitHub.
"""
readme_contents = repo.readme()
if readme_contents is not None:
self.total_readmes += 1
return 'MD'
        if self.search_limit >= 28:
            print('Hit search limit. Sleeping for 60 sec.')
            time.sleep(60)
            self.search_limit = 0
        self.search_limit += 1
        search_results = self.logged_in_gh.search_code(
            'readme in:path repo:' + repo.full_name)
try:
for result in search_results:
path = result.path[1:]
if '/' not in path and 'readme' in path.lower():
self.total_readmes += 1
return path
return 'MISS'
except (github3.models.GitHubError, StopIteration) as e:
return 'MISS'
def execute(self, sensor_graph, scope_stack):
"""Execute this statement on the sensor_graph given the current scope tree.
This adds a single node to the sensor graph with the trigger_streamer function
        as its processing function.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
"""
parent = scope_stack[-1]
alloc = parent.allocator
# The output is unused
output = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)
trigger_stream, trigger_cond = parent.trigger_chain()
streamer_const = alloc.allocate_stream(DataStream.ConstantType, attach=True)
sensor_graph.add_node(u"({} {} && {} always) => {} using trigger_streamer".format(trigger_stream, trigger_cond, streamer_const, output))
sensor_graph.add_constant(streamer_const, self.index)
def diffusionCount(source, target, sourceType = "raw", extraValue = None, pandasFriendly = False, compareCounts = False, numAuthors = True, useAllAuthors = True, _ProgBar = None, extraMapping = None):
"""Takes in two [RecordCollections](../classes/RecordCollection.html#metaknowledge.RecordCollection) and produces a `dict` counting the citations of _source_ by the [Records](../classes/Record.html#metaknowledge.Record) of _target_. By default the `dict` uses `Record` objects as keys but this can be changed with the _sourceType_ keyword to any of the WOS tags.
# Parameters
_source_ : `RecordCollection`
> A metaknowledge `RecordCollection` containing the `Records` being cited
_target_ : `RecordCollection`
> A metaknowledge `RecordCollection` containing the `Records` citing those in _source_
_sourceType_ : `optional [str]`
> default `'raw'`, if `'raw'` the returned `dict` will contain `Records` as keys. If it is a WOS tag the keys will be of that type.
_pandasFriendly_ : `optional [bool]`
> default `False`, makes the output be a dict with two keys one `"Record"` is the list of Records ( or data type requested by _sourceType_) the other is their occurrence counts as `"Counts"`. The lists are the same length.
_compareCounts_ : `optional [bool]`
> default `False`, if `True` the diffusion analysis will be run twice, first with source and target setup like the default (global scope) then using only the source `RecordCollection` (local scope).
_extraValue_ : `optional [str]`
> default `None`, if a tag the returned dictionary will have `Records` mapped to maps, these maps will map the entries for the tag to counts. If _pandasFriendly_ is also `True` the resultant dictionary will have an additional column called `'year'`. This column will contain the year the citations occurred, in addition the Records entries will be duplicated for each year they occur in.
> For example if `'year'` was given then the count for a single `Record` could be `{1990 : 1, 2000 : 5}`
_useAllAuthors_ : `optional [bool]`
> default `True`, if `False` only the first author will be used to generate the `Citations` for the _source_ `Records`
# Returns
`dict[:int]`
> A dictionary with the type given by _sourceType_ as keys and integers as values.
> If _compareCounts_ is `True` the values are tuples with the first integer being the diffusion in the target and the second the diffusion in the source.
> If _pandasFriendly_ is `True` the returned dict has keys with the names of the WOS tags and lists with their values, i.e. a table with labeled columns. The counts are in the column named `"TargetCount"` and if _compareCounts_ the local count is in a column called `"SourceCount"`.
"""
sourceCountString = "SourceCount"
targetCountString = "TargetCount"
if not isinstance(sourceType, str):
raise RuntimeError("{} is not a valid node type, only tags or the string 'raw' are allowed".format(sourceType))
if not isinstance(source, RecordCollection) or not isinstance(target, RecordCollection):
raise RuntimeError("Source and target must be RecordCollections.")
if extraValue is not None and not isinstance(extraValue, str):
raise RuntimeError("{} is not a valid extraValue, only tags are allowed".format(extraValue))
if extraMapping is None:
extraMapping = lambda x : x
if metaknowledge.VERBOSE_MODE or _ProgBar:
if _ProgBar:
PBar = _ProgBar
PBar.updateVal(0, "Starting to analyse a diffusion network")
else:
PBar = _ProgressBar(0, "Starting to analyse a diffusion network")
count = 0
maxCount = len(source)
else:
PBar = _ProgressBar("Starting to analyse a diffusion network", dummy = True)
count = 0
maxCount = len(source)
sourceDict = {}
#Tells the function if the IDs are made of lists or of str
listIds = None
for Rs in source:
if listIds is None and Rs.get(sourceType) is not None:
listIds = isinstance(Rs.get(sourceType), list)
count += 1
PBar.updateVal(count / maxCount * .10, "Analyzing source: " + str(Rs))
RsVal, RsExtras = makeNodeID(Rs, sourceType)
if RsVal:
if useAllAuthors:
for c in Rs.createCitation(multiCite = True):
sourceDict[c] = RsVal
else:
sourceDict[Rs.createCitation()] = RsVal
if extraValue is not None:
if listIds:
sourceCounts = {s : {targetCountString : 0} for s in itertools.chain.from_iterable(sourceDict.values())}
else:
sourceCounts = {s : {targetCountString : 0} for s in sourceDict.values()}
else:
if listIds:
sourceCounts = {s : 0 for s in itertools.chain.from_iterable(sourceDict.values())}
else:
sourceCounts = {s : 0 for s in sourceDict.values()}
count = 0
maxCount = len(target)
PBar.updateVal(.10, "Done analyzing sources, starting on targets")
for Rt in target:
count += 1
PBar.updateVal(count / maxCount * .90 + .10, "Analyzing target: {}".format(Rt))
targetCites = Rt.get('citations', [])
if extraValue is not None:
values = Rt.get(extraValue, [])
if values is None:
values = []
elif not isinstance(values, list):
values = [values]
values = [extraMapping(val) for val in values]
for c in targetCites:
try:
RsourceVals = sourceDict[c]
except KeyError:
continue
if listIds:
for sVal in RsourceVals:
if extraValue:
sourceCounts[sVal][targetCountString] += 1
for val in values:
try:
sourceCounts[sVal][val] += 1
except KeyError:
sourceCounts[sVal][val] = 1
else:
sourceCounts[sVal] += 1
else:
if extraValue:
sourceCounts[RsourceVals][targetCountString] += 1
for val in values:
try:
sourceCounts[RsourceVals][val] += 1
except KeyError:
sourceCounts[RsourceVals][val] = 1
else:
sourceCounts[RsourceVals] += 1
if compareCounts:
localCounts = diffusionCount(source, source, sourceType = sourceType, pandasFriendly = False, compareCounts = False, extraValue = extraValue, _ProgBar = PBar)
if PBar and not _ProgBar:
PBar.finish("Done counting the diffusion of {} sources into {} targets".format(len(source), len(target)))
if pandasFriendly:
retDict = {targetCountString : []}
if numAuthors:
retDict["numAuthors"] = []
if compareCounts:
retDict[sourceCountString] = []
if extraValue is not None:
retDict[extraValue] = []
if sourceType == 'raw':
retrievedFields = []
targetCount = []
for R in sourceCounts.keys():
tagsLst = [t for t in R.keys() if t not in retrievedFields]
retrievedFields += tagsLst
for tag in retrievedFields:
retDict[tag] = []
for R, occ in sourceCounts.items():
if extraValue:
Rvals = R.subDict(retrievedFields)
for extraVal, occCount in occ.items():
retDict[extraValue].append(extraVal)
if numAuthors:
retDict["numAuthors"].append(len(R.get('authorsShort')))
for tag in retrievedFields:
retDict[tag].append(Rvals[tag])
retDict[targetCountString].append(occCount)
if compareCounts:
try:
retDict[sourceCountString].append(localCounts[R][extraVal])
except KeyError:
retDict[sourceCountString].append(0)
else:
Rvals = R.subDict(retrievedFields)
if numAuthors:
retDict["numAuthors"].append(len(R.get('authorsShort')))
for tag in retrievedFields:
retDict[tag].append(Rvals[tag])
retDict[targetCountString].append(occ)
if compareCounts:
retDict[sourceCountString].append(localCounts[R])
else:
countLst = []
recLst = []
locLst = []
if extraValue:
extraValueLst = []
for R, occ in sourceCounts.items():
if extraValue:
for extraVal, occCount in occ.items():
countLst.append(occCount)
recLst.append(R)
extraValueLst.append(extraVal)
if compareCounts:
try:
locLst.append(localCounts[R][extraValue])
except KeyError:
locLst.append(0)
else:
countLst.append(occ)
recLst.append(R)
if compareCounts:
locLst.append(localCounts[R])
if compareCounts:
retDict = {sourceType : recLst, targetCountString : countLst, sourceCountString : locLst}
else:
retDict = {sourceType : recLst, targetCountString : countLst}
if extraValue:
retDict[extraValue] = extraValueLst
return retDict
else:
if compareCounts:
for R, occ in localCounts.items():
sourceCounts[R] = (sourceCounts[R], occ)
return sourceCounts
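# Usage sketch, hedged: assumes two directories of Web of Science records
# readable by metaknowledge (directory names are illustrative).
import pandas
import metaknowledge as mk

source = mk.RecordCollection('savedrecs_classics/')
target = mk.RecordCollection('savedrecs_recent/')
counts = diffusionCount(source, target, sourceType='title',
                        pandasFriendly=True, numAuthors=False)
df = pandas.DataFrame(counts).sort_values('TargetCount', ascending=False)
print(df.head())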
def cancel_current_route(
payment_state: InitiatorPaymentState,
initiator_state: InitiatorTransferState,
) -> List[Event]:
""" Cancel current route.
This allows a new route to be tried.
"""
assert can_cancel(initiator_state), 'Cannot cancel a route after the secret is revealed'
transfer_description = initiator_state.transfer_description
payment_state.cancelled_channels.append(initiator_state.channel_identifier)
return events_for_cancel_current_route(transfer_description)
def unlock_file(filename):
'''
Unlock a locked file
Note that these locks are only recognized by Salt Cloud, and not other
programs or platforms.
'''
log.trace('Removing lock for %s', filename)
lock = filename + '.lock'
try:
os.remove(lock)
except OSError as exc:
log.trace('Unable to remove lock for %s: %s', filename, exc)
def rm_auth_key_from_file(user,
source,
config='.ssh/authorized_keys',
saltenv='base',
fingerprint_hash_type=None):
'''
Remove an authorized key from the specified user's authorized key file,
using a file as source
CLI Example:
.. code-block:: bash
salt '*' ssh.rm_auth_key_from_file <user> salt://ssh_keys/<user>.id_rsa.pub
'''
lfile = __salt__['cp.cache_file'](source, saltenv)
if not os.path.isfile(lfile):
raise CommandExecutionError(
'Failed to pull key file from salt file server'
)
s_keys = _validate_keys(lfile, fingerprint_hash_type)
if not s_keys:
err = (
'No keys detected in {0}. Is file properly formatted?'.format(
source
)
)
log.error(err)
__context__['ssh_auth.error'] = err
return 'fail'
else:
rval = ''
for key in s_keys:
rval += rm_auth_key(
user,
key,
config=config,
fingerprint_hash_type=fingerprint_hash_type
)
# Due to the ability for a single file to have multiple keys, it's
# possible for a single call to this function to have both "replace"
# and "new" as possible valid returns. I ordered the following as I
# thought best.
if 'Key not removed' in rval:
return 'Key not removed'
elif 'Key removed' in rval:
return 'Key removed'
else:
return 'Key not present'
def get_valid_time_stamp():
"""
Get a valid time stamp without illegal characters.
Adds time_ to make the time stamp a valid table name in sql.
    :return: str, the generated timestamp
"""
time_stamp = str(datetime.datetime.now())
time_stamp = "time_" + time_stamp.replace("-", "_").replace(":", "_").replace(" ", "_").replace(".", "_")
return time_stamp
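# Behavior sketch: every '-', ':', ' ', and '.' is replaced with '_', so
# "2024-01-15 13:45:07.123456" becomes "time_2024_01_15_13_45_07_123456",
# which is usable as a SQL table name.
stamp = get_valid_time_stamp()
assert stamp.startswith("time_") and " " not in stamp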
def predict_class(self, features):
"""
        Model inference based on the given data, returning the predicted labels.
        :param features: a ndarray or list of ndarray for local inference,
            or RDD[Sample] for running in distributed fashion
        :return: ndarray or RDD[Sample] depending on the type of features.
"""
if isinstance(features, RDD):
return self.predict_class_distributed(features)
else:
return self.predict_class_local(features)
def spi_configure_mode(self, spi_mode):
"""Configure the SPI interface by the well known SPI modes."""
if spi_mode == SPI_MODE_0:
self.spi_configure(SPI_POL_RISING_FALLING,
SPI_PHASE_SAMPLE_SETUP, SPI_BITORDER_MSB)
elif spi_mode == SPI_MODE_3:
self.spi_configure(SPI_POL_FALLING_RISING,
SPI_PHASE_SETUP_SAMPLE, SPI_BITORDER_MSB)
else:
raise RuntimeError('SPI Mode not supported')
def get(self, sid):
"""
Constructs a EngagementContext
:param sid: Engagement Sid.
:returns: twilio.rest.studio.v1.flow.engagement.EngagementContext
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
"""
return EngagementContext(self._version, flow_sid=self._solution['flow_sid'], sid=sid, )
def get_doctype(self, index, name):
"""
Returns a doctype given an index and a name
"""
if index not in self.indices:
self.get_all_indices()
return self.indices.get(index, {}).get(name, None)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_migrate_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
migrate_time = ET.SubElement(rstp, "migrate-time")
migrate_time.text = kwargs.pop('migrate_time')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def _add_subscribers_for_type(self, callback_type, subscribers, callbacks, **kwargs):
''' add a done/queued/progress callback to the appropriate list '''
for subscriber in subscribers:
callback_name = 'on_' + callback_type
if hasattr(subscriber, callback_name):
_function = functools.partial(getattr(subscriber, callback_name), **kwargs)
callbacks.append(_function)
def region_path(cls, project, region):
"""Return a fully-qualified region string."""
return google.api_core.path_template.expand(
"projects/{project}/regions/{region}", project=project, region=region
)
def wrpcap(filename, pkt, *args, **kargs):
"""Write a list of packets to a pcap file
gz: set to 1 to save a gzipped capture
linktype: force linktype value
endianness: "<" or ">", force endianness"""
with PcapWriter(filename, *args, **kargs) as pcap:
pcap.write(pkt)
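# Usage sketch with Scapy's packet classes: write three ICMP echo requests
# to a gzipped capture, using the gz flag described in the docstring.
from scapy.all import IP, ICMP

pkts = [IP(dst='192.0.2.1') / ICMP() for _ in range(3)]
wrpcap('ping.pcap.gz', pkts, gz=1)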
def rule_command_cmdlist_interface_s_interface_fc_leaf_interface_fibrechannel_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_s = ET.SubElement(cmdlist, "interface-s")
interface_fc_leaf = ET.SubElement(interface_s, "interface-fc-leaf")
interface = ET.SubElement(interface_fc_leaf, "interface")
fibrechannel_leaf = ET.SubElement(interface, "fibrechannel-leaf")
fibrechannel_leaf.text = kwargs.pop('fibrechannel_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def _check_frames(self, frames, fill_value):
"""Reduce frames to no more than are available in the file."""
if self.seekable():
remaining_frames = self.frames - self.tell()
if frames < 0 or (frames > remaining_frames and
fill_value is None):
frames = remaining_frames
elif frames < 0:
raise ValueError("frames must be specified for non-seekable files")
return frames
def vertex_graph(entities):
"""
Given a set of entity objects generate a networkx.Graph
that represents their vertex nodes.
Parameters
--------------
entities : list
Objects with 'closed' and 'nodes' attributes
Returns
-------------
graph : networkx.Graph
Graph where node indexes represent vertices
closed : (n,) int
Indexes of entities which are 'closed'
"""
graph = nx.Graph()
closed = []
for index, entity in enumerate(entities):
if entity.closed:
closed.append(index)
else:
graph.add_edges_from(entity.nodes,
entity_index=index)
return graph, np.array(closed)
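# Illustrative sketch: any objects exposing `closed` and `nodes` attributes
# work, so a tiny stand-in entity type is used here in place of the real
# path entities this helper normally receives.
import collections

Entity = collections.namedtuple('Entity', ['closed', 'nodes'])
entities = [Entity(closed=False, nodes=[(0, 1), (1, 2)]),
            Entity(closed=True, nodes=[(3, 3)])]
graph, closed = vertex_graph(entities)
print(sorted(graph.nodes()), closed)   # [0, 1, 2] [1]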
def setup_logging(log_file=os.devnull):
"""
Configures logging.
By default logs from all workers are printed to the console, entries are
prefixed with "N: " where N is the rank of the worker. Logs printed to the
    console don't include timestamps.
Full logs with timestamps are saved to the log_file file.
"""
class RankFilter(logging.Filter):
def __init__(self, rank):
self.rank = rank
def filter(self, record):
record.rank = self.rank
return True
rank = get_rank()
rank_filter = RankFilter(rank)
logging_format = "%(asctime)s - %(levelname)s - %(rank)s - %(message)s"
logging.basicConfig(level=logging.DEBUG,
format=logging_format,
datefmt="%Y-%m-%d %H:%M:%S",
filename=log_file,
filemode='w')
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(rank)s: %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.getLogger('').addFilter(rank_filter)
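# Usage sketch: get_rank() (called above) must resolve to this worker's rank,
# e.g. via torch.distributed; the log file name is illustrative.
import logging

setup_logging(log_file='train.log')
logging.info('starting training')   # console shows e.g. "0: starting training"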
def get_evernote_notes(self, evernote_filter):
"""
get the notes related to the filter
:param evernote_filter: filtering
:return: notes
"""
data = []
note_store = self.client.get_note_store()
our_note_list = note_store.findNotesMetadata(self.token, evernote_filter, 0, 100,
EvernoteMgr.set_evernote_spec())
for note in our_note_list.notes:
whole_note = note_store.getNote(self.token, note.guid, True, True, False, False)
content = self._cleaning_content(whole_note.content)
data.append({'title': note.title, 'my_date': arrow.get(note.created),
'link': whole_note.attributes.sourceURL, 'content': content})
return data
def _ParseFileEntry(self, knowledge_base, file_entry):
"""Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
root_key = self._GetPlistRootKey(file_entry)
if not root_key:
location = getattr(file_entry.path_spec, 'location', '')
raise errors.PreProcessFail((
'Unable to read: {0:s} plist: {1:s} with error: missing root '
'key.').format(self.ARTIFACT_DEFINITION_NAME, location))
try:
match = self._GetKeysDefaultEmpty(root_key, self._KEYS)
except KeyError as exception:
location = getattr(file_entry.path_spec, 'location', '')
raise errors.PreProcessFail(
'Unable to read: {0:s} plist: {1:s} with error: {2!s}'.format(
self.ARTIFACT_DEFINITION_NAME, location, exception))
name = match.get('name', [None])[0]
uid = match.get('uid', [None])[0]
if not name or not uid:
# TODO: add and store preprocessing errors.
return
user_account = artifacts.UserAccountArtifact(
identifier=uid, username=name)
user_account.group_identifier = match.get('gid', [None])[0]
user_account.full_name = match.get('realname', [None])[0]
user_account.shell = match.get('shell', [None])[0]
user_account.user_directory = match.get('home', [None])[0]
try:
knowledge_base.AddUserAccount(user_account)
except KeyError:
# TODO: add and store preprocessing errors.
pass
|
Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
def by(self, technology):
"""
Get the plugins registered in PedalPi by technology
:param PluginTechnology technology: PluginTechnology identifier
"""
if technology == PluginTechnology.LV2 \
or str(technology).upper() == PluginTechnology.LV2.value.upper():
return self.lv2_builder.all
else:
return []
|
Get the plugins registered in PedalPi by technology
:param PluginTechnology technology: PluginTechnology identifier
|
def node_received_infos(node_id):
"""Get all the infos a node has been sent and has received.
You must specify the node id in the url.
You can also pass the info type.
"""
exp = Experiment(session)
# get the parameters
info_type = request_parameter(
parameter="info_type", parameter_type="known_class", default=models.Info
)
if type(info_type) == Response:
return info_type
# check the node exists
node = models.Node.query.get(node_id)
if node is None:
return error_response(
error_type="/node/infos, node {} does not exist".format(node_id)
)
# execute the request:
infos = node.received_infos(type=info_type)
try:
# ping the experiment
exp.info_get_request(node=node, infos=infos)
session.commit()
except Exception:
return error_response(
error_type="info_get_request error",
status=403,
participant=node.participant,
)
return success_response(infos=[i.__json__() for i in infos])
|
Get all the infos a node has been sent and has received.
You must specify the node id in the url.
You can also pass the info type.
|
def rank(self):
"""
Return the rank of the given hypergraph.
@rtype: int
@return: Rank of graph.
"""
max_rank = 0
for each in self.hyperedges():
if len(self.edge_links[each]) > max_rank:
max_rank = len(self.edge_links[each])
return max_rank
|
Return the rank of the given hypergraph.
@rtype: int
@return: Rank of graph.
|
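A standalone restatement of the computation, since the hypergraph class itself is not shown here; edge_links is a hypothetical stand-in for the attribute used above. The rank is simply the size of the largest hyperedge:

# hypothetical hyperedge -> node-list mapping
edge_links = {
    'e1': ['a', 'b'],
    'e2': ['a', 'b', 'c'],  # largest hyperedge, so the rank is 3
    'e3': ['c'],
}
rank = max((len(nodes) for nodes in edge_links.values()), default=0)
assert rank == 3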
def _approx_eq_(self, other: Any, atol: Union[int, float]) -> bool:
"""See `cirq.protocols.SupportsApproximateEquality`."""
if not isinstance(other, type(self)):
return NotImplemented
return approx_eq(self.operations, other.operations, atol=atol)
|
See `cirq.protocols.SupportsApproximateEquality`.
|
def teardown(self):
'''Teardown trust domain by removing trusted devices.'''
for device in self.devices:
self._remove_trustee(device)
self._populate_domain()
self.domain = {}
|
Teardown trust domain by removing trusted devices.
|
def _get_cookie(self, name, domain):
''' Return the cookie "name" for "domain" if found
        If there is more than one, only the first is returned
'''
for c in self.session.cookies:
if c.name==name and c.domain==domain:
return c
return None
|
Return the cookie "name" for "domain" if found
If there is more than one, only the first is returned
|
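A hedged usage sketch of the same lookup against a requests.Session, which is presumably what self.session holds (the cookie objects come from http.cookiejar and expose .name, .domain and .value):

import requests

session = requests.Session()
session.cookies.set('sessionid', 'abc123', domain='example.com')

found = None
for c in session.cookies:
    if c.name == 'sessionid' and c.domain == 'example.com':
        found = c  # first match wins, as in _get_cookie above
        break
print(found.value if found else None)  # -> abc123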
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
app_conf = dci_config.generate_conf()
connectable = dci_config.get_engine(app_conf)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
)
with context.begin_transaction():
context.run_migrations()
|
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
|
def _server_rollback():
"""Removes script database and archive files to rollback the CI server
installation.
"""
#Remove the data and archive files specified in settings. The cron
#gets remove by the _setup_server() script if -rollback is specified.
from os import path, remove
archpath = path.abspath(path.expanduser(settings.archfile))
if path.isfile(archpath) and not args["nolive"]:
vms("Removing archive JSON file at {}.".format(archpath))
remove(archpath)
datapath = path.abspath(path.expanduser(settings.datafile))
if path.isfile(datapath) and not args["nolive"]:
vms("Removing script database JSON file at {}".format(datapath))
remove(datapath)
|
Removes script database and archive files to rollback the CI server
installation.
|
def add_observer(self, o, component_type=ComponentType):
"""
Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
value = broker.get(comp)
# do something with value
pass
"""
self.observers[component_type].add(o)
|
Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
value = broker.get(comp)
# do something with value
pass
|
def register(self, hash_types):
"""
Registers a function to generate a hash for data of the appropriate
types. This can be used to register custom classes. Internally this is
used to define how to hash non-builtin objects like ndarrays and uuids.
The registered function should return a tuple of bytes. First a small
prefix hinting at the data type, and second the raw bytes that can be
hashed.
Args:
hash_types (class or tuple of classes):
Returns:
func: closure to be used as the decorator
Example:
>>> # xdoctest: +SKIP
            >>> # Skip this doctest because we don't want tests to modify
>>> # the global state.
>>> import ubelt as ub
>>> import pytest
>>> class MyType(object):
... def __init__(self, id):
... self.id = id
>>> data = MyType(1)
            >>> # Custom types won't work with ub.hash_data by default
>>> with pytest.raises(TypeError):
... ub.hash_data(data)
            >>> # You can register your functions with ubelt's internal
            >>> # hashable_extension registry.
>>> @ub.util_hash._HASHABLE_EXTENSIONS.register(MyType)
>>> def hash_my_type(data):
... return b'mytype', six.b(ub.hash_data(data.id))
            >>> # TODO: allow hash_data to take a new instance of
            >>> # HashableExtensions, so we don't have to modify the global
>>> # ubelt state when we run tests.
>>> my_instance = MyType(1)
>>> ub.hash_data(my_instance)
"""
# ensure iterable
if not isinstance(hash_types, (list, tuple)):
hash_types = [hash_types]
def _decor_closure(hash_func):
for hash_type in hash_types:
key = (hash_type.__module__, hash_type.__name__)
self.keyed_extensions[key] = (hash_type, hash_func)
return hash_func
return _decor_closure
|
Registers a function to generate a hash for data of the appropriate
types. This can be used to register custom classes. Internally this is
used to define how to hash non-builtin objects like ndarrays and uuids.
The registered function should return a tuple of bytes. First a small
prefix hinting at the data type, and second the raw bytes that can be
hashed.
Args:
hash_types (class or tuple of classes):
Returns:
func: closure to be used as the decorator
Example:
>>> # xdoctest: +SKIP
>>> # Skip this doctest because we don't want tests to modify
>>> # the global state.
>>> import ubelt as ub
>>> import pytest
>>> class MyType(object):
... def __init__(self, id):
... self.id = id
>>> data = MyType(1)
>>> # Custom types won't work with ub.hash_data by default
>>> with pytest.raises(TypeError):
... ub.hash_data(data)
>>> # You can register your functions with ubelt's internal
>>> # hashable_extension registry.
>>> @ub.util_hash._HASHABLE_EXTENSIONS.register(MyType)
>>> def hash_my_type(data):
... return b'mytype', six.b(ub.hash_data(data.id))
>>> # TODO: allow hash_data to take a new instance of
>>> # HashableExtensions, so we don't have to modify the global
>>> # ubelt state when we run tests.
>>> my_instance = MyType(1)
>>> ub.hash_data(my_instance)
|
def _prep_window(self, **kwargs):
"""
        Provide validation for our window type; return the window
        after it has been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] + _pop_args(win_type,
arg_map[win_type],
kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
|
Provide validation for our window type; return the window
after it has been validated.
|
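For the integer-window branch above, the work reduces to a scipy.signal.get_window call: parameterized window types are passed as tuples, and fftbins=False (the False positional argument in the code) selects the symmetric form. A small sketch:

import scipy.signal as sig

plain = sig.get_window('triang', 5, fftbins=False).astype(float)
kaiser = sig.get_window(('kaiser', 14.0), 5, fftbins=False).astype(float)
print(plain)   # symmetric 5-point triangular window
print(kaiser)  # symmetric 5-point Kaiser window with beta=14.0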
def build_getters_support_matrix(app):
"""Build the getters support matrix."""
status = subprocess.call("./test.sh", stdout=sys.stdout, stderr=sys.stderr)
if status != 0:
print("Something bad happened when processing the test reports.")
sys.exit(-1)
drivers = set()
matrix = {
m: defaultdict(dict)
for m in dir(NetworkDriver)
if not (m.startswith("_") or m in EXCLUDE_METHODS)
}
regex_name = re.compile(r"(?P<driver>\w+)\/.*::test_(?P<getter>\w+)")
filename = "./support/tests/report.json"
with open(filename, "r") as f:
data = json.loads(f.read())
for test in data["report"]["tests"]:
match = regex_name.search(test["name"])
if match:
driver = match.group("driver")
drivers.add(driver)
method = match.group("getter")
else:
continue
if method in EXCLUDE_IN_REPORT:
continue
result = test["outcome"]
if method in METHOD_ALIASES.keys():
method = METHOD_ALIASES[method]
intermediate_result = matrix[method].get(driver, None)
matrix[method][driver] = _merge_results(result, intermediate_result)
sorted_methods = sorted(matrix.keys())
drivers = sorted(drivers)
env = Environment(loader=FileSystemLoader("."))
template_file = env.get_template("matrix.j2")
rendered_template = template_file.render(
matrix=matrix, drivers=drivers, sorted_methods=sorted_methods
)
with open("support/matrix.rst", "w") as f:
f.write(rendered_template)
|
Build the getters support matrix.
|
def present(self, value):
"""Return a user-friendly representation of a value.
        Look up the value in self.special, or build the representation from self.format if absent.
"""
for k, v in self.special.items():
if v == value:
return k
return ''.join(self.get_separator(i) + self.format[i].present(v) for i, v in enumerate(value))
|
Return a user-friendly representation of a value.
Look up the value in self.special, or build the representation from self.format if absent.
|
async def spawn_n(self, agent_cls, n, *args, addr=None, **kwargs):
"""Same as :meth:`~creamas.mp.MultiEnvironment.spawn`, but allows
spawning multiple agents with the same initialization parameters
simultaneously into **one** slave environment.
:param str agent_cls:
``qualname`` of the agent class. That is, the name should be in the
form of ``pkg.mod:cls``, e.g. ``creamas.core.agent:CreativeAgent``.
:param int n: Number of agents to spawn
:param str addr:
            Optional. Address for the slave environment's manager.
If :attr:`addr` is None, spawns the agents in the slave environment
with currently smallest number of agents.
:returns:
A list of (:class:`aiomas.rpc.Proxy`, address)-tuples for the
spawned agents.
The ``*args`` and ``**kwargs`` are passed down to each agent's
:meth:`__init__`.
"""
if addr is None:
addr = await self._get_smallest_env()
r_manager = await self.env.connect(addr)
return await r_manager.spawn_n(agent_cls, n, *args, **kwargs)
|
Same as :meth:`~creamas.mp.MultiEnvironment.spawn`, but allows
spawning multiple agents with the same initialization parameters
simultaneously into **one** slave environment.
:param str agent_cls:
``qualname`` of the agent class. That is, the name should be in the
form of ``pkg.mod:cls``, e.g. ``creamas.core.agent:CreativeAgent``.
:param int n: Number of agents to spawn
:param str addr:
Optional. Address for the slave environment's manager.
If :attr:`addr` is None, spawns the agents in the slave environment
with currently smallest number of agents.
:returns:
A list of (:class:`aiomas.rpc.Proxy`, address)-tuples for the
spawned agents.
The ``*args`` and ``**kwargs`` are passed down to each agent's
:meth:`__init__`.
|
def deserialize_object(buffers, g=None):
"""Reconstruct an object serialized by serialize_object from data buffers.
Parameters
----------
    buffers : list of buffers/bytes
g : globals to be used when uncanning
Returns
-------
(newobj, bufs) : unpacked object, and the list of remaining unused buffers.
"""
bufs = list(buffers)
pobj = buffer_to_bytes_py2(bufs.pop(0))
canned = pickle.loads(pobj)
if istype(canned, sequence_types) and len(canned) < MAX_ITEMS:
for c in canned:
_restore_buffers(c, bufs)
newobj = uncan_sequence(canned, g)
elif istype(canned, dict) and len(canned) < MAX_ITEMS:
newobj = {}
for k in sorted(canned):
c = canned[k]
_restore_buffers(c, bufs)
newobj[k] = uncan(c, g)
else:
_restore_buffers(canned, bufs)
newobj = uncan(canned, g)
return newobj, bufs
|
Reconstruct an object serialized by serialize_object from data buffers.
Parameters
----------
buffers : list of buffers/bytes
g : globals to be used when uncanning
Returns
-------
(newobj, bufs) : unpacked object, and the list of remaining unused buffers.
|
def _get_bucket_endpoint(self):
"""
Queries S3 to identify the region hosting the provided bucket.
"""
conn = S3Connection()
bucket = conn.lookup(self.bucket_name)
if not bucket:
# TODO: Make the bucket here?
raise InputParameterError('The provided bucket %s doesn\'t exist' % self.bucket_name)
endpoint = str(bucket.get_location())
return endpoint
|
Queries S3 to identify the region hosting the provided bucket.
|
def subslice(inner, outer, section):
    """Helper for rediff.
    outer is a slice (2-tuple, not an official python slice) in global coordinates;
    inner is a slice (2-tuple) on that slice.
    Returns the result of sub-slicing outer by inner.
    """
# todo: think about constraints here. inner and outer ordered, inner[1] less than outer[1]-outer[0]
# todo: this would make more sense as a member of a Slice class
if section=='head': return outer[0],outer[0]+inner[0]
elif section=='tail': return outer[0]+inner[1],outer[1]
elif section=='middle': return outer[0]+inner[0],outer[0]+inner[1]
else: raise ValueError('section val %s not one of (head,middle,tail)'%section)
|
Helper for rediff.
outer is a slice (2-tuple, not an official python slice) in global coordinates;
inner is a slice (2-tuple) on that slice.
Returns the result of sub-slicing outer by inner.
|
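A worked example, assuming subslice from above is in scope. With outer spanning global coordinates (10, 20) and inner = (3, 7) selecting within that slice:

outer, inner = (10, 20), (3, 7)
assert subslice(inner, outer, 'head') == (10, 13)    # portion before the inner slice
assert subslice(inner, outer, 'middle') == (13, 17)  # the inner slice in global coordinates
assert subslice(inner, outer, 'tail') == (17, 20)    # portion after the inner slice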
def post_card(message,
hook_url=None,
title=None,
theme_color=None):
'''
Send a message to an MS Teams channel.
:param message: The message to send to the MS Teams channel.
:param hook_url: The Teams webhook URL, if not specified in the configuration.
:param title: Optional title for the posted card
:param theme_color: Optional hex color highlight for the posted card
:return: Boolean if message was sent successfully.
CLI Example:
.. code-block:: bash
salt '*' msteams.post_card message="Build is done"
'''
if not hook_url:
hook_url = _get_hook_url()
if not message:
log.error('message is a required option.')
payload = {
"text": message,
"title": title,
"themeColor": theme_color
}
result = salt.utils.http.query(hook_url,
method='POST',
data=salt.utils.json.dumps(payload),
status=True)
if result['status'] <= 201:
return True
else:
return {
'res': False,
'message': result.get('body', result['status'])
}
|
Send a message to an MS Teams channel.
:param message: The message to send to the MS Teams channel.
:param hook_url: The Teams webhook URL, if not specified in the configuration.
:param title: Optional title for the posted card
:param theme_color: Optional hex color highlight for the posted card
:return: Boolean if message was sent successfully.
CLI Example:
.. code-block:: bash
salt '*' msteams.post_card message="Build is done"
|
def edit_prefix(self, auth, spec, attr):
""" Update prefix matching `spec` with attributes `attr`.
* `auth` [BaseAuth]
AAA options.
* `spec` [prefix_spec]
Specifies the prefix to edit.
* `attr` [prefix_attr]
Prefix attributes.
Note that there are restrictions on when and how a prefix's type
can be changed; reservations can be changed to assignments and vice
versa, but only if they contain no child prefixes.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.edit_prefix` for full
understanding.
"""
self._logger.debug("edit_prefix called; spec: %s attr: %s" %
(unicode(spec), unicode(attr)))
# Handle Pool - find correct one and remove bad pool keys
pool = None
if 'pool_id' in attr or 'pool_name' in attr:
if 'pool_id' in attr:
if attr['pool_id'] is None:
pool = {
'id': None,
'name': None
}
else:
pool = self._get_pool(auth, { 'id': attr['pool_id'] })
else:
if attr['pool_name'] is None:
pool = {
'id': None,
'name': None
}
else:
# resolve pool name to pool id
pool = self._get_pool(auth, { 'name': attr['pool_name'] })
# and delete the pool_name attr
del(attr['pool_name'])
attr['pool_id'] = pool['id']
else:
pool = {
'id': None,
'name': None
}
# Handle VRF - find the correct one and remove bad VRF keys.
vrf = self._get_vrf(auth, attr)
if 'vrf_rt' in attr:
del(attr['vrf_rt'])
if 'vrf_name' in attr:
del(attr['vrf_name'])
attr['vrf_id'] = vrf['id']
self._check_attr(attr, [], _prefix_attrs)
if 'expires' in attr:
attr['expires'] = _parse_expires(attr['expires'])
prefixes = self.list_prefix(auth, spec)
where, params1 = self._expand_prefix_spec(spec.copy())
update, params2 = self._sql_expand_update(attr)
params = dict(params2.items() + params1.items())
sql = "UPDATE ip_net_plan SET " + update + " WHERE " + where
sql += " RETURNING id"
self._execute(sql, params)
updated_prefixes = self._get_updated_rows(auth, self.search_prefix)
# write to audit table
audit_params = {
'username': auth.username,
'authenticated_as': auth.authenticated_as,
'full_name': auth.full_name,
'authoritative_source': auth.authoritative_source,
'vrf_id': vrf['id'],
'vrf_rt': vrf['rt'],
'vrf_name': vrf['name']
}
for p in prefixes:
audit_params['vrf_id'] = p['vrf_id']
audit_params['vrf_rt'] = p['vrf_rt']
audit_params['vrf_name'] = p['vrf_name']
audit_params['prefix_id'] = p['id']
audit_params['prefix_prefix'] = p['prefix']
audit_params['description'] = 'Edited prefix %s attr: %s' % (p['prefix'], unicode(attr))
sql, params = self._sql_expand_insert(audit_params)
self._execute('INSERT INTO ip_net_log %s' % sql, params)
# Only add to log if something was changed
if p['pool_id'] != pool['id']:
audit_params2 = {
'prefix_id': p['id'],
'prefix_prefix': p['prefix'],
'vrf_id': p['vrf_id'],
'vrf_rt': p['vrf_rt'],
'vrf_name': p['vrf_name'],
'username': auth.username,
'authenticated_as': auth.authenticated_as,
'full_name': auth.full_name,
'authoritative_source': auth.authoritative_source,
}
# If pool ID set, pool was expanded
if pool['id'] is not None:
audit_params2['pool_id'] = pool['id']
audit_params2['pool_name'] = pool['name']
audit_params2['description'] = 'Expanded pool %s with prefix %s' % (pool['name'], p['prefix'])
sql, params = self._sql_expand_insert(audit_params2)
self._execute('INSERT INTO ip_net_log %s' % sql, params)
# if prefix had pool set previously, prefix was removed from that pool
if p['pool_id'] is not None:
pool2 = self._get_pool(auth, { 'id': p['pool_id'] })
audit_params2['pool_id'] = pool2['id']
audit_params2['pool_name'] = pool2['name']
audit_params2['description'] = 'Removed prefix %s from pool %s' % (p['prefix'], pool2['name'])
sql, params = self._sql_expand_insert(audit_params2)
self._execute('INSERT INTO ip_net_log %s' % sql, params)
return updated_prefixes
|
Update prefix matching `spec` with attributes `attr`.
* `auth` [BaseAuth]
AAA options.
* `spec` [prefix_spec]
Specifies the prefix to edit.
* `attr` [prefix_attr]
Prefix attributes.
Note that there are restrictions on when and how a prefix's type
can be changed; reservations can be changed to assignments and vice
versa, but only if they contain no child prefixes.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.edit_prefix` for full
understanding.
|
def help_box():
"""A simple HTML help dialog box using the distribution data files."""
style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
dialog_box = wx.Dialog(None, wx.ID_ANY, HELP_TITLE,
style=style, size=(620, 450))
html_widget = HtmlHelp(dialog_box, wx.ID_ANY)
html_widget.page = build_help_html()
dialog_box.ShowModal()
dialog_box.Destroy()
|
A simple HTML help dialog box using the distribution data files.
|
def remove_network_from_dhcp_agent(self, dhcp_agent, network_id):
"""Remove a network from dhcp agent."""
return self.delete((self.agent_path + self.DHCP_NETS + "/%s") % (
dhcp_agent, network_id))
|
Remove a network from dhcp agent.
|
def _write_frame(self, data):
"""Write a frame to the PN532 with the specified data bytearray."""
        assert data is not None and 0 < len(data) < 255, 'Data must be array of 1 to 254 bytes.'
# Build frame to send as:
# - SPI data write (0x01)
# - Preamble (0x00)
# - Start code (0x00, 0xFF)
# - Command length (1 byte)
# - Command length checksum
# - Command bytes
# - Checksum
# - Postamble (0x00)
length = len(data)
frame = bytearray(length+8)
frame[0] = PN532_SPI_DATAWRITE
frame[1] = PN532_PREAMBLE
frame[2] = PN532_STARTCODE1
frame[3] = PN532_STARTCODE2
frame[4] = length & 0xFF
frame[5] = self._uint8_add(~length, 1)
frame[6:-2] = data
checksum = reduce(self._uint8_add, data, 0xFF)
frame[-2] = ~checksum & 0xFF
frame[-1] = PN532_POSTAMBLE
# Send frame.
logger.debug('Write frame: 0x{0}'.format(binascii.hexlify(frame)))
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
self._spi.write(frame)
self._gpio.set_high(self._cs)
|
Write a frame to the PN532 with the specified data bytearray.
|
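A standalone sketch of the same frame layout for a sample payload. The constant values and the 0xD4 TFI byte are assumptions based on PN532 datasheet conventions, not taken from the module above:

from functools import reduce

# assumed constant values (PN532 datasheet conventions)
PN532_SPI_DATAWRITE, PN532_PREAMBLE = 0x01, 0x00
PN532_STARTCODE1, PN532_STARTCODE2, PN532_POSTAMBLE = 0x00, 0xFF, 0x00

def uint8_add(a, b):
    return (a + b) & 0xFF

data = bytearray([0xD4, 0x02])  # assumed TFI byte + GetFirmwareVersion command
length = len(data)
frame = bytearray(length + 8)
frame[0] = PN532_SPI_DATAWRITE
frame[1] = PN532_PREAMBLE
frame[2] = PN532_STARTCODE1
frame[3] = PN532_STARTCODE2
frame[4] = length & 0xFF
frame[5] = uint8_add(~length, 1)       # length checksum: (LEN + LCS) % 256 == 0
frame[6:-2] = data
checksum = reduce(uint8_add, data, 0xFF)
frame[-2] = ~checksum & 0xFF           # data checksum over the payload
frame[-1] = PN532_POSTAMBLE
print(frame.hex())                     # full frame as hex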
def form_valid(self, form):
"""After the form is valid lets let people know"""
ret = super(ProjectCopy, self).form_valid(form)
self.copy_relations()
# Good to make note of that
messages.add_message(self.request, messages.SUCCESS, 'Project %s copied' % self.object.name)
return ret
|
After the form is valid, let people know
|
def get_scheduling_block_ids():
"""Return list of scheduling block IDs"""
ids = [key.split('/')[-1]
for key in DB.keys(pattern='scheduling_block/*')]
return sorted(ids)
|
Return list of scheduling block IDs
|
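The key-parsing step above is just "take the ID after the final slash and sort"; a standalone sketch with hypothetical Redis-style keys:

keys = ['scheduling_block/20240101-0003', 'scheduling_block/20240101-0001']
ids = sorted(key.split('/')[-1] for key in keys)
assert ids == ['20240101-0001', '20240101-0003']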
def cfg_folder_loader(path):
"""
:type path: str
"""
CFG_WILDCARD = '*.yaml'
return [load_cfg(filename) for filename in sorted(glob.glob(os.path.join(path, CFG_WILDCARD)))]
|
:type path: str
|
def _poll(self):
"""
Poll Trusted Advisor (Support) API for limit checks.
Return a dict of service name (string) keys to nested dict vals, where
each key is a limit name and each value the current numeric limit.
e.g.:
::
{
'EC2': {
'SomeLimit': 10,
}
}
"""
logger.info("Beginning TrustedAdvisor poll")
tmp = self._get_limit_check_id()
if not self.have_ta:
logger.info('TrustedAdvisor.have_ta is False; not polling TA')
return {}
if tmp is None:
logger.critical("Unable to find 'Service Limits' Trusted Advisor "
"check; not using Trusted Advisor data.")
return
check_id, metadata = tmp
checks = self._get_refreshed_check_result(check_id)
region = self.ta_region or self.conn._client_config.region_name
res = {}
if checks['result'].get('status', '') == 'not_available':
logger.warning(
'Trusted Advisor returned status "not_available" for '
'service limit check; cannot retrieve limits from TA.'
)
return {}
if 'flaggedResources' not in checks['result']:
logger.warning(
'Trusted Advisor returned no results for '
'service limit check; cannot retrieve limits from TA.'
)
return {}
for check in checks['result']['flaggedResources']:
if 'region' in check and check['region'] != region:
continue
data = dict(zip(metadata, check['metadata']))
if data['Service'] not in res:
res[data['Service']] = {}
            try:
                val = int(data['Limit Amount'])
            except ValueError:
                val = data['Limit Amount']
                # only the literal 'Unlimited' is an expected non-numeric
                # value; anything else is unknown and skipped
                if val != 'Unlimited':
                    logger.error('TrustedAdvisor returned unknown Limit '
                                 'Amount %s for %s - %s', val, data['Service'],
                                 data['Limit Name'])
                    continue
                logger.debug('TrustedAdvisor setting explicit "Unlimited" '
                             'limit for %s - %s', data['Service'],
                             data['Limit Name'])
            res[data['Service']][data['Limit Name']] = val
logger.info("Finished TrustedAdvisor poll")
return res
|
Poll Trusted Advisor (Support) API for limit checks.
Return a dict of service name (string) keys to nested dict vals, where
each key is a limit name and each value the current numeric limit.
e.g.:
::
{
'EC2': {
'SomeLimit': 10,
}
}
|
def Parse(self, conditions, host_data):
"""Runs methods that evaluate whether collected host_data has an issue.
Args:
conditions: A list of conditions to determine which Methods to trigger.
host_data: A map of artifacts and rdf data.
Returns:
A CheckResult populated with Anomalies if an issue exists.
"""
result = CheckResult(check_id=self.check_id)
methods = self.SelectChecks(conditions)
result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods])
return result
|
Runs methods that evaluate whether collected host_data has an issue.
Args:
conditions: A list of conditions to determine which Methods to trigger.
host_data: A map of artifacts and rdf data.
Returns:
A CheckResult populated with Anomalies if an issue exists.
|
def install(pkg=None,
pkgs=None,
dir=None,
runas=None,
registry=None,
env=None,
dry_run=False,
silent=True):
'''
Install an NPM package.
If no directory is specified, the package will be installed globally. If
no package is specified, the dependencies (from package.json) of the
package in the given directory will be installed.
pkg
A package name in any format accepted by NPM, including a version
identifier
pkgs
A list of package names in the same format as the ``name`` parameter
.. versionadded:: 2014.7.0
dir
The target directory in which to install the package, or None for
global installation
runas
The user to run NPM with
registry
The NPM registry to install the package from.
.. versionadded:: 2014.7.0
env
Environment variables to set when invoking npm. Uses the same ``env``
format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
function.
.. versionadded:: 2014.7.0
    silent
        Whether or not to run NPM install with --silent flag.
        .. versionadded:: 2015.8.5
    dry_run
        Whether or not to run NPM install with --dry-run flag.
        .. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' npm.install coffee-script
salt '*' npm.install coffee-script@1.0.1
'''
# Protect against injection
if pkg:
pkgs = [_cmd_quote(pkg)]
elif pkgs:
pkgs = [_cmd_quote(v) for v in pkgs]
else:
pkgs = []
if registry:
registry = _cmd_quote(registry)
cmd = ['npm', 'install', '--json']
if silent:
cmd.append('--silent')
if not dir:
cmd.append('--global')
if registry:
cmd.append('--registry="{0}"'.format(registry))
if dry_run:
cmd.append('--dry-run')
cmd.extend(pkgs)
env = env or {}
if runas:
uid = salt.utils.user.get_uid(runas)
if uid:
env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
cmd = ' '.join(cmd)
result = __salt__['cmd.run_all'](cmd,
python_shell=True,
cwd=dir,
runas=runas,
env=env)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
# npm >1.2.21 is putting the output to stderr even though retcode is 0
npm_output = result['stdout'] or result['stderr']
try:
return salt.utils.json.find_json(npm_output)
except ValueError:
return npm_output
|
Install an NPM package.
If no directory is specified, the package will be installed globally. If
no package is specified, the dependencies (from package.json) of the
package in the given directory will be installed.
pkg
A package name in any format accepted by NPM, including a version
identifier
pkgs
A list of package names in the same format as the ``name`` parameter
.. versionadded:: 2014.7.0
dir
The target directory in which to install the package, or None for
global installation
runas
The user to run NPM with
registry
The NPM registry to install the package from.
.. versionadded:: 2014.7.0
env
Environment variables to set when invoking npm. Uses the same ``env``
format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
function.
.. versionadded:: 2014.7.0
silent
Whether or not to run NPM install with --silent flag.
.. versionadded:: 2015.8.5
dry_run
Whether or not to run NPM install with --dry-run flag.
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' npm.install coffee-script
salt '*' npm.install coffee-script@1.0.1
|
def create_window(self, pane, name=None, set_active=True):
"""
Create a new window that contains just this pane.
:param pane: The :class:`.Pane` instance to put in the new window.
:param name: If given, name for the new window.
:param set_active: When True, focus the new window.
"""
assert isinstance(pane, Pane)
assert name is None or isinstance(name, six.text_type)
# Take the first available index.
taken_indexes = [w.index for w in self.windows]
index = self.base_index
while index in taken_indexes:
index += 1
# Create new window and add it.
w = Window(index)
w.add_pane(pane)
self.windows.append(w)
# Sort windows by index.
self.windows = sorted(self.windows, key=lambda w: w.index)
app = get_app(return_none=True)
if app is not None and set_active:
self.set_active_window(w)
if name is not None:
w.chosen_name = name
assert w.active_pane == pane
assert w._get_parent(pane)
|
Create a new window that contains just this pane.
:param pane: The :class:`.Pane` instance to put in the new window.
:param name: If given, name for the new window.
:param set_active: When True, focus the new window.
|
def load_targets(explanatory_rasters):
"""
Parameters
----------
explanatory_rasters : List of Paths to GDAL rasters containing explanatory variables
Returns
-------
expl : Array of explanatory variables
raster_info : dict of raster info
"""
explanatory_raster_arrays = []
aff = None
shape = None
crs = None
for raster in explanatory_rasters:
logger.debug(raster)
with rasterio.open(raster) as src:
ar = src.read(1) # TODO band num?
# Save or check the geotransform
if not aff:
aff = src.affine
else:
assert aff == src.affine
# Save or check the shape
if not shape:
shape = ar.shape
else:
assert shape == ar.shape
# Save or check the geotransform
if not crs:
crs = src.crs
else:
assert crs == src.crs
# Flatten in one dimension
arf = ar.flatten()
explanatory_raster_arrays.append(arf)
expl = np.array(explanatory_raster_arrays).T
raster_info = {
'affine': aff,
'shape': shape,
'crs': crs
}
return expl, raster_info
|
Parameters
----------
explanatory_rasters : List of Paths to GDAL rasters containing explanatory variables
Returns
-------
expl : Array of explanatory variables
raster_info : dict of raster info
|
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
    # Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, str):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, str):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
|
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
|
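A hedged usage sketch, assuming run_cmd from above is in scope:

returncode, stdout, stderr = run_cmd(['git', '--version'])
if returncode == 0:
    print(stdout.strip())              # e.g. 'git version 2.39.2'
else:
    print('command failed:', stderr.strip())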
def use_federated_bank_view(self):
"""Pass through to provider ItemLookupSession.use_federated_bank_view"""
self._bank_view = FEDERATED
# self._get_provider_session('item_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_federated_bank_view()
except AttributeError:
pass
|
Pass through to provider ItemLookupSession.use_federated_bank_view
|
def install(name=None,
fromrepo=None,
pkgs=None,
sources=None,
jail=None,
chroot=None,
root=None,
orphan=False,
force=False,
glob=False,
local=False,
dryrun=False,
quiet=False,
reinstall_requires=False,
regex=False,
pcre=False,
batch=False,
**kwargs):
'''
Install package(s) from a repository
name
The name of the package to install
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
jail
Install the package into the specified jail
chroot
Install the package into the specified chroot (ignored if ``jail`` is
specified)
root
Install the package into the specified root (ignored if ``jail`` is
specified)
orphan
Mark the installed package as orphan. Will be automatically removed
if no other packages depend on them. For more information please
refer to ``pkg-autoremove(8)``.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> orphan=True
force
Force the reinstallation of the package if already installed.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> force=True
glob
Treat the package names as shell glob patterns.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> glob=True
local
Do not update the repository catalogs with ``pkg-update(8)``. A
value of ``True`` here is equivalent to using the ``-U`` flag with
``pkg install``.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> local=True
dryrun
        Dry-run mode. The list of changes to packages is always printed,
but no changes are actually made.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> dryrun=True
quiet
Force quiet output, except when dryrun is used, where pkg install
will always show packages to be installed, upgraded or deleted.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> quiet=True
reinstall_requires
When used with force, reinstalls any packages that require the
given package.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> reinstall_requires=True force=True
.. versionchanged:: 2014.7.0
``require`` kwarg renamed to ``reinstall_requires``
fromrepo
In multi-repo mode, override the pkg.conf ordering and only attempt
to download packages from the named repository.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> fromrepo=repo
regex
Treat the package names as a regular expression
CLI Example:
.. code-block:: bash
salt '*' pkg.install <regular expression> regex=True
pcre
Treat the package names as extended regular expressions.
CLI Example:
        .. code-block:: bash
            salt '*' pkg.install <extended regular expression> pcre=True
batch
Use BATCH=true for pkg install, skipping all questions.
Be careful when using in production.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> batch=True
'''
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
if not pkg_params:
return {}
env = {}
opts = 'y'
if salt.utils.data.is_true(orphan):
opts += 'A'
if salt.utils.data.is_true(force):
opts += 'f'
if salt.utils.data.is_true(glob):
opts += 'g'
if salt.utils.data.is_true(local):
opts += 'U'
if salt.utils.data.is_true(dryrun):
opts += 'n'
if salt.utils.data.is_true(quiet):
opts += 'q'
if salt.utils.data.is_true(reinstall_requires):
opts += 'R'
if salt.utils.data.is_true(regex):
opts += 'x'
if salt.utils.data.is_true(pcre):
opts += 'X'
if salt.utils.data.is_true(batch):
env = {
"BATCH": "true",
"ASSUME_ALWAYS_YES": "YES"
}
old = list_pkgs(jail=jail, chroot=chroot, root=root)
if pkg_type == 'file':
pkg_cmd = 'add'
# pkg add has smaller set of options (i.e. no -y or -n), filter below
opts = ''.join([opt for opt in opts if opt in 'AfIMq'])
targets = pkg_params
elif pkg_type == 'repository':
pkg_cmd = 'install'
if pkgs is None and kwargs.get('version') and len(pkg_params) == 1:
# Only use the 'version' param if 'name' was not specified as a
# comma-separated list
pkg_params = {name: kwargs.get('version')}
targets = []
for param, version_num in six.iteritems(pkg_params):
if version_num is None:
targets.append(param)
else:
targets.append('{0}-{1}'.format(param, version_num))
else:
raise CommandExecutionError('Problem encountered installing package(s)')
cmd = _pkg(jail, chroot, root)
cmd.append(pkg_cmd)
if fromrepo:
cmd.extend(['-r', fromrepo])
if opts:
cmd.append('-' + opts)
cmd.extend(targets)
if pkg_cmd == 'add' and salt.utils.data.is_true(dryrun):
# pkg add doesn't have a dry-run mode, so echo out what will be run
return ' '.join(cmd)
out = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False,
env=env
)
if out['retcode'] != 0 and out['stderr']:
errors = [out['stderr']]
else:
errors = []
__context__.pop(_contextkey(jail, chroot, root), None)
__context__.pop(_contextkey(jail, chroot, root, prefix='pkg.origin'), None)
new = list_pkgs(jail=jail, chroot=chroot, root=root)
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
'Problem encountered installing package(s)',
info={'errors': errors, 'changes': ret}
)
return ret
|
Install package(s) from a repository
name
The name of the package to install
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
jail
Install the package into the specified jail
chroot
Install the package into the specified chroot (ignored if ``jail`` is
specified)
root
Install the package into the specified root (ignored if ``jail`` is
specified)
orphan
Mark the installed package as orphan. Will be automatically removed
if no other packages depend on them. For more information please
refer to ``pkg-autoremove(8)``.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> orphan=True
force
Force the reinstallation of the package if already installed.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> force=True
glob
Treat the package names as shell glob patterns.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> glob=True
local
Do not update the repository catalogs with ``pkg-update(8)``. A
value of ``True`` here is equivalent to using the ``-U`` flag with
``pkg install``.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> local=True
dryrun
Dry-run mode. The list of changes to packages is always printed,
but no changes are actually made.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> dryrun=True
quiet
Force quiet output, except when dryrun is used, where pkg install
will always show packages to be installed, upgraded or deleted.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> quiet=True
reinstall_requires
When used with force, reinstalls any packages that require the
given package.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> reinstall_requires=True force=True
.. versionchanged:: 2014.7.0
``require`` kwarg renamed to ``reinstall_requires``
fromrepo
In multi-repo mode, override the pkg.conf ordering and only attempt
to download packages from the named repository.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> fromrepo=repo
regex
Treat the package names as a regular expression
CLI Example:
.. code-block:: bash
salt '*' pkg.install <regular expression> regex=True
pcre
Treat the package names as extended regular expressions.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <extended regular expression> pcre=True
batch
Use BATCH=true for pkg install, skipping all questions.
Be careful when using in production.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name> batch=True
|
def submitter(self):
"""
| Comment: The user who submitted the ticket. The submitter always becomes the author of the first comment on the ticket
"""
if self.api and self.submitter_id:
return self.api._get_user(self.submitter_id)
|
| Comment: The user who submitted the ticket. The submitter always becomes the author of the first comment on the ticket
|
def load(self, steps_dir=None, step_file=None, step_list=None):
"""Load CWL steps into the WorkflowGenerator's steps library.
Adds steps (command line tools and workflows) to the
``WorkflowGenerator``'s steps library. These steps can be used to
create workflows.
Args:
steps_dir (str): path to directory containing CWL files. All CWL in
the directory are loaded.
            step_file (str): path to a file containing a CWL step that will be
                added to the steps library.
            step_list (list): list of paths to CWL files to be added to the
                steps library.
"""
self._closed()
self.steps_library.load(steps_dir=steps_dir, step_file=step_file,
step_list=step_list)
|
Load CWL steps into the WorkflowGenerator's steps library.
Adds steps (command line tools and workflows) to the
``WorkflowGenerator``'s steps library. These steps can be used to
create workflows.
Args:
steps_dir (str): path to directory containing CWL files. All CWL in
the directory are loaded.
step_file (str): path to a file containing a CWL step that will be
added to the steps library.
step_list (list): list of paths to CWL files to be added to the
steps library.
|
def signature_type(self):
"""Return the signature type used in this MAR.
Returns:
One of None, 'unknown', 'sha1', or 'sha384'
"""
if not self.mardata.signatures:
return None
for sig in self.mardata.signatures.sigs:
if sig.algorithm_id == 1:
return 'sha1'
elif sig.algorithm_id == 2:
return 'sha384'
else:
return 'unknown'
|
Return the signature type used in this MAR.
Returns:
One of None, 'unknown', 'sha1', or 'sha384'
|
def unfix(self, param):
"""
Enable parameter optimization.
Parameters
----------
param : str
Possible values are ``"delta"``, ``"beta"``, and ``"scale"``.
"""
if param == "delta":
self._unfix("logistic")
else:
self._fix[param] = False
|
Enable parameter optimization.
Parameters
----------
param : str
Possible values are ``"delta"``, ``"beta"``, and ``"scale"``.
|
def set_input_data(self, key, value):
"""
set_input_data will automatically create an input channel if necessary.
Automatic channel creation is intended for the case where users are trying to set initial values on a block
whose input channels aren't subscribed to anything in the graph.
"""
        if key not in self.input_channels:
self.set_input_channel(key, Channel())
self.input_channels[key].set_value(Data(self.time, value))
|
set_input_data will automatically create an input channel if necessary.
Automatic channel creation is intended for the case where users are trying to set initial values on a block
whose input channels aren't subscribed to anything in the graph.
|
def link_for_image(self, base_dir: str, conf: Config) -> int:
"""Link all artifacts required for a Docker image under `base_dir` and
return the number of linked artifacts."""
return self.link_types(
base_dir,
[ArtifactType.app, ArtifactType.binary, ArtifactType.gen_py],
conf)
|
Link all artifacts required for a Docker image under `base_dir` and
return the number of linked artifacts.
|
def link(self, thing1, thing2):
"""
Link thing1 and thing2, adding the karma of each into
a single entry.
If any thing does not exist, it is created.
"""
thing1 = thing1.strip().lower()
thing2 = thing2.strip().lower()
if thing1 == thing2:
raise SameName("Attempted to link two of the same name")
self.change(thing1, 0)
self.change(thing2, 0)
return self._link(thing1, thing2)
|
Link thing1 and thing2, adding the karma of each into
a single entry.
If any thing does not exist, it is created.
|
def show_support_save_status_output_show_support_save_status_message(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_support_save_status = ET.Element("show_support_save_status")
config = show_support_save_status
output = ET.SubElement(show_support_save_status, "output")
show_support_save_status = ET.SubElement(output, "show-support-save-status")
message = ET.SubElement(show_support_save_status, "message")
message.text = kwargs.pop('message')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def compute_auth_key(userid, password):
"""
Compute the authentication key for freedns.afraid.org.
This is the SHA1 hash of the string b'userid|password'.
:param userid: ascii username
:param password: ascii password
:return: ascii authentication key (SHA1 at this point)
"""
import sys
if sys.version_info >= (3, 0):
return hashlib.sha1(b"|".join((userid.encode("ascii"), # noqa: S303
password.encode("ascii")))).hexdigest()
return hashlib.sha1("|".join((userid, password))).hexdigest()
|
Compute the authentication key for freedns.afraid.org.
This is the SHA1 hash of the string b'userid|password'.
:param userid: ascii username
:param password: ascii password
:return: ascii authentication key (SHA1 at this point)
|
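A standalone equivalent of the Python 3 branch above, runnable as-is:

import hashlib

def freedns_auth_key(userid, password):
    # SHA1 over the ASCII bytes of 'userid|password', hex-encoded
    return hashlib.sha1(
        b'|'.join((userid.encode('ascii'), password.encode('ascii')))
    ).hexdigest()

key = freedns_auth_key('alice', 'secret')
assert len(key) == 40  # SHA1 hex digest length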
def get_all_profiles(store='local'):
'''
Gets all properties for all profiles in the specified store
Args:
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings for each profile
'''
return {
'Domain Profile': get_all_settings(profile='domain', store=store),
'Private Profile': get_all_settings(profile='private', store=store),
'Public Profile': get_all_settings(profile='public', store=store)
}
|
Gets all properties for all profiles in the specified store
Args:
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings for each profile
|
def storages(self):
"""This property gets the list of instances for Storages
This property gets the list of instances for Storages
:returns: a list of instances of Storages
"""
return storage.StorageCollection(
self._conn, utils.get_subresource_path_by(self, 'Storage'),
redfish_version=self.redfish_version)
|
This property gets the list of instances for Storages.
:returns: a list of instances of Storages
|
def make_psrrates(pkllist, nbins=60, period=0.156):
""" Visualize cands in set of pkl files from pulsar observations.
Input pkl list assumed to start with on-axis pulsar scan, followed by off-axis scans.
nbins for output histogram. period is pulsar period in seconds (used to find single peak for cluster of detections).
"""
# get metadata
state = pickle.load(open(pkllist[0], 'r')) # assume single state for all scans
if 'image2' in state['searchtype']:
immaxcol = state['features'].index('immax2')
logger.info('Using immax2 for flux.')
elif 'image1' in state['searchtype']:
try:
immaxcol = state['features'].index('immax1')
logger.info('Using immax1 for flux.')
        except ValueError:
immaxcol = state['features'].index('snr1')
logger.info('Warning: Using snr1 for flux.')
# read cands
for pklfile in pkllist:
loc, prop = read_candidates(pklfile)
ffm = []
if (loc):
times = int2mjd(state, loc)
for (mint,maxt) in zip(np.arange(times.min()-period/2,times.max()+period/2,period), np.arange(times.min()+period/2,times.max()+3*period/2,period)):
ff = np.array([prop[i][immaxcol] for i in range(len(prop))])
mm = ff[np.where( (times >= mint) & (times < maxt) )]
                if len(mm):  # explicit length check; numpy array truth value is ambiguous
                    ffm.append(mm.max())
ffm.sort()
logger.info('Found %d unique pulses.' % len(ffm))
# calculate params
if pkllist.index(pklfile) == 0:
duration0 = times.max() - times.min()
ratemin = 1/duration0
ratemax = len(ffm)/duration0
rates = np.linspace(ratemin, ratemax, nbins)
f0m = ffm
elif pkllist.index(pklfile) == 1:
duration1 = times.max() - times.min()
f1m = ffm
elif pkllist.index(pklfile) == 2:
f2m = ffm
elif pkllist.index(pklfile) == 3:
f3m = ffm
# calc rates
f0 = []; f1 = []; f2 = []; f3 = []
for rr in rates:
num0 = (np.round(rr*duration0)).astype(int)
num1 = (np.round(rr*duration1)).astype(int)
if (num0 > 0) and (num0 <= len(f0m)):
f0.append((rr,f0m[-num0]))
if (num1 > 0) and (num1 <= len(f1m)):
f1.append((rr,f1m[-num1]))
if (num1 > 0) and (num1 <= len(f2m)):
f2.append((rr,f2m[-num1]))
if len(pkllist) == 4:
if f3m:
if (num1 > 0) and (num1 <= len(f3m)):
f3.append((rr,f3m[-num1]))
if f3:
return {0: np.array(f0).transpose(), 1: np.array(f1).transpose(), 2: np.array(f2).transpose(), 3: np.array(f3).transpose()}
else:
return {0: np.array(f0).transpose(), 1: np.array(f1).transpose(), 2: np.array(f2).transpose()}
|
Visualize candidates in a set of pkl files from pulsar observations.
Input pkl list assumed to start with on-axis pulsar scan, followed by off-axis scans.
nbins for output histogram. period is pulsar period in seconds (used to find single peak for cluster of detections).
|
def Detect(self, baseline, host_data):
"""Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise.
"""
result = CheckResult()
for detector in self.detectors:
finding = detector(baseline, host_data)
if finding:
result.ExtendAnomalies([finding])
if result:
return result
|
Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise.
|
def get_instance(self, payload):
"""
Build an instance of ConnectAppInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance
:rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance
"""
return ConnectAppInstance(self._version, payload, account_sid=self._solution['account_sid'], )
|
Build an instance of ConnectAppInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance
:rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance
|
def dqdv_cycle(cycle, splitter=True, **kwargs):
"""Convenience functions for creating dq-dv data from given capacity and
voltage cycle.
Returns the a DataFrame with a 'voltage' and a 'incremental_capacity'
column.
Args:
cycle (pandas.DataFrame): the cycle data ('voltage', 'capacity',
'direction' (1 or -1)).
splitter (bool): insert a np.NaN row between charge and discharge.
    Returns:
        (voltage, incremental_capacity): tuple of numpy arrays.
Example:
>>> cycle_df = my_data.get_cap(
        ...     1,
        ...     categorical_column=True,
        ...     method="forth-and-forth"
        ... )
>>> voltage, incremental = ica.dqdv_cycle(cycle_df)
"""
c_first = cycle.loc[cycle["direction"] == -1]
c_last = cycle.loc[cycle["direction"] == 1]
converter = Converter(**kwargs)
converter.set_data(c_first["capacity"], c_first["voltage"])
converter.inspect_data()
converter.pre_process_data()
converter.increment_data()
converter.post_process_data()
voltage_first = converter.voltage_processed
incremental_capacity_first = converter.incremental_capacity
if splitter:
voltage_first = np.append(voltage_first, np.NaN)
incremental_capacity_first = np.append(incremental_capacity_first,
np.NaN)
converter = Converter(**kwargs)
converter.set_data(c_last["capacity"], c_last["voltage"])
converter.inspect_data()
converter.pre_process_data()
converter.increment_data()
converter.post_process_data()
voltage_last = converter.voltage_processed[::-1]
incremental_capacity_last = converter.incremental_capacity[::-1]
voltage = np.concatenate((voltage_first,
voltage_last))
incremental_capacity = np.concatenate((incremental_capacity_first,
incremental_capacity_last))
return voltage, incremental_capacity
|
Convenience function for creating dq-dv data from a given capacity and
voltage cycle.
Returns a tuple of 'voltage' and 'incremental_capacity' arrays.
Args:
cycle (pandas.DataFrame): the cycle data ('voltage', 'capacity',
'direction' (1 or -1)).
splitter (bool): insert a np.NaN row between charge and discharge.
Returns:
    (voltage, incremental_capacity): tuple of numpy arrays.
Example:
>>> cycle_df = my_data.get_cap(
...     1,
...     categorical_column=True,
...     method="forth-and-forth"
... )
>>> voltage, incremental = ica.dqdv_cycle(cycle_df)
|
def deleteRole(self, *args, **kwargs):
"""
Delete Role
Delete a role. This operation will succeed regardless of whether or not
the role exists.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["deleteRole"], *args, **kwargs)
|
Delete Role
Delete a role. This operation will succeed regardless of whether or not
the role exists.
This method is ``stable``
|
def filter(self, source_file, encoding): # noqa A001
"""Parse file."""
with codecs.open(source_file, 'r', encoding=encoding) as f:
text = f.read()
return [filters.SourceText(self._filter(text), source_file, encoding, 'context')]
|
Parse file.
|
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._color is not None:
return False
if self._alias is not None:
return False
if self._description is not None:
return False
if self._attachment is not None:
return False
if self._pointer is not None:
return False
if self._status is not None:
return False
if self._redirect_url is not None:
return False
return True
|
:rtype: bool
|
def add(self, data_source, module, package=None):
"""
Add data_source to model. Tries to import module, then looks for data
source class definition.
:param data_source: Name of data source to add.
:type data_source: str
:param module: Module in which data source resides. Can be absolute or
relative. See :func:`importlib.import_module`
:type module: str
:param package: Optional, but must be used if module is relative.
:type package: str
.. seealso::
:func:`importlib.import_module`
"""
super(Data, self).add(data_source, module, package)
# only update layer info if it is missing!
if data_source not in self.layer:
# copy data source parameters to :attr:`Layer.layer`
self.layer[data_source] = {'module': module, 'package': package}
# add a place holder for the data source object when it's constructed
self.objects[data_source] = None
|
Add data_source to model. Tries to import module, then looks for data
source class definition.
:param data_source: Name of data source to add.
:type data_source: str
:param module: Module in which data source resides. Can be absolute or
relative. See :func:`importlib.import_module`
:type module: str
:param package: Optional, but must be used if module is relative.
:type package: str
.. seealso::
:func:`importlib.import_module`
|
def clone(self, callable=None, **overrides):
"""Clones the Callable optionally with new settings
Args:
callable: New callable function to wrap
**overrides: Parameter overrides to apply
Returns:
Cloned Callable object
"""
old = {k: v for k, v in self.get_param_values()
if k not in ['callable', 'name']}
params = dict(old, **overrides)
callable = self.callable if callable is None else callable
return self.__class__(callable, **params)
|
Clones the Callable optionally with new settings
Args:
callable: New callable function to wrap
**overrides: Parameter overrides to apply
Returns:
Cloned Callable object
|
def do_hit(self, arg):
"""
Usage:
hit create [<numWorkers> <reward> <duration>]
hit extend <HITid> [(--assignments <number>)] [(--expiration <minutes>)]
hit expire (--all | <HITid> ...)
hit dispose (--all | <HITid> ...)
hit delete (--all | <HITid> ...)
hit list [--active | --reviewable] [--all-studies]
hit help
"""
if arg['create']:
self.hit_create(arg['<numWorkers>'], arg['<reward>'],
arg['<duration>'])
self.update_hit_tally()
elif arg['extend']:
self.amt_services_wrapper.hit_extend(arg['<HITid>'], arg['<number>'], arg['<minutes>'])
elif arg['expire']:
self.amt_services_wrapper.hit_expire(arg['--all'], arg['<HITid>'])
self.update_hit_tally()
elif arg['delete'] or arg['dispose']:
self.amt_services_wrapper.hit_delete(arg['--all'], arg['<HITid>'])
self.update_hit_tally()
elif arg['list']:
self.hit_list(arg['--active'], arg['--reviewable'], arg['--all-studies'])
else:
self.help_hit()
|
Usage:
hit create [<numWorkers> <reward> <duration>]
hit extend <HITid> [(--assignments <number>)] [(--expiration <minutes>)]
hit expire (--all | <HITid> ...)
hit dispose (--all | <HITid> ...)
hit delete (--all | <HITid> ...)
hit list [--active | --reviewable] [--all-studies]
hit help
|
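A hedged sketch of the argument dict that drives the dispatch above; the keys mirror the docopt usage string, and the values and shell instance name are illustrative:
# Keys follow the Usage string above; the values and 'shell' are illustrative.
arg = {'create': True, 'extend': False, 'expire': False,
       'dispose': False, 'delete': False, 'list': False,
       '<numWorkers>': '10', '<reward>': '1.00', '<duration>': '2'}
shell.do_hit(arg)  # dispatches to hit_create and updates the HIT tally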
def single(self, predicate):
"""
Returns the single element that matches the given predicate.
Raises:
* NoMatchingElement error if no matching elements are found
* MoreThanOneMatchingElement error if more than one matching
element is found
:param predicate: predicate as a lambda expression
:return: Matching element as object
"""
result = self.where(predicate).to_list()
count = len(result)
if count == 0:
raise NoMatchingElement("No matching element found")
if count > 1:
raise MoreThanOneMatchingElement(
"More than one matching element found. Use where instead"
)
return result[0]
|
Returns the single element that matches the given predicate.
Raises:
* NoMatchingElement error if no matching elements are found
* MoreThanOneMatchingElement error if more than one matching
element is found
:param predicate: predicate as a lambda expression
:return: Matching element as object
|
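A hedged usage sketch for single(), assuming an enumerable wrapper that exposes where() and single():
# Hypothetical sketch; 'Enumerable' stands in for the surrounding collection type.
numbers = Enumerable([1, 2, 3, 4])
numbers.single(lambda x: x == 3)   # returns 3
numbers.single(lambda x: x > 2)    # raises MoreThanOneMatchingElement
numbers.single(lambda x: x > 9)    # raises NoMatchingElement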
def cmd(send, msg, args):
"""Gets the location of a ZIP code
Syntax: {command} (zipcode)
Powered by STANDS4, www.stands4.com
"""
uid = args['config']['api']['stands4uid']
token = args['config']['api']['stands4token']
parser = arguments.ArgParser(args['config'])
parser.add_argument('zipcode', action=arguments.ZipParser)
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
req = get("http://www.stands4.com/services/v2/zip.php", params={'uid': uid, 'tokenid': token, 'zip': cmdargs.zipcode})
xml = etree.fromstring(req.content, parser=etree.XMLParser(recover=True))
location = xml.find('location').text
send("%s: %s" % (cmdargs.zipcode, location))
|
Gets the location of a ZIP code
Syntax: {command} (zipcode)
Powered by STANDS4, www.stands4.com
|
def sendCommands(comPort, commands):
"""Send X10 commands using the FireCracker on comPort
comPort should be the name of a serial port on the host platform. On
Windows, for example, 'com1'.
commands should be a string consisting of X10 commands separated by
commas. For example, 'A1 On, A Dim, A Dim, A Dim, A Lamps Off'. The
letter is a house code (A-P) and the number is the device number (1-16).
Possible commands for a house code / device number combination are
'On' and 'Off'. The commands 'Bright' and 'Dim' should be used with a
house code alone after sending an On command to a specific device. The
'All On', 'All Off', 'Lamps On', and 'Lamps Off' commands should also
be used with a house code alone.
# Turn on module A1
>>> sendCommands('com1', 'A1 On')
# Turn all modules with house code A off
>>> sendCommands('com1', 'A All Off')
# Turn all lamp modules with house code B on
>>> sendCommands('com1', 'B Lamps On')
# Turn on module A1 and dim it 3 steps, then brighten it 1 step
>>> sendCommands('com1', 'A1 On, A Dim, A Dim, A Dim, A Bright')
"""
mutex.acquire()
try:
try:
port = serial.Serial(port=comPort)
header = '11010101 10101010'
footer = '10101101'
for command in _translateCommands(commands):
_sendBinaryData(port, header + command + footer)
except serial.SerialException:
print('Unable to open serial port %s' % comPort)
print('')
raise
finally:
mutex.release()
|
Send X10 commands using the FireCracker on comPort
comPort should be the name of a serial port on the host platform. On
Windows, for example, 'com1'.
commands should be a string consisting of X10 commands separated by
commas. For example, 'A1 On, A Dim, A Dim, A Dim, A Lamps Off'. The
letter is a house code (A-P) and the number is the device number (1-16).
Possible commands for a house code / device number combination are
'On' and 'Off'. The commands 'Bright' and 'Dim' should be used with a
house code alone after sending an On command to a specific device. The
'All On', 'All Off', 'Lamps On', and 'Lamps Off' commands should also
be used with a house code alone.
# Turn on module A1
>>> sendCommands('com1', 'A1 On')
# Turn all modules with house code A off
>>> sendCommands('com1', 'A All Off')
# Turn all lamp modules with house code B on
>>> sendCommands('com1', 'B Lamps On')
# Turn on module A1 and dim it 3 steps, then brighten it 1 step
>>> sendCommands('com1', 'A1 On, A Dim, A Dim, A Dim, A Bright')
|
def monitor(self, name, cb, request=None, notify_disconnect=False, queue=None):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected, RemoteError, or Cancelled
:param WorkQueue queue: A work queue through which monitor callbacks are dispatched.
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either:
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
"""
R = Subscription(self, name, cb, notify_disconnect=notify_disconnect, queue=queue)
R._S = super(Context, self).monitor(name, R._event, request)
return R
|
Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected, RemoteError, or Cancelled
:param WorkQueue queue: A work queue through which monitor callbacks are dispatched.
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either:
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
|
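A minimal subscription sketch, assuming the threaded p4p client Context; the PV name is a placeholder:
# Hedged sketch using p4p's threaded client; 'some:pv' is a placeholder name.
from p4p.client.thread import Context

def on_update(value):
    # With notify_disconnect=True, 'value' may also be a Disconnected,
    # RemoteError, or Cancelled exception instance.
    print(value)

ctxt = Context('pva')
sub = ctxt.monitor('some:pv', on_update, notify_disconnect=True)
# ... later, cancel the subscription ...
sub.close()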