def parse_command_line(self, argv=None):
"""Parse the jhubctl command line arguments.
This overwrites traitlets' default `parse_command_line` method
and tailors it to jhubctl's needs.
"""
argv = sys.argv[1:] if argv is None else argv
self.argv = [py3compat.cast_unicode(arg) for arg in argv]
# Append Provider Class to the list of configurable items.
ProviderClass = getattr(providers, self.provider_type)
self.classes.append(ProviderClass)
if any(x in self.argv for x in ('-h', '--help-all', '--help')):
self.print_help('--help-all' in self.argv)
self.exit(0)
if '--version' in self.argv or '-V' in self.argv:
self.print_version()
self.exit(0)
# Generate a configuration file if flag is given.
if '--generate-config' in self.argv:
conf = self.generate_config_file()
with open(self.config_file, 'w') as f:
f.write(conf)
self.exit(0)
# If not config, parse commands.
## Run sanity checks.
# Check that the minimum number of arguments have been called.
if len(self.argv) < 2:
raise JhubctlError(
"Not enough arguments. \n\n"
"Expected: jhubctl <action> <resource> <name>")
# Check action
self.resource_action = self.argv[0]
if self.resource_action not in self.subcommands:
raise JhubctlError(
f"Subcommand is not recognized; must be one of these: {self.subcommands}")
# Check resource
self.resource_type = self.argv[1]
if self.resource_type not in self.resources:
raise JhubctlError(
f"First argument after a subcommand must one of these"
f"resources: {self.resources}"
)
# Get name of resource.
try:
self.resource_name = self.argv[2]
except IndexError:
if self.resource_action != "get":
raise JhubctlError(
"Not enough arguments. \n\n"
"Expected: jhubctl <action> <resource> <name>")
else:
self.resource_name = None
# flatten flags&aliases, so cl-args get appropriate priority:
flags, aliases = self.flatten_flags()
loader = KVArgParseConfigLoader(argv=argv, aliases=aliases,
flags=flags, log=self.log)
config = loader.load_config()
self.update_config(config)
# store unparsed args in extra_args
self.extra_args = loader.extra_args
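For orientation, a sketch of the invocation shapes this parser accepts (resource names here are hypothetical; per the code above, `get` is the only action that may omit the name):

# jhubctl get hub          -> resource_action='get',    resource_type='hub', resource_name=None
# jhubctl create hub myhub -> resource_action='create', resource_type='hub', resource_name='myhub'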
def length_of_overlap(first_start, first_end, second_start, second_end):
"""
Find the length of the overlapping part of two segments.
Args:
first_start (float): Start of the first segment.
first_end (float): End of the first segment.
second_start (float): Start of the second segment.
second_end (float): End of the second segment.
Returns:
float: The amount of overlap or 0 if they don't overlap at all.
"""
if first_end <= second_start or first_start >= second_end:
return 0.0
if first_start < second_start:
if first_end < second_end:
return abs(first_end - second_start)
else:
return abs(second_end - second_start)
# first_start >= second_start from here on; this also covers equal starts
if first_end > second_end:
return abs(second_end - first_start)
return abs(first_end - first_start)
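A few quick checks of the cases above, including the equal-start case (values are made up):

assert length_of_overlap(0.0, 5.0, 3.0, 8.0) == 2.0   # partial overlap
assert length_of_overlap(0.0, 10.0, 3.0, 8.0) == 5.0  # second segment inside first
assert length_of_overlap(0.0, 2.0, 3.0, 8.0) == 0.0   # disjoint segments
assert length_of_overlap(3.0, 5.0, 3.0, 8.0) == 2.0   # equal starts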
def _createAssociateRequest(self, endpoint, assoc_type, session_type):
"""Create an association request for the given assoc_type and
session_type.
@param endpoint: The endpoint whose server_url will be
queried. The important bit about the endpoint is whether
it's in compatibility mode (OpenID 1.1)
@param assoc_type: The association type that the request
should ask for.
@type assoc_type: str
@param session_type: The session type that should be used in
the association request. The session_type is used to
create an association session object, and that session
object is asked for any additional fields that it needs to
add to the request.
@type session_type: str
@returns: a pair of the association session object and the
request message that will be sent to the server.
@rtype: (association session type (depends on session_type),
openid.message.Message)
"""
session_type_class = self.session_types[session_type]
assoc_session = session_type_class()
args = {
'mode': 'associate',
'assoc_type': assoc_type,
}
if not endpoint.compatibilityMode():
args['ns'] = OPENID2_NS
# Leave out the session type if we're in compatibility mode
# *and* it's no-encryption.
if (not endpoint.compatibilityMode() or
assoc_session.session_type != 'no-encryption'):
args['session_type'] = assoc_session.session_type
args.update(assoc_session.getRequest())
message = Message.fromOpenIDArgs(args)
return assoc_session, message
def install_new_pipeline():
"""
Install above transformer into the existing pipeline creator.
"""
def new_create_pipeline(context, *args, **kwargs):
result = old_create_pipeline(context, *args, **kwargs)
result.insert(1, DAAPObjectTransformer(context))
return result
old_create_pipeline = Pipeline.create_pipeline
Pipeline.create_pipeline = new_create_pipeline
def cms_identify(self, url, timeout=15, headers={}):
"""
Function called when attempting to determine if a URL is identified
as being this particular CMS.
@param url: the URL to attempt to identify.
@param timeout: number of seconds before a timeout occurs on a http
connection.
@param headers: custom HTTP headers as expected by requests.
@return: a boolean value indicating whether this CMS is identified
as being this particular CMS.
"""
self.out.debug("cms_identify")
if isinstance(self.regular_file_url, str):
rfu = [self.regular_file_url]
else:
rfu = self.regular_file_url
is_cms = False
for regular_file_url in rfu:
try:
hash = self.enumerate_file_hash(url, regular_file_url, timeout,
headers)
except RuntimeError:
continue
hash_exists = self.vf.has_hash(hash)
if hash_exists:
is_cms = True
break
return is_cms
def double_percent_options_to_metadata(options):
"""Parse double percent options"""
matches = _PERCENT_CELL.findall('# %%' + options)
# Fail safe when regexp matching fails #116
# (occurs e.g. if square brackets are found in the title)
if not matches:
return {'title': options.strip()}
matches = matches[0]
# The fifth match is the JSON metadata
if matches[4]:
metadata = json_options_to_metadata(matches[4], add_brackets=False)
else:
metadata = {}
# Third match is cell type
cell_type = matches[2]
if cell_type:
metadata['cell_type'] = cell_type[1:-1]
# The second and fourth matches are the description
title = [matches[i].strip() for i in [1, 3]]
title = [part for part in title if part]
if title:
title = ' '.join(title)
cell_depth = 0
while title.startswith('%'):
cell_depth += 1
title = title[1:]
if cell_depth:
metadata['cell_depth'] = cell_depth
metadata['title'] = title.strip()
return metadata
def com_google_fonts_check_name_typographicsubfamilyname(ttFont, style_with_spaces):
""" Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries. """
from fontbakery.utils import name_entry_id
failed = False
if style_with_spaces in ['Regular',
'Italic',
'Bold',
'Bold Italic']:
for name in ttFont['name'].names:
if name.nameID == NameID.TYPOGRAPHIC_SUBFAMILY_NAME:
failed = True
yield FAIL, Message("ribbi",
("Font style is '{}' and, for that reason,"
" it is not expected to have a "
"{} entry!").format(style_with_spaces,
name_entry_id(name)))
else:
expected_value = style_with_spaces
has_entry = False
for name in ttFont['name'].names:
if name.nameID == NameID.TYPOGRAPHIC_SUBFAMILY_NAME:
string = name.string.decode(name.getEncoding()).strip()
if string == expected_value:
has_entry = True
else:
failed = True
yield FAIL, Message("non-ribbi-bad-value",
("Entry {} on the 'name' table: "
"Expected '{}' "
"but got '{}'.").format(name_entry_id(name),
expected_value,
string))
if not failed and not has_entry:
failed = True
yield FAIL, Message("non-ribbi-lacks-entry",
("non-RIBBI fonts must have a"
" TYPOGRAPHIC_SUBFAMILY_NAME entry"
" on the name table."))
if not failed:
yield PASS, "TYPOGRAPHIC_SUBFAMILY_NAME entries are all good."
def convertToBool():
""" Convert a byte value to boolean (0 or 1) if
the global flag strictBool is True
"""
if not OPTIONS.strictBool.value:
return []
REQUIRES.add('strictbool.asm')
result = []
result.append('pop af')
result.append('call __NORMALIZE_BOOLEAN')
result.append('push af')
return result
def string(self, writesize=None):
'''
Looks like a file handle
'''
if not self.finished:
self.finished = True
return self.content
return ''
def getCollectorPath(self):
"""
Returns collector path
servers.host.cpu.total.idle
return "cpu"
"""
# If we don't have a host name, assume it's just the third part of the
# metric path
if self.host is None:
return self.path.split('.')[2]
offset = self.path.index(self.host)
offset += len(self.host) + 1
endoffset = self.path.index('.', offset)
return self.path[offset:endoffset]
def upload_nginx_site_conf(site_name, template_name=None, context=None, enable=True):
"""Upload Nginx site configuration from a template."""
template_name = template_name or [u'nginx/%s.conf' % site_name, u'nginx/site.conf']
site_available = u'/etc/nginx/sites-available/%s' % site_name
upload_template(template_name, site_available, context=context, use_sudo=True)
if enable:
enable_site(site_name)
def ingest(event):
'''Ingest a finished recording to the Opencast server.
'''
# Update status
set_service_status(Service.INGEST, ServiceStatus.BUSY)
notify.notify('STATUS=Uploading')
recording_state(event.uid, 'uploading')
update_event_status(event, Status.UPLOADING)
# Select ingest service
# The ingest service to use is selected at random from the available
# ingest services to ensure that not every capture agent uses the same
# service at the same time
service = config('service-ingest')
service = service[randrange(0, len(service))]
logger.info('Selecting ingest service to use: ' + service)
# create mediapackage
logger.info('Creating new mediapackage')
mediapackage = http_request(service + '/createMediaPackage')
# extract workflow_def, workflow_config and add DC catalogs
prop = 'org.opencastproject.capture.agent.properties'
dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
workflow_def, workflow_config = None, []  # defaults in case no agent properties attachment exists
for attachment in event.get_data().get('attach'):
data = attachment.get('data')
if attachment.get('x-apple-filename') == prop:
workflow_def, workflow_config = get_config_params(data)
# Check for dublincore catalogs
elif attachment.get('fmttype') == 'application/xml' and dcns in data:
name = attachment.get('x-apple-filename', '').rsplit('.', 1)[0]
logger.info('Adding %s DC catalog' % name)
fields = [('mediaPackage', mediapackage),
('flavor', 'dublincore/%s' % name),
('dublinCore', data.encode('utf-8'))]
mediapackage = http_request(service + '/addDCCatalog', fields)
# add track
for (flavor, track) in event.get_tracks():
logger.info('Adding track ({0} -> {1})'.format(flavor, track))
track = track.encode('ascii', 'ignore')
fields = [('mediaPackage', mediapackage), ('flavor', flavor),
('BODY1', (pycurl.FORM_FILE, track))]
mediapackage = http_request(service + '/addTrack', fields)
# ingest
logger.info('Ingest recording')
fields = [('mediaPackage', mediapackage)]
if workflow_def:
fields.append(('workflowDefinitionId', workflow_def))
if event.uid:
fields.append(('workflowInstanceId',
event.uid.encode('ascii', 'ignore')))
fields += workflow_config
mediapackage = http_request(service + '/ingest', fields)
# Update status
recording_state(event.uid, 'upload_finished')
update_event_status(event, Status.FINISHED_UPLOADING)
notify.notify('STATUS=Running')
set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
logger.info('Finished ingest')
def load_transform(fname):
"""Load affine transform from file
Parameters
----------
fname : str or None
Filename of an LTA or FSL-style MAT transform file.
If ``None``, return an identity transform
Returns
-------
affine : (4, 4) numpy.ndarray
"""
if fname is None:
return np.eye(4)
if fname.endswith('.mat'):
return np.loadtxt(fname)
elif fname.endswith('.lta'):
with open(fname, 'rb') as fobj:
for line in fobj:
if line.startswith(b'1 4 4'):
break
lines = fobj.readlines()[:4]
return np.genfromtxt(lines)
raise ValueError("Unknown transform type; pass FSL (.mat) or LTA (.lta)")
def select_month(self, month):
"""
选择月份
@2018/06/03 pandas 的索引问题导致
https://github.com/pandas-dev/pandas/issues/21299
因此先用set_index去重做一次index
影响的有selects,select_time,select_month,get_bar
@2018/06/04
当选择的时间越界/股票不存在,raise ValueError
@2018/06/04 pandas索引问题已经解决
全部恢复
"""
def _select_month(month):
return self.data.loc[month, slice(None)]
try:
return self.new(_select_month(month), self.type, self.if_fq)
except:
raise ValueError('QA CANNOT GET THIS Month {} '.format(month))
def make_df_from_batch(batch_name, batch_col="b01", reader=None, reader_label=None):
"""Create a pandas DataFrame with the info needed for ``cellpy`` to load
the runs.
Args:
batch_name (str): Name of the batch.
batch_col (str): The column where the batch name is in the db.
reader (method): the db-loader method.
reader_label (str): the label for the db-loader (if db-loader method is
not given)
Returns: info_df (pandas DataFrame)
"""
logger.debug(f"batch_name, batch_col: {batch_name}, {batch_col}")
if reader is None:
reader_obj = get_db_reader(reader_label)
reader = reader_obj()
srnos = reader.select_batch(batch_name, batch_col)
logger.debug("srnos:" + str(srnos))
info_dict = _create_info_dict(reader, srnos)
info_df = pd.DataFrame(info_dict)
info_df = info_df.sort_values(["groups", "filenames"])
info_df = _make_unique_groups(info_df)
info_df["labels"] = info_df["filenames"].apply(create_labels)
info_df.set_index("filenames", inplace=True)
return info_df
def color(string, name, style='normal', when='auto'):
""" Change the color of the given string. """
if name not in colors:
from .text import oxford_comma
raise ValueError("unknown color '{}'.\nknown colors are: {}".format(
name, oxford_comma(["'{}'".format(x) for x in sorted(colors)])))
if style not in styles:
from .text import oxford_comma
raise ValueError("unknown style '{}'.\nknown styles are: {}".format(
style, oxford_comma(["'{}'".format(x) for x in sorted(styles)])))
prefix = '\033[%d;%dm' % (styles[style], colors[name])
suffix = '\033[%d;%dm' % (styles['normal'], colors['normal'])
color_string = prefix + string + suffix
if when == 'always':
return color_string
elif when == 'auto':
return color_string if sys.stdout.isatty() else string
elif when == 'never':
return string
else:
raise ValueError("when must be one of: 'always', 'auto', 'never'")
def first_solar_spectral_loss(self, pw, airmass_absolute):
"""
Use the :py:func:`first_solar_spectral_correction` function to
calculate the spectral loss modifier. The model coefficients are
specific to the module's cell type, and are determined by searching
for one of the following keys in self.module_parameters (in order):
'first_solar_spectral_coefficients' (user-supplied coefficients)
'Technology' - a string describing the cell type, can be read from
the CEC module parameter database
'Material' - a string describing the cell type, can be read from
the Sandia module database.
Parameters
----------
pw : array-like
atmospheric precipitable water (cm).
airmass_absolute : array-like
absolute (pressure corrected) airmass.
Returns
-------
modifier: array-like
spectral mismatch factor (unitless) which can be multiplied
with broadband irradiance reaching a module's cells to estimate
effective irradiance, i.e., the irradiance that is converted to
electrical current.
"""
if 'first_solar_spectral_coefficients' in \
self.module_parameters.keys():
coefficients = \
self.module_parameters['first_solar_spectral_coefficients']
module_type = None
else:
module_type = self._infer_cell_type()
coefficients = None
return atmosphere.first_solar_spectral_correction(pw,
airmass_absolute,
module_type,
coefficients)
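A hypothetical call on a pvlib-style system object carrying module_parameters (all names below are illustrative):

modifier = system.first_solar_spectral_loss(pw=1.5, airmass_absolute=1.2)
effective_irradiance = broadband_irradiance * modifier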
def putParamset(self, paramset, data={}):
"""
Some devices act upon changes to paramsets.
A "putted" paramset must not contain all keys available in the specified paramset,
just the ones which are writable and should be changed.
"""
try:
if paramset in self._PARAMSETS and data:
self._proxy.putParamset(self._ADDRESS, paramset, data)
# We update all paramsets to at least have a temporarily accurate state for the device.
# This might not be true for tasks that take long to complete (lifting a rollershutter completely etc.).
# For this the server-process has to call the updateParamsets-method when it receives events for the device.
self.updateParamsets()
return True
else:
return False
except Exception as err:
LOG.error("HMGeneric.putParamset: Exception: " + str(err))
return False
def motif3struct_bin(A):
'''
Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node.
Parameters
----------
A : NxN np.ndarray
binary directed connection matrix
Returns
-------
f : 13x1 np.ndarray
motif frequency vector (averaged over all nodes)
F : 13xN np.ndarray
motif frequency matrix
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m3n = mot['m3n']
id3 = mot['id3'].squeeze()
n = len(A) # number of vertices in A
f = np.zeros((13,)) # motif count for whole graph
F = np.zeros((13, n)) # motif frequency
A = binarize(A, copy=True) # ensure A is binary
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 2):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
# v2: neighbors of v1 (>u)
V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
V2[V1] = 0 # not already in V1
# and all neighbors of u (>v1)
V2 = np.logical_or(
np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
for v2 in np.where(V2)[0]:
a = np.array((A[v1, u], A[v2, u], A[u, v1],
A[v2, v1], A[u, v2], A[v1, v2]))
s = np.uint32(np.sum(np.power(10, np.arange(5, -1, -1)) * a))
ix = id3[np.squeeze(s == m3n)] - 1
F[ix, u] += 1
F[ix, v1] += 1
F[ix, v2] += 1
f[ix] += 1
return f, F
def on_push(self, device):
"""Press button. Check DEFAULT_DELAY.
:param scapy.packet.Packet device: Scapy packet
:return: None
"""
src = device.src.lower()
if last_execution[src] + self.settings.get('delay', DEFAULT_DELAY) > time.time():
return
last_execution[src] = time.time()
self.execute(device)
def present(name=None, start_addr=None, end_addr=None, data=None, **api_opts):
'''
Ensure range record is present.
infoblox_range.present:
start_addr: '129.97.150.160',
end_addr: '129.97.150.170',
Verbose state example:
.. code-block:: yaml
infoblox_range.present:
data: {
'always_update_dns': False,
'authority': False,
'comment': 'range of IP addresses used for salt.. was used for ghost images deployment',
'ddns_generate_hostname': True,
'deny_all_clients': False,
'deny_bootp': False,
'disable': False,
'email_list': [],
'enable_ddns': False,
'enable_dhcp_thresholds': False,
'enable_email_warnings': False,
'enable_ifmap_publishing': False,
'enable_snmp_warnings': False,
'end_addr': '129.97.150.169',
'exclude': [],
'extattrs': {},
'fingerprint_filter_rules': [],
'high_water_mark': 95,
'high_water_mark_reset': 85,
'ignore_dhcp_option_list_request': False,
'lease_scavenge_time': -1,
'logic_filter_rules': [],
'low_water_mark': 0,
'low_water_mark_reset': 10,
'mac_filter_rules': [],
'member': {'_struct': 'dhcpmember',
'ipv4addr': '129.97.128.9',
'name': 'cn-dhcp-mc.example.ca'},
'ms_options': [],
'nac_filter_rules': [],
'name': 'ghost-range',
'network': '129.97.150.0/24',
'network_view': 'default',
'option_filter_rules': [],
'options': [{'name': 'dhcp-lease-time',
'num': 51,
'use_option': False,
'value': '43200',
'vendor_class': 'DHCP'}],
'recycle_leases': True,
'relay_agent_filter_rules': [],
'server_association_type': 'MEMBER',
'start_addr': '129.97.150.160',
'update_dns_on_lease_renewal': False,
'use_authority': False,
'use_bootfile': False,
'use_bootserver': False,
'use_ddns_domainname': False,
'use_ddns_generate_hostname': True,
'use_deny_bootp': False,
'use_email_list': False,
'use_enable_ddns': False,
'use_enable_dhcp_thresholds': False,
'use_enable_ifmap_publishing': False,
'use_ignore_dhcp_option_list_request': False,
'use_known_clients': False,
'use_lease_scavenge_time': False,
'use_nextserver': False,
'use_options': False,
'use_recycle_leases': False,
'use_unknown_clients': False,
'use_update_dns_on_lease_renewal': False
}
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
if not data:
data = {}
if 'name' not in data:
data.update({'name': name})
if 'start_addr' not in data:
data.update({'start_addr': start_addr})
if 'end_addr' not in data:
data.update({'end_addr': end_addr})
obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts)
if obj is None:
obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts)
if obj is None:
obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts)
if obj:
diff = __salt__['infoblox.diff_objects'](data, obj)
if not diff:
ret['result'] = True
ret['comment'] = 'supplied fields in correct state'
return ret
if diff:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to update record'
return ret
new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts)
ret['result'] = True
ret['comment'] = 'record fields updated'
ret['changes'] = {'diff': diff}
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to create record {0}'.format(name)
return ret
new_obj_ref = __salt__['infoblox.create_ipv4_range'](data, **api_opts)
new_obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts)
ret['result'] = True
ret['comment'] = 'record created'
ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}}
return ret
def parse_hostname(hostname, default_port):
'''
Parse hostname string and return a tuple of (host, port)
If the port is missing from the hostname string then use default_port.
If anything is not valid, return None.
hostname should contain a host and an optional space-delimited port:
host port
As an attempt to prevent foolish mistakes the parser also tries to identify
the port when it is colon delimited not space delimited. As in host:port.
This is problematic since IPV6 addresses may have colons in them.
Consequently the use of colon delimited ports is strongly discouraged.
An ipv6 address must have at least 2 colons.
'''
try:
host, sep, port = hostname.strip().rpartition(' ')
if not port: # invalid nothing there
return None
if not host: # no space separated port, only host as port use default port
host = port
port = default_port
# ipv6 must have two or more colons
if host.count(':') == 1: # only one so may be using colon delimited port
host, sep, port = host.rpartition(':')
if not host: # colon but not host so invalid
return None
if not port: # colon but no port so use default
port = default_port
host = host.strip()
try:
port = int(port)
except ValueError:
return None
except AttributeError:
return None
return (host, port)
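Illustrative calls with default_port=8080 (hosts are made up):

parse_hostname('example.com 9000', 8080)  # -> ('example.com', 9000)
parse_hostname('example.com', 8080)       # -> ('example.com', 8080), default port
parse_hostname('example.com:9000', 8080)  # -> ('example.com', 9000), colon form
parse_hostname('::1 9000', 8080)          # -> ('::1', 9000); two colons, so host not split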
def find(*_, **kwargs):
""" Find user by id/email"""
click.echo(green('\nFind user:'))
click.echo(green('-' * 40))
with get_app().app_context():
user = find_user(kwargs)
if not user:
click.echo(red('Not found\n'))
return
click.echo(str(user) + '\n')
return
def create_entity(self,
workspace_id,
entity,
description=None,
metadata=None,
fuzzy_match=None,
values=None,
**kwargs):
"""
Create entity.
Create a new entity, or enable a system entity.
This operation is limited to 1000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str entity: The name of the entity. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, and hyphen characters.
- It must be no longer than 64 characters.
If you specify an entity name beginning with the reserved prefix `sys-`, it must
be the name of a system entity that you want to enable. (Any entity content
specified with the request is ignored.).
:param str description: The description of the entity. This string cannot contain
carriage return, newline, or tab characters, and it must be no longer than 128
characters.
:param dict metadata: Any metadata related to the entity.
:param bool fuzzy_match: Whether to use fuzzy matching for the entity.
:param list[CreateValue] values: An array of objects describing the entity values.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if entity is None:
raise ValueError('entity must be provided')
if values is not None:
values = [self._convert_model(x, CreateValue) for x in values]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('conversation', 'V1', 'create_entity')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'entity': entity,
'description': description,
'metadata': metadata,
'fuzzy_match': fuzzy_match,
'values': values
}
url = '/v1/workspaces/{0}/entities'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
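A hypothetical call, with `assistant` standing in for a configured client instance:

response = assistant.create_entity(
    workspace_id='<workspace-id>',
    entity='appliance',
    description='Household appliances',
    fuzzy_match=True,
)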
def winning_name(self):
"""
Returns a ``string`` of the winning team's name, such as 'Purdue
Boilermakers'.
"""
if self.winner == HOME:
if 'cbb/schools' not in str(self._home_name):
return str(self._home_name)
return self._home_name.text()
if 'cbb/schools' not in str(self._away_name):
return str(self._away_name)
return self._away_name.text()
def dumps():
"""Returns a string representation of the FILTERS dictionary."""
d = {}
for k, v in FILTERS.items():
d[dr.get_name(k)] = list(v)
return _dumps(d)
def _enum_from_direction(direction):
"""Convert a string representation of a direction to an enum.
Args:
direction (str): A direction to order by. Must be one of
:attr:`~.firestore.Query.ASCENDING` or
:attr:`~.firestore.Query.DESCENDING`.
Returns:
int: The enum corresponding to ``direction``.
Raises:
ValueError: If ``direction`` is not a valid direction.
"""
if isinstance(direction, int):
return direction
if direction == Query.ASCENDING:
return enums.StructuredQuery.Direction.ASCENDING
elif direction == Query.DESCENDING:
return enums.StructuredQuery.Direction.DESCENDING
else:
msg = _BAD_DIR_STRING.format(direction, Query.ASCENDING, Query.DESCENDING)
raise ValueError(msg)
def parseFilename(filename):
"""
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
Modified from 'pydrizzle.fileutil' to allow this
module to be independent of PyDrizzle/MultiDrizzle.
"""
# Parse out any extension specified in filename
_indx = filename.find('[')
if _indx > 0:
# Read extension name provided
_fname = filename[:_indx]
extn = filename[_indx+1:-1]
# An extension was provided, so parse it out...
if repr(extn).find(',') > 1:
_extns = extn.split(',')
# Two values given for extension:
# for example, 'sci,1' or 'dq,1'
_extn = [_extns[0],int(_extns[1])]
elif repr(extn).find('/') > 1:
# We are working with GEIS group syntax
_indx = str(extn[:extn.find('/')])
_extn = [int(_indx)]
elif isinstance(extn, str):
# Only one extension value specified...
if extn.isdigit():
# We only have an extension number specified as a string...
_nextn = int(extn)
else:
# We only have EXTNAME specified...
_nextn = extn
_extn = [_nextn]
else:
# Only integer extension number given, or default of 0 is used.
_extn = [int(extn)]
else:
_fname = filename
_extn = None
return _fname,_extn
def _parse_common_paths_file(project_path):
"""
Parses a common_paths.xml file and returns a dictionary of paths,
a dictionary of annotation level descriptions and the filename
of the style file.
Parameters
----------
project_path : str
path to the root directory of the MMAX project
Returns
-------
paths : dict
maps from MMAX file types (str, e.g. 'basedata' or 'markable')
to the relative path (str) containing files of this type
annotations : dict
maps from MMAX annotation level names (str, e.g. 'sentence',
'primmark') to a dict of features.
The features are: 'schemefile' (maps to a file),
'customization_file' (ditto) and 'file_extension' (maps to the
file name ending used for all annotations files of this level)
stylesheet : str
name of the (default) style file used in this MMAX project
"""
common_paths_file = os.path.join(project_path, 'common_paths.xml')
tree = etree.parse(common_paths_file)
paths = {}
path_vars = ['basedata', 'scheme', 'style', 'customization',
'markable']
for path_var in path_vars:
specific_path = tree.find('//{}_path'.format(path_var)).text
paths[path_var] = specific_path if specific_path else project_path
paths['project_path'] = project_path
annotations = {}
for level in tree.iterfind('//level'):
annotations[level.attrib['name']] = {
'schemefile': level.attrib['schemefile'],
'customization_file': level.attrib['customization_file'],
'file_extension': level.text[1:]}
stylesheet = tree.find('//stylesheet').text
return paths, annotations, stylesheet
def put(self, resource):
""" Edits an existing resource
Args:
resource - gophish.models.Model - The resource instance
"""
endpoint = self.endpoint
if resource.id:
endpoint = self._build_url(endpoint, resource.id)
response = self.api.execute("PUT", endpoint, json=resource.as_dict())
if not response.ok:
raise Error.parse(response.json())
return self._cls.parse(response.json())
def get_module_names(p):
'''Accepts a path to search for modules. The method will filter on files
that end in .pyc or files that start with __.
Arguments:
p (string): The path to search
Returns:
list of file names
'''
mods = [f.split('.')[0] for f in listdir(p)
if isfile(join(p, f)) and not f.endswith('.pyc') and not f.startswith('__')]
print(len(mods))
return mods
def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the given element."""
xml_value = self._processor.parse_at_element(element, state)
return _hooks_apply_after_parse(self._hooks, state, xml_value)
def delete_certificate_issuer_config_by_id(self, certificate_issuer_configuration_id, **kwargs): # noqa: E501
"""Delete certificate issuer configuration. # noqa: E501
Delete the configured certificate issuer configuration. You can only delete the configurations of custom certificates. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_certificate_issuer_config_by_id(certificate_issuer_configuration_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str certificate_issuer_configuration_id: The ID of the certificate issuer configuration. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_certificate_issuer_config_by_id_with_http_info(certificate_issuer_configuration_id, **kwargs) # noqa: E501
else:
(data) = self.delete_certificate_issuer_config_by_id_with_http_info(certificate_issuer_configuration_id, **kwargs) # noqa: E501
return data
def _start_monitoring(self):
""" Internal method that monitors the directory for changes """
# Grab all the timestamp info
before = self._file_timestamp_info(self.path)
while True:
gevent.sleep(1)
after = self._file_timestamp_info(self.path)
added = [fname for fname in after.keys() if fname not in before.keys()]
removed = [fname for fname in before.keys() if fname not in after.keys()]
modified = []
for fname in before.keys():
if fname not in removed:
if after.get(fname) != before.get(fname):  # compare snapshots; avoids re-statting a file that may vanish
modified.append(fname)
if added:
self.on_create(added)
if removed:
self.on_delete(removed)
if modified:
self.on_modify(modified)
before = after
def check_constraints(self, instance):
'''
Return fieldnames which need recalculation.
'''
recalc_fields = []
for constraint in self.constraints:
try:
constraint(self.model, instance)
except constraints.InvalidConstraint as e:
recalc_fields.extend(e.fields)
return recalc_fields
def survey(self, pk=None, **kwargs):
"""Get the survey_spec for the job template.
To write a survey, use the modify command with the --survey-spec parameter.
=====API DOCS=====
Get the survey specification of a resource object.
:param pk: Primary key of the resource to retrieve survey from. Tower CLI will only attempt to
read *that* object if ``pk`` is provided (not ``None``).
:type pk: int
:param `**kwargs`: Keyword arguments used to look up resource object to retrieve survey if ``pk``
is not provided.
:returns: loaded JSON of the retrieved survey specification of the resource object.
:rtype: dict
=====API DOCS=====
"""
job_template = self.get(pk=pk, **kwargs)
if settings.format == 'human':
settings.format = 'json'
return client.get(self._survey_endpoint(job_template['id'])).json()
def maybe(func):
"""Calls `f` in a try/except block, returning a `Fail` object if
the call fails in any way. If any of the arguments to the call are Fail
objects, the call is not attempted."""
name = object_name(func)
@wraps(func)
def maybe_wrapped(*args, **kwargs):
"""@maybe wrapped version of ``func``."""
fails = [
(name, k, v)
for k, v in chain(enumerate(args), kwargs.items())
if isinstance(v, Fail)]
if fails:
return Fail(func, fails=fails)
try:
result = func(*args, **kwargs)
except Exception as exc:
return Fail(func, exception=exc)
else:
if isinstance(result, Fail):
result.add_call(func)
return result
return maybe_wrapped
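An illustrative use of the decorator (`Fail` comes from the same module as `maybe`):

@maybe
def divide(a, b):
    return a / b

bad = divide(1, 0)        # returns a Fail wrapping the ZeroDivisionError
chained = divide(bad, 2)  # not attempted; the Fail propagates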
def user(self, message):
""" Creates a user log (if user logging is turned on)
Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is
defined, sends to STDOUT
Note: Does *not* use Java string formatting like Sikuli.
Format your message with Python ``basestring.format()`` instead.
"""
if Settings.UserLogs:
self._write_log(Settings.UserLogPrefix, Settings.UserLogTime, message)
def num_tasks(self, work_spec_name):
'''Get the total number of work units for some work spec.'''
return self.num_finished(work_spec_name) + \
self.num_failed(work_spec_name) + \
self.registry.len(WORK_UNITS_ + work_spec_name)
def _data(self):
"""A simpler version of data to avoid infinite recursion in some cases.
Don't use this.
"""
if self.is_caching:
return self.cache
with open(self.path, "r") as f:
return json.load(f)
def sorted_proposals(proposals, scopepref=None, typepref=None):
"""Sort a list of proposals
Return a sorted list of the given `CodeAssistProposal`\s.
`scopepref` can be a list of proposal scopes. Defaults to
``['parameter_keyword', 'local', 'global', 'imported',
'attribute', 'builtin', 'keyword']``.
`typepref` can be a list of proposal types. Defaults to
``['class', 'function', 'instance', 'module', None]``.
(`None` stands for completions with no type like keywords.)
"""
sorter = _ProposalSorter(proposals, scopepref, typepref)
return sorter.get_sorted_proposal_list()
def set_storage_controller_bootable(self, name, bootable):
"""Sets the bootable flag of the storage controller with the given name.
in name of type str
in bootable of type bool
raises :class:`VBoxErrorObjectNotFound`
A storage controller with given name doesn't exist.
raises :class:`VBoxErrorObjectInUse`
Another storage controller is marked as bootable already.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
if not isinstance(bootable, bool):
raise TypeError("bootable can only be an instance of type bool")
self._call("setStorageControllerBootable",
in_p=[name, bootable])
def assortativity_wei(CIJ, flag=0):
'''
The assortativity coefficient is a correlation coefficient between the
strengths (weighted degrees) of all nodes on two opposite ends of a link.
A positive assortativity coefficient indicates that nodes tend to link to
other nodes with the same or similar strength.
Parameters
----------
CIJ : NxN np.ndarray
weighted directed/undirected connection matrix
flag : int
0 : undirected graph; strength/strength correlation
1 : directed graph; out-strength/in-strength correlation
2 : directed graph; in-strength/out-strength correlation
3 : directed graph; out-strength/out-strength correlation
4 : directed graph; in-strength/in-strength correlation
Returns
-------
r : float
assortativity coefficient
Notes
-----
The main diagonal should be empty. For flag 1
the function computes the directed assortativity described in Rubinov
and Sporns (2010) NeuroImage.
'''
if flag == 0: # undirected version
str = strengths_und(CIJ)
i, j = np.where(np.triu(CIJ, 1) > 0)
K = len(i)
stri = str[i]
strj = str[j]
else:
ist, ost = strengths_dir(CIJ) # directed version
i, j = np.where(CIJ > 0)
K = len(i)
if flag == 1:
stri = ost[i]
strj = ist[j]
elif flag == 2:
stri = ist[i]
strj = ost[j]
elif flag == 3:
stri = ost[i]
strj = ost[j]
elif flag == 4:  # in-strength/in-strength, per the docstring
stri = ist[i]
strj = ist[j]
else:
raise ValueError('Flag must be 0-4')
# compute assortativity
term1 = np.sum(stri * strj) / K
term2 = np.square(np.sum(.5 * (stri + strj)) / K)
term3 = np.sum(.5 * (stri * stri + strj * strj)) / K
r = (term1 - term2) / (term3 - term2)
return r
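A minimal usage sketch, assuming the bctpy-style helpers used above (`strengths_und` etc.) are importable alongside this function:

import numpy as np

rng = np.random.default_rng(0)
W = rng.random((10, 10))
W = np.triu(W, 1) + np.triu(W, 1).T  # undirected, empty main diagonal
r = assortativity_wei(W, flag=0)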
def append(self, other):
"""
Append a Printable Image at the end of the current instance.
:param other: another PrintableImage
:return: PrintableImage containing data from both self and other
"""
self.data.extend(other.data)
self.height = self.height + other.height
return self
def wgs84_to_utm(lng, lat, utm_crs=None):
""" Convert WGS84 coordinates to UTM. If UTM CRS is not set it will be calculated automatically.
:param lng: longitude in WGS84 system
:type lng: float
:param lat: latitude in WGS84 system
:type lat: float
:param utm_crs: UTM coordinate reference system enum constants
:type utm_crs: constants.CRS or None
:return: east, north coordinates in UTM system
:rtype: float, float
"""
if utm_crs is None:
utm_crs = get_utm_crs(lng, lat)
return transform_point((lng, lat), CRS.WGS84, utm_crs)
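Illustrative calls (coordinates are made up; `CRS` and the UTM zone constant come from the package's constants module):

east, north = wgs84_to_utm(14.5, 46.0)               # UTM CRS inferred from lng/lat
east, north = wgs84_to_utm(14.5, 46.0, CRS.UTM_33N)  # explicit UTM zone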
def get_consumed_read_units_percent(
table_name, gsi_name, lookback_window_start=15, lookback_period=5):
""" Returns the number of consumed read units in percent
:type table_name: str
:param table_name: Name of the DynamoDB table
:type gsi_name: str
:param gsi_name: Name of the GSI
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: float -- Number of consumed reads as a
percentage of provisioned reads
"""
try:
metrics = __get_aws_metric(
table_name,
gsi_name,
lookback_window_start,
lookback_period,
'ConsumedReadCapacityUnits')
except BotoServerError:
raise
if metrics:
lookback_seconds = lookback_period * 60
consumed_read_units = (
float(metrics[0]['Sum']) / float(lookback_seconds))
else:
consumed_read_units = 0
try:
gsi_read_units = dynamodb.get_provisioned_gsi_read_units(
table_name, gsi_name)
consumed_read_units_percent = (
float(consumed_read_units) /
float(gsi_read_units) * 100)
except JSONResponseError:
raise
logger.info('{0} - GSI: {1} - Consumed read units: {2:.2f}%'.format(
table_name, gsi_name, consumed_read_units_percent))
return consumed_read_units_percent
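A worked example of the arithmetic above, with made-up numbers:

# lookback_period = 5 minutes -> lookback_seconds = 300
# CloudWatch Sum = 30000 consumed read units -> 30000 / 300 = 100 units/s
# provisioned GSI read units = 400 -> 100 / 400 * 100 = 25.0%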
def get_queryset(self):
"""
Return queryset limited to not removed entries.
"""
kwargs = {'model': self.model, 'using': self._db}
if hasattr(self, '_hints'):
kwargs['hints'] = self._hints
return self._queryset_class(**kwargs).filter(is_removed=False)
def update_name( self, name ):
"""
Rename the current checklist item. Returns a new ChecklistItem object.
"""
checklistitem_json = self.fetch_json(
uri_path = self.base_uri + '/name',
http_method = 'PUT',
query_params = {'value': name}
)
return self.create_checklist_item(self.idCard, self.idChecklist, checklistitem_json)
def remove_child(self, *sprites):
"""Remove one or several :class:`Sprite` sprites from scene """
# first drop focus
scene = self.get_scene()
if scene:
child_sprites = list(self.all_child_sprites())
if scene._focus_sprite in child_sprites:
scene._focus_sprite = None
for sprite in sprites:
if sprite in self.sprites:
self.sprites.remove(sprite)
sprite._scene = None
sprite.parent = None
self.disconnect_child(sprite)
self._sort()
self.redraw()
def slice(self, start_time, end_time, strict=False):
'''
Slice every annotation contained in the annotation array using
`Annotation.slice`
and return as a new AnnotationArray
See `Annotation.slice` for details about slicing. This function does
not modify the annotations in the original annotation array.
Parameters
----------
start_time : float
The desired start time for slicing in seconds.
end_time
The desired end time for slicing in seconds. Must be greater than
``start_time``.
strict : bool
When ``False`` (default) observations that lie at the boundaries of
the slicing range (see `Annotation.slice` for details) will have
their time and/or duration adjusted such that only the part of the
observation that lies within the trim range is kept. When ``True``
such observations are discarded and not included in the sliced
annotation.
Returns
-------
sliced_array : AnnotationArray
An annotation array where every annotation has been sliced.
'''
sliced_array = AnnotationArray()
for ann in self:
sliced_array.append(ann.slice(start_time, end_time, strict=strict))
return sliced_array
def evaluate(x, amplitude, mean, stddev):
"""
GaussianAbsorption1D model function.
"""
return 1.0 - Gaussian1D.evaluate(x, amplitude, mean, stddev)
def load_deploy_config(deploy_filename, config=None):
'''
Loads any local config overrides in the deploy file.
'''
if not config:
config = Config()
if not deploy_filename:
return
if path.exists(deploy_filename):
extract_file_config(deploy_filename, config)
return config
|
Loads any local config overrides in the deploy file.
|
def removed(name, ruby=None, user=None, gem_bin=None):
'''
Make sure that a gem is not installed.
name
The name of the gem to uninstall
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
    user : None
The user under which to run the ``gem`` command
.. versionadded:: 0.17.0
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if name not in __salt__['gem.list'](name, ruby, gem_bin=gem_bin, runas=user):
ret['result'] = True
ret['comment'] = 'Gem is not installed.'
return ret
if __opts__['test']:
ret['comment'] = 'The gem {0} would have been removed'.format(name)
return ret
if __salt__['gem.uninstall'](name, ruby, gem_bin=gem_bin, runas=user):
ret['result'] = True
ret['changes'][name] = 'Removed'
ret['comment'] = 'Gem was successfully removed.'
else:
ret['result'] = False
ret['comment'] = 'Could not remove gem.'
return ret
|
Make sure that a gem is not installed.
name
The name of the gem to uninstall
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
user : None
The user under which to run the ``gem`` command
.. versionadded:: 0.17.0
|
def sb_filter(fastq, bc, cores, nedit):
''' Filters reads with non-matching sample barcodes
Expects formatted fastq files.
'''
barcodes = set(sb.strip() for sb in bc)
if nedit == 0:
filter_sb = partial(exact_sample_filter2, barcodes=barcodes)
else:
barcodehash = MutationHash(barcodes, nedit)
filter_sb = partial(correcting_sample_filter2, barcodehash=barcodehash)
p = multiprocessing.Pool(cores)
chunks = tz.partition_all(10000, read_fastq(fastq))
bigchunks = tz.partition_all(cores, chunks)
for bigchunk in bigchunks:
for chunk in p.map(filter_sb, list(bigchunk)):
for read in chunk:
sys.stdout.write(read)
|
Filters reads with non-matching sample barcodes
Expects formatted fastq files.
|
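The chunking pattern used above, shown in isolation; a hedged sketch that assumes only the toolz package, with made-up sizes.

import toolz as tz

stream = range(25)
chunks = tz.partition_all(10, stream)    # chunks of up to 10 reads each
bigchunks = tz.partition_all(2, chunks)  # batches of 2 chunks per pool.map call
for batch in bigchunks:
    print([len(chunk) for chunk in batch])  # [10, 10] then [5]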
def paramnames(co):
"""
Get the parameter names from a pycode object.
Returns a 4-tuple of (args, kwonlyargs, varargs, varkwargs).
varargs and varkwargs will be None if the function doesn't take *args or
**kwargs, respectively.
"""
flags = co.co_flags
varnames = co.co_varnames
argcount, kwonlyargcount = co.co_argcount, co.co_kwonlyargcount
total = argcount + kwonlyargcount
args = varnames[:argcount]
kwonlyargs = varnames[argcount:total]
varargs, varkwargs = None, None
if flags & Flag.CO_VARARGS:
varargs = varnames[total]
total += 1
if flags & Flag.CO_VARKEYWORDS:
varkwargs = varnames[total]
return args, kwonlyargs, varargs, varkwargs
|
Get the parameter names from a pycode object.
Returns a 4-tuple of (args, kwonlyargs, varargs, varkwargs).
varargs and varkwargs will be None if the function doesn't take *args or
**kwargs, respectively.
|
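A quick, self-contained check of the same logic against a real code object, assuming the Flag enum above matches the standard CO_VARARGS/CO_VARKEYWORDS values exposed by the inspect module.

import inspect

def sample(a, b, *rest, key=None, **extra):
    pass

co = sample.__code__
total = co.co_argcount + co.co_kwonlyargcount
args = co.co_varnames[:co.co_argcount]             # ('a', 'b')
kwonlyargs = co.co_varnames[co.co_argcount:total]  # ('key',)
varargs = co.co_varnames[total] if co.co_flags & inspect.CO_VARARGS else None
print(args, kwonlyargs, varargs)  # ('a', 'b') ('key',) 'rest'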
def set_manip(self, manip, ovs_sm=None, ovs_lg=None, name='A manipulatable'):
"""stub"""
o3d_manip_id = self.create_o3d_asset(manip,
small_ov_set=ovs_sm,
large_ov_set=ovs_lg,
display_name=name)
self.set_manip_id(o3d_manip_id)
|
stub
|
def cluster(points, radius):
"""
Clustering of points in space.
`radius` is the radius of local search.
    Individual subsets can be accessed through ``actor.info['clusters']``.
.. hint:: |clustering| |clustering.py|_
"""
if isinstance(points, vtk.vtkActor):
poly = points.GetMapper().GetInput()
else:
src = vtk.vtkPointSource()
src.SetNumberOfPoints(len(points))
src.Update()
vpts = src.GetOutput().GetPoints()
for i, p in enumerate(points):
vpts.SetPoint(i, p)
poly = src.GetOutput()
cluster = vtk.vtkEuclideanClusterExtraction()
cluster.SetInputData(poly)
cluster.SetExtractionModeToAllClusters()
cluster.SetRadius(radius)
cluster.ColorClustersOn()
cluster.Update()
idsarr = cluster.GetOutput().GetPointData().GetArray("ClusterId")
Nc = cluster.GetNumberOfExtractedClusters()
sets = [[] for i in range(Nc)]
for i, p in enumerate(points):
sets[idsarr.GetValue(i)].append(p)
acts = []
for i, aset in enumerate(sets):
acts.append(vs.Points(aset, c=i))
actor = Assembly(acts)
actor.info["clusters"] = sets
print("Nr. of extracted clusters", Nc)
if Nc > 10:
print("First ten:")
for i in range(Nc):
if i > 9:
print("...")
break
print("Cluster #" + str(i) + ", N =", len(sets[i]))
print("Access individual clusters through attribute: actor.cluster")
return actor
|
Clustering of points in space.
`radius` is the radius of local search.
Individual subsets can be accessed through ``actor.info['clusters']``.
.. hint:: |clustering| |clustering.py|_
|
def multipublish(self, topic, messages, block=True, timeout=None,
raise_error=True):
"""Publish an iterable of messages to the given topic.
:param topic: the topic to publish to
:param messages: iterable of bytestrings to publish
:param block: wait for a connection to become available before
publishing the message. If block is `False` and no connections
are available, :class:`~gnsq.errors.NSQNoConnections` is raised
:param timeout: if timeout is a positive number, it blocks at most
``timeout`` seconds before raising
:class:`~gnsq.errors.NSQNoConnections`
:param raise_error: if ``True``, it blocks until a response is received
from the nsqd server, and any error response is raised. Otherwise
an :class:`~gevent.event.AsyncResult` is returned
"""
result = AsyncResult()
conn = self._get_connection(block=block, timeout=timeout)
try:
self._response_queues[conn].append(result)
conn.multipublish(topic, messages)
finally:
self._put_connection(conn)
if raise_error:
return result.get()
return result
|
Publish an iterable of messages to the given topic.
:param topic: the topic to publish to
:param messages: iterable of bytestrings to publish
:param block: wait for a connection to become available before
publishing the message. If block is `False` and no connections
are available, :class:`~gnsq.errors.NSQNoConnections` is raised
:param timeout: if timeout is a positive number, it blocks at most
``timeout`` seconds before raising
:class:`~gnsq.errors.NSQNoConnections`
:param raise_error: if ``True``, it blocks until a response is received
from the nsqd server, and any error response is raised. Otherwise
an :class:`~gevent.event.AsyncResult` is returned
|
def execute_cross_join(op, left, right, **kwargs):
"""Execute a cross join in pandas.
Notes
-----
We create a dummy column of all :data:`True` instances and use that as the
join key. This results in the desired Cartesian product behavior guaranteed
by cross join.
"""
# generate a unique name for the temporary join key
key = "cross_join_{}".format(ibis.util.guid())
join_key = {key: True}
new_left = left.assign(**join_key)
new_right = right.assign(**join_key)
# inner/outer doesn't matter because every row matches every other row
result = pd.merge(
new_left,
new_right,
how='inner',
on=key,
copy=False,
suffixes=constants.JOIN_SUFFIXES,
)
# remove the generated key
del result[key]
return result
|
Execute a cross join in pandas.
Notes
-----
We create a dummy column of all :data:`True` instances and use that as the
join key. This results in the desired Cartesian product behavior guaranteed
by cross join.
|
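The dummy-key trick in isolation, as a standalone pandas sketch (the frame contents and key name are illustrative):

import pandas as pd

left = pd.DataFrame({'x': [1, 2]})
right = pd.DataFrame({'y': ['a', 'b']})

key = '_cross_key'  # hypothetical unique column name
result = pd.merge(left.assign(**{key: True}),
                  right.assign(**{key: True}),
                  how='inner', on=key)
del result[key]
print(len(result))  # 4 -- the Cartesian product of 2 x 2 rows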
def diff(local_path, remote_path):
"""Return true if local and remote paths differ in contents"""
with hide('commands'):
if isinstance(local_path, basestring):
with open(local_path) as stream:
local_content = stream.read()
else:
pos = local_path.tell()
local_content = local_path.read()
local_path.seek(pos)
remote_content = StringIO()
with settings(hide('warnings'), warn_only=True):
if get(remote_path, remote_content).failed:
return True
return local_content.strip() != remote_content.getvalue().strip()
|
Return true if local and remote paths differ in contents
|
def _le_from_lt(self, other):
"""Return a <= b. Computed by @total_ordering from (a < b) or (a == b)."""
op_result = self.__lt__(other)
return op_result or self == other
|
Return a <= b. Computed by @total_ordering from (a < b) or (a == b).
|
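For context, a minimal illustration of what functools.total_ordering derives from __lt__ and __eq__ (the class here is a made-up example):

from functools import total_ordering

@total_ordering
class Version:
    def __init__(self, n):
        self.n = n
    def __eq__(self, other):
        return self.n == other.n
    def __lt__(self, other):
        return self.n < other.n

print(Version(1) <= Version(1))  # True, computed as (a < b) or (a == b)
print(Version(2) >= Version(1))  # True, also derived for free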
def polish(commit_indexes=None, urls=None):
'''
Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots
For example, if you have 10 commits in a row where static file links were broken, you could re-write the html
in memory as it is interpreted.
Keyword arguments:
commit_indexes -- A list of indexes to apply the wrapped function to
    urls -- A list of URLs to apply the wrapped function to
'''
def decorator(f):
if commit_indexes:
f.polish_commit_indexes = commit_indexes
if urls:
f.polish_urls = urls
@wraps(f)
def wrappee(*args, **kwargs):
return f(*args, **kwargs)
return wrappee
return decorator
|
Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots
For example, if you have 10 commits in a row where static file links were broken, you could re-write the html
in memory as it is interpreted.
Keyword arguments:
commit_indexes -- A list of indexes to apply the wrapped function to
urls -- A list of URLs to apply the wrapped function to
|
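A hypothetical usage sketch; because @wraps copies the wrapped function's __dict__, the attributes set on f survive on the returned wrapper:

@polish(commit_indexes=[3, 4, 5])
def fix_static_links(html):
    # rewrite broken static-file links in memory (illustrative body)
    return html.replace('http://old-cdn/', '/static/')

print(fix_static_links.polish_commit_indexes)  # [3, 4, 5]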
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
|
Create exception subclass.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
|
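A minimal illustration of the simple case, without the pickling machinery (so attached_to is omitted); the names are made up:

MyError = subclass_exception('MyError', (ValueError,), __name__)

try:
    raise MyError('boom')
except ValueError as exc:  # MyError is a genuine ValueError subclass
    print(type(exc).__name__, exc.args)  # MyError ('boom',)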
def set_monitor(self, monitor):
"""Set the monitor config.
This module assumes that users will connect to the roaster and get
reading information _before_ they want to begin collecting roast
details. This method is critical to enabling the collection of roast
information and ensuring it gets saved in memory.
:param monitor: Value to set the monitor
:type monitor: bool
:returns: None
:raises: InvalidInput
"""
if type(monitor) != bool:
raise InvalidInput("Monitor value must be bool")
self._roast['record'] = bool2int(monitor)
self._q.put(self._config)
if self._roast['record']:
self._roast_start = now_time(str=True)
self._roast['start_time'] = self._roast_start
else:
self._roast_end = now_time(str=True)
self._roast['end_time'] = self._roast_end
self._roast['date'] = now_date(str=True)
et = load_time(self._roast['end_time'])
st = load_time(self._roast['start_time'])
self._roast['duration'] = timedelta2period(et - st)
return self.get_roast_properties()
|
Set the monitor config.
This module assumes that users will connect to the roaster and get
reading information _before_ they want to begin collecting roast
details. This method is critical to enabling the collection of roast
information and ensuring it gets saved in memory.
:param monitor: Value to set the monitor
:type monitor: bool
:returns: None
:raises: InvalidInput
|
def has_src_builder(self):
"""Return whether this Node has a source builder or not.
If this Node doesn't have an explicit source code builder, this
is where we figure out, on the fly, if there's a transparent
source code builder for it.
Note that if we found a source builder, we also set the
self.builder attribute, so that all of the methods that actually
*build* this file don't have to do anything different.
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.sbuilder = self.find_src_builder()
return scb is not None
|
Return whether this Node has a source builder or not.
If this Node doesn't have an explicit source code builder, this
is where we figure out, on the fly, if there's a transparent
source code builder for it.
Note that if we found a source builder, we also set the
self.builder attribute, so that all of the methods that actually
*build* this file don't have to do anything different.
|
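The lazy-caching idiom used above, extracted into a self-contained sketch (this is not SCons code; the class and method bodies are illustrative):

class Node:
    def find_src_builder(self):
        print('expensive lookup runs once')
        return None

    def has_src_builder(self):
        try:
            scb = self.sbuilder           # cached from a previous call?
        except AttributeError:
            scb = self.sbuilder = self.find_src_builder()
        return scb is not None

n = Node()
n.has_src_builder()  # prints; performs the lookup and caches the result
n.has_src_builder()  # silent; answered from the cached attribute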
def _WorkerCommand_launcher(self):
"""Return list commands to start the bootstrap process"""
return [
self.workersArguments.pythonExecutable,
'-m',
'scoop.launch.__main__',
str(self.workerAmount),
str(self.workersArguments.verbose),
]
|
Return the list of commands used to start the bootstrap process
|
def compute(self, x, yerr=0.0, **kwargs):
"""
Pre-compute the covariance matrix and factorize it for a set of times
and uncertainties.
:param x: ``(nsamples,)`` or ``(nsamples, ndim)``
The independent coordinates of the data points.
:param yerr: (optional) ``(nsamples,)`` or scalar
The Gaussian uncertainties on the data points at coordinates
``x``. These values will be added in quadrature to the diagonal of
the covariance matrix.
"""
# Parse the input coordinates and ensure the right memory layout.
self._x = self.parse_samples(x)
self._x = np.ascontiguousarray(self._x, dtype=np.float64)
try:
self._yerr2 = float(yerr)**2 * np.ones(len(x))
except TypeError:
self._yerr2 = self._check_dimensions(yerr) ** 2
self._yerr2 = np.ascontiguousarray(self._yerr2, dtype=np.float64)
# Set up and pre-compute the solver.
self.solver = self.solver_type(self.kernel, **(self.solver_kwargs))
# Include the white noise term.
yerr = np.sqrt(self._yerr2 + np.exp(self._call_white_noise(self._x)))
self.solver.compute(self._x, yerr, **kwargs)
self._const = -0.5 * (len(self._x) * np.log(2 * np.pi) +
self.solver.log_determinant)
self.computed = True
self._alpha = None
|
Pre-compute the covariance matrix and factorize it for a set of times
and uncertainties.
:param x: ``(nsamples,)`` or ``(nsamples, ndim)``
The independent coordinates of the data points.
:param yerr: (optional) ``(nsamples,)`` or scalar
The Gaussian uncertainties on the data points at coordinates
``x``. These values will be added in quadrature to the diagonal of
the covariance matrix.
|
def write_electrodes(self, filename):
"""
Write X Y coordinates of electrodes
"""
        with open(filename, 'w') as fid:
            for i in self.Electrodes:
                fid.write('{0} {1}\n'.format(self.Points[i][0], self.Points[i][1]))
|
Write X Y coordinates of electrodes
|
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
|
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
|
def tag_add(self, *tags):
""" Return a view with the specified tags added """
return View({**self.spec, 'tag': list(set(self.tags) | set(tags))})
|
Return a view with the specified tags added
|
def list_upgrades(refresh=True, backtrack=3, **kwargs): # pylint: disable=W0613
'''
List all available package upgrades.
refresh
Whether or not to sync the portage tree before checking for upgrades.
backtrack
Specifies an integer number of times to backtrack if dependency
calculation fails due to a conflict or an unsatisfied dependency
        (default: ``3``).
    .. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
if salt.utils.data.is_true(refresh):
refresh_db()
return _get_upgradable(backtrack)
|
List all available package upgrades.
refresh
Whether or not to sync the portage tree before checking for upgrades.
backtrack
Specifies an integer number of times to backtrack if dependency
calculation fails due to a conflict or an unsatisfied dependency
(default: ``3``).
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
|
def load_card(self, code, cache=True):
"""
Load a card with the given code from the database. This calls each
    save event hook on the save string before committing it to the database.
    Will cache each resulting card for faster future lookups with this
    method, while respecting the library's cache limit, but only if the
    cache argument is True.
Will return None if the card could not be loaded.
"""
card = self.card_cache.get(code, None)
if card is None:
code = code if isinstance(code, str) else str(code)
with sqlite3.connect(self.dbname) as carddb:
result = carddb.execute(
"SELECT * FROM CARDS WHERE code = ?", (code,))
loadrow = result.fetchone()
if not loadrow:
return None
loaddict = dict(zip(FIELDS, loadrow))
card = self.cardclass(loaddict=loaddict)
if cache:
self.cache_card(card)
return card
|
Load a card with the given code from the database. This calls each
save event hook on the save string before committing it to the database.
Will cache each resulting card for faster future lookups with this
method, while respecting the library's cache limit, but only if the
cache argument is True.
Will return None if the card could not be loaded.
|
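The row-to-dict step in isolation, runnable against an in-memory database; the FIELDS tuple and table schema below are assumptions for illustration:

import sqlite3

FIELDS = ('code', 'name', 'cost')  # assumed to match the column order
with sqlite3.connect(':memory:') as db:
    db.execute('CREATE TABLE CARDS (code TEXT, name TEXT, cost INTEGER)')
    db.execute("INSERT INTO CARDS VALUES ('c1', 'Goblin', 2)")
    row = db.execute('SELECT * FROM CARDS WHERE code = ?', ('c1',)).fetchone()

print(dict(zip(FIELDS, row)))  # {'code': 'c1', 'name': 'Goblin', 'cost': 2}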
def get_metadata(address=None, tx_hash=None, block_hash=None, api_key=None, private=True, coin_symbol='btc'):
'''
Get metadata using blockcypher's API.
This is data on blockcypher's servers and not embedded into the bitcoin (or other) blockchain.
'''
assert is_valid_coin_symbol(coin_symbol), coin_symbol
assert api_key or not private, 'Cannot see private metadata without an API key'
kwarg = get_valid_metadata_identifier(
coin_symbol=coin_symbol,
address=address,
tx_hash=tx_hash,
block_hash=block_hash,
)
url = make_url(coin_symbol, meta=True, **kwarg)
params = {'token': api_key} if api_key else {'private': 'true'}
r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
response_dict = get_valid_json(r)
return response_dict
|
Get metadata using blockcypher's API.
This is data on blockcypher's servers and not embedded into the bitcoin (or other) blockchain.
|
def sort_seeds(uhandle, usort):
""" sort seeds from cluster results"""
cmd = ["sort", "-k", "2", uhandle, "-o", usort]
proc = sps.Popen(cmd, close_fds=True)
proc.communicate()
|
sort seeds from cluster results
|
def get_dependency_graph(self):
"""Generate the dependency graph.
The dependency graph is a simpler subset of the resolve graph. It
contains package name nodes connected directly to their dependencies.
Weak references and conflict requests are not included in the graph.
The dependency graph does not show conflicts.
Returns:
`pygraph.digraph` object.
"""
from rez.vendor.pygraph.classes.digraph import digraph
nodes = {}
edges = set()
for variant in self._resolved_packages:
nodes[variant.name] = variant.qualified_package_name
for request in variant.get_requires():
if not request.conflict:
edges.add((variant.name, request.name))
g = digraph()
node_color = "#AAFFAA"
node_fontsize = 10
attrs = [("fontsize", node_fontsize),
("fillcolor", node_color),
("style", "filled")]
for name, qname in nodes.iteritems():
g.add_node(name, attrs=attrs + [("label", qname)])
for edge in edges:
g.add_edge(edge)
return g
|
Generate the dependency graph.
The dependency graph is a simpler subset of the resolve graph. It
contains package name nodes connected directly to their dependencies.
Weak references and conflict requests are not included in the graph.
The dependency graph does not show conflicts.
Returns:
`pygraph.digraph` object.
|
def do_cleanup(self, subcmd, opts, *args):
"""Recursively clean up the working copy, removing locks, resuming
unfinished operations, etc.
usage:
cleanup [PATH...]
${cmd_option_list}
"""
print "'svn %s' opts: %s" % (subcmd, opts)
print "'svn %s' args: %s" % (subcmd, args)
|
Recursively clean up the working copy, removing locks, resuming
unfinished operations, etc.
usage:
cleanup [PATH...]
${cmd_option_list}
|
def getEffort(self, vehID, time, edgeID):
"""getEffort(string, double, string) -> double
"""
self._connection._beginMessage(tc.CMD_GET_VEHICLE_VARIABLE,
tc.VAR_EDGE_EFFORT, vehID, 1 + 4 + 1 + 4 + 1 + 4 + len(edgeID))
self._connection._string += struct.pack(
"!BiBi", tc.TYPE_COMPOUND, 2, tc.TYPE_INTEGER, time)
self._connection._packString(edgeID)
return self._connection._checkResult(tc.CMD_GET_VEHICLE_VARIABLE, tc.VAR_EDGE_EFFORT, vehID).readDouble()
|
getEffort(string, double, string) -> double
|
def register_command(self, name: str, f: Callable):
"""Registers an existing callable object as a command callback
This method can be used instead of the ``@command`` decorator. Both
do the same thing, but this method is useful for registering callbacks
for methods defined before or outside the scope of your bot object,
allowing you to define methods in another file or wherever, import them,
and register them.
See the documentation for the ``@command`` decorator for more information
        on what your method will receive.
Example:
def process_hello(data):
# do stuff
# later, somewhere else, etc.
pycord.register_command('hello', process_hello)
Args:
name: the command to trigger the callback (see ``@command`` documentation)
f: callable that will be triggered on command processing
"""
self._commands.append((name, f))
|
Registers an existing callable object as a command callback
This method can be used instead of the ``@command`` decorator. Both
do the same thing, but this method is useful for registering callbacks
for methods defined before or outside the scope of your bot object,
allowing you to define methods in another file or wherever, import them,
and register them.
See the documentation for the ``@command`` decorator for more information
on what your method will receive.
Example:
def process_hello(data):
# do stuff
# later, somewhere else, etc.
pycord.register_command('hello', process_hello)
Args:
name: the command to trigger the callback (see ``@command`` documentation)
f: callable that will be triggered on command processing
|
def clear(self):
"""Clears this instance's cache."""
if self._cache is not None:
with self._cache as c, self._out as out:
self.in_flush_all = True
c.clear()
out.clear() # pylint: disable=no-member
self.in_flush_all = False
|
Clears this instance's cache.
|
def get_element_attribute(elem_to_parse, attrib_name, default_value=u''):
"""
:return: an attribute from the parsed element if it has the attribute,
otherwise the default value
"""
element = get_element(elem_to_parse)
if element is None:
return default_value
return element.attrib.get(attrib_name, default_value)
|
:return: an attribute from the parsed element if it has the attribute,
otherwise the default value
|
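Assuming get_element() merely resolves its argument to an Element, the behavior reduces to the standard-library pattern below:

import xml.etree.ElementTree as ET

elem = ET.fromstring('<node id="42">text</node>')
print(elem.attrib.get('id', ''))       # '42'
print(elem.attrib.get('missing', ''))  # '' -- the default value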
def _build_basic_context(self):
"""
Return a standard dict used in django as a template context
"""
# printDebug(str(self.ontospy_graph.toplayer_classes))
topclasses = self.ontospy_graph.toplayer_classes[:]
if len(topclasses) < 3: # massage the toplayer!
for topclass in self.ontospy_graph.toplayer_classes:
for child in topclass.children():
if child not in topclasses: topclasses.append(child)
if not self.static_url:
self.static_url = "static/" # default
context_data = {
"STATIC_URL": self.static_url,
"ontodocs_version": VERSION,
"ontospy_graph": self.ontospy_graph,
"topclasses": topclasses,
"docs_title": self.title,
"namespaces": self.ontospy_graph.namespaces,
"stats": self.ontospy_graph.stats(),
"sources": self.ontospy_graph.sources,
"ontologies": self.ontospy_graph.all_ontologies,
"classes": self.ontospy_graph.all_classes,
"properties": self.ontospy_graph.all_properties,
"objproperties": self.ontospy_graph.all_properties_object,
"dataproperties": self.ontospy_graph.all_properties_datatype,
"annotationproperties":
self.ontospy_graph.all_properties_annotation,
"skosConcepts": self.ontospy_graph.all_skos_concepts,
"instances": []
}
return context_data
|
Return a standard dict used in django as a template context
|
def realpath(self, spec, key):
"""
Resolve and update the path key in the spec with its realpath,
based on the working directory.
"""
if key not in spec:
# do nothing for now
return
if not spec[key]:
logger.warning(
"cannot resolve realpath of '%s' as it is not defined", key)
return
check = realpath(join(spec.get(WORKING_DIR, ''), spec[key]))
if check != spec[key]:
spec[key] = check
logger.warning(
"realpath of '%s' resolved to '%s', spec is updated",
key, check
)
return check
|
Resolve and update the path key in the spec with its realpath,
based on the working directory.
|
def unpackVersion(ver):
'''
    Unpack a system normalized integer representing a software version into its component parts.
Args:
ver (int): System normalized integer value to unpack into a tuple.
Returns:
(int, int, int): A tuple containing the major, minor and patch values shifted out of the integer.
'''
major = (ver >> 20 * 2) & mask20
minor = (ver >> 20) & mask20
patch = ver & mask20
return major, minor, patch
|
Unpack a system normalized integer representing a software version into its component parts.
Args:
ver (int): System normalized integer value to unpack into a tuple.
Returns:
(int, int, int): A tuple containing the major, minor and patch values shifted out of the integer.
|
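A hedged sketch of the inverse packing operation, assuming mask20 is the usual 20-bit mask; note that ver >> 20 * 2 parses as ver >> 40 because * binds tighter than >>.

mask20 = (1 << 20) - 1  # assumed definition of the module-level constant

def packVersion(major, minor, patch):
    # mirror image of unpackVersion: 20 bits per component
    return ((major & mask20) << 40) | ((minor & mask20) << 20) | (patch & mask20)

print(unpackVersion(packVersion(1, 2, 3)))  # (1, 2, 3)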
def getRecommendedRenderTargetSize(self):
"""Suggested size for the intermediate render target that the distortion pulls from."""
fn = self.function_table.getRecommendedRenderTargetSize
pnWidth = c_uint32()
pnHeight = c_uint32()
fn(byref(pnWidth), byref(pnHeight))
return pnWidth.value, pnHeight.value
|
Suggested size for the intermediate render target that the distortion pulls from.
|
def _init_metadata(self):
"""stub"""
super(PDFPreviewFormRecord, self)._init_metadata()
self._preview_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'file'),
'element_label': 'File',
'instructions': 'accepts an Asset Id',
'required': True,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': []
}
|
stub
|
def filename_add_custom_url_params(filename, request):
""" Adds custom url parameters to filename string
:param filename: Initial filename
:type filename: str
:param request: OGC-type request with specified bounding box, cloud coverage for specific product.
:type request: OgcRequest or GeopediaRequest
:return: Filename with custom url parameters in the name
:rtype: str
"""
if hasattr(request, 'custom_url_params') and request.custom_url_params is not None:
for param, value in sorted(request.custom_url_params.items(),
key=lambda parameter_item: parameter_item[0].value):
filename = '_'.join([filename, param.value, str(value)])
return filename
|
Adds custom url parameters to filename string
:param filename: Initial filename
:type filename: str
:param request: OGC-type request with specified bounding box, cloud coverage for specific product.
:type request: OgcRequest or GeopediaRequest
:return: Filename with custom url parameters in the name
:rtype: str
|
def last_blank(src):
"""Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
ll = src.splitlines()[-1]
return (ll == '') or ll.isspace()
|
Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
|
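A few boundary cases worth noting; in particular, a single trailing newline is not enough, because splitlines() drops it and the last line is then the code itself:

print(last_blank('x = 1'))       # False -- no trailing blank
print(last_blank('x = 1\n'))     # False -- splitlines() yields ['x = 1']
print(last_blank('x = 1\n\n'))   # True  -- last line is ''
print(last_blank('x = 1\n   '))  # True  -- last line is whitespace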
def mkdirs(remote_dir, use_sudo=False):
"""
Wrapper around mkdir -pv
Returns a list of directories created
"""
    func = sudo if use_sudo else run
    result = func(' '.join(['mkdir -pv', remote_dir])).split('\n')
    # extract dir list from ["mkdir: created directory `example.com/some/dir'"]
    if result[0]:
        result = [line.split(' ')[3][1:-1] for line in result if line]
    return result
|
Wrapper around mkdir -pv
Returns a list of directories created
|
def render_binary(self, context, result):
"""Return binary responses unmodified."""
context.response.app_iter = iter((result, )) # This wraps the binary string in a WSGI body iterable.
return True
|
Return binary responses unmodified.
|
def map_pixel(point_x, point_y, cellx, celly, xmin, ymax):
'''
Usage:
map_pixel(xcoord, ycoord, x_cell_size, y_cell_size, xmin, ymax)
where:
xmin is leftmost X coordinate in system
ymax is topmost Y coordinate in system
Example:
raster = HMISea.tif
ndv, xsize, ysize, geot, projection, datatype = get_geo_info(raster)
row, col = map_pixel(x,y,geot[1],geot[-1], geot[0],geot[3])
'''
point_x = np.asarray(point_x)
point_y = np.asarray(point_y)
col = np.floor((point_x - xmin) / cellx).astype(int)
row = np.floor((point_y - ymax) / celly).astype(int)
return row, col
|
Usage:
map_pixel(xcoord, ycoord, x_cell_size, y_cell_size, xmin, ymax)
where:
xmin is leftmost X coordinate in system
ymax is topmost Y coordinate in system
Example:
raster = HMISea.tif
ndv, xsize, ysize, geot, projection, datatype = get_geo_info(raster)
row, col = map_pixel(x,y,geot[1],geot[-1], geot[0],geot[3])
|
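A worked example with made-up grid parameters; celly is negative for north-up rasters (as in GDAL geotransforms), which makes the row index increase downward from ymax:

import numpy as np  # required by map_pixel itself

# 10 m cells, origin at (xmin=0, ymax=100)
row, col = map_pixel(25.0, 75.0, cellx=10.0, celly=-10.0, xmin=0.0, ymax=100.0)
print(row, col)  # 2 2: floor((75-100)/-10) = 2 and floor((25-0)/10) = 2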
def element_to_object(elem_to_parse, element_path=None):
"""
:return: the root key, and a dict with all the XML data, but without preserving structure, for instance:
<elem val="attribute"><val>nested text</val><val prop="attr">nested dict text</val>nested dict tail</elem>
{'elem': {
'val': [
u'nested text',
{'prop': u'attr', 'value': [u'nested dict text', u'nested dict tail']},
u'attribute'
]
}}
"""
if isinstance(elem_to_parse, STRING_TYPES) or hasattr(elem_to_parse, 'read'):
# Always strip namespaces if not already parsed
elem_to_parse = strip_namespaces(elem_to_parse)
if element_path is not None:
elem_to_parse = get_element(elem_to_parse, element_path)
element_tree = get_element_tree(elem_to_parse)
element_root = element_tree.getroot()
root_tag = u'' if element_root is None else element_root.tag
return root_tag, {root_tag: _element_to_object(element_root)}
|
:return: the root key, and a dict with all the XML data, but without preserving structure, for instance:
<elem val="attribute"><val>nested text</val><val prop="attr">nested dict text</val>nested dict tail</elem>
{'elem': {
'val': [
u'nested text',
{'prop': u'attr', 'value': [u'nested dict text', u'nested dict tail']},
u'attribute'
]
}}
|
def open_state_machine(path=None, recent_opened_notification=False):
""" Open a state machine from respective file system path
:param str path: file system path to the state machine
    :param bool recent_opened_notification: flag that indicates whether this call should also update the list of
        recently opened state machines
    :rtype: rafcon.core.state_machine.StateMachine
:return: opened state machine
"""
start_time = time.time()
if path is None:
if interface.open_folder_func is None:
logger.error("No function defined for opening a folder")
return
load_path = interface.open_folder_func("Please choose the folder of the state machine")
if load_path is None:
return
else:
load_path = path
if state_machine_manager.is_state_machine_open(load_path):
logger.info("State machine already open. Select state machine instance from path {0}.".format(load_path))
sm = state_machine_manager.get_open_state_machine_of_file_system_path(load_path)
gui_helper_state.gui_singletons.state_machine_manager_model.selected_state_machine_id = sm.state_machine_id
return state_machine_manager.get_open_state_machine_of_file_system_path(load_path)
state_machine = None
try:
state_machine = storage.load_state_machine_from_path(load_path)
state_machine_manager.add_state_machine(state_machine)
if recent_opened_notification:
global_runtime_config.update_recently_opened_state_machines_with(state_machine)
duration = time.time() - start_time
stat = state_machine.root_state.get_states_statistics(0)
logger.info("It took {0:.2}s to load {1} states with {2} hierarchy levels.".format(duration, stat[0], stat[1]))
except (AttributeError, ValueError, IOError) as e:
logger.error('Error while trying to open state machine: {0}'.format(e))
return state_machine
|
Open a state machine from respective file system path
:param str path: file system path to the state machine
:param bool recent_opened_notification: flag that indicates whether this call should also update the list of
    recently opened state machines
:rtype: rafcon.core.state_machine.StateMachine
:return: opened state machine
|
def validate_cookies(session, class_name):
"""
Checks whether we have all the required cookies
    to authenticate on class.coursera.org. Also checks for, and removes,
    a stale session.
"""
if not do_we_have_enough_cookies(session.cookies, class_name):
return False
url = CLASS_URL.format(class_name=class_name) + '/class'
r = session.head(url, allow_redirects=False)
if r.status_code == 200:
return True
else:
logging.debug('Stale session.')
try:
session.cookies.clear('.coursera.org')
except KeyError:
pass
return False
|
Checks whether we have all the required cookies
to authenticate on class.coursera.org. Also checks for, and removes,
a stale session.
|
def create_lbaas_member(self, lbaas_pool, body=None):
"""Creates a lbaas_member."""
return self.post(self.lbaas_members_path % lbaas_pool, body=body)
|
Creates a lbaas_member.
|
def process_presence(self, stanza):
"""Process presence stanza.
Pass it to a handler of the stanza's type and payload namespace.
:Parameters:
- `stanza`: presence stanza to be handled
"""
stanza_type = stanza.stanza_type
return self.__try_handlers(self._presence_handlers, stanza, stanza_type)
|
Process presence stanza.
Pass it to a handler of the stanza's type and payload namespace.
:Parameters:
- `stanza`: presence stanza to be handled
|
def call_status(*args, **kwargs):
'''
Return the status of the lamps.
Options:
* **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.
CLI Example:
.. code-block:: bash
salt '*' hue.status
salt '*' hue.status id=1
salt '*' hue.status id=1,2,3
'''
res = dict()
devices = _get_lights()
    for dev_id in sorted(devices.keys()) if 'id' not in kwargs else _get_devices(kwargs):
dev_id = six.text_type(dev_id)
res[dev_id] = {
'on': devices[dev_id]['state']['on'],
'reachable': devices[dev_id]['state']['reachable']
}
return res
|
Return the status of the lamps.
Options:
* **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.
CLI Example:
.. code-block:: bash
salt '*' hue.status
salt '*' hue.status id=1
salt '*' hue.status id=1,2,3
|
def get_arg_parse_arguments(self):
"""
        During the element declaration, all configuration file requirements
        and all CLI requirements are described once.
        This method builds a dict containing all argparse options.
        It can be used to feed argparse.ArgumentParser.
        You do not need multiple declarations.
"""
ret = dict()
if self._required:
if self.value is not None:
ret["default"] = self.value
else:
ret["required"] = True
ret["dest"] = self._name
if not self.e_type_exclude:
if self.e_type == int or self.e_type == float:
# Just override argparse.add_argument 'type' parameter for int or float.
ret["type"] = self.e_type
if self.value is not None:
ret["default"] = self.value
if self._desc:
ret["help"] = self._desc
return ret
|
During the element declaration, all configuration file requirements
and all CLI requirements are described once.
This method builds a dict containing all argparse options.
It can be used to feed argparse.ArgumentParser.
You do not need multiple declarations.
|
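A sketch of how such a dict can feed ArgumentParser.add_argument; the option name and values below are hypothetical:

import argparse

parser = argparse.ArgumentParser()
opts = {'dest': 'port', 'type': int, 'default': 8080, 'help': 'listen port'}
parser.add_argument('--port', **opts)
print(parser.parse_args([]).port)  # 8080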
def techport(Id):
'''
    In order to use this capability, queries can be issued to the system with the
    following URI format:
        GET /xml-api/id_parameter
    id_parameter (required):
        Type: String
        Default: None
        The id value of the TechPort record. TechPort values range from 0-20000.
        Not all values will yield results. Id values can be obtained through the
        standard TechPort search feature and are visible in the website URLs,
        e.g. http://techport.nasa.gov/view/0000, where 0000 is the id value.
    Example usage:
        http://techport.nasa.gov/xml-api/4795
    Output: an XML file with all field data of the TechPort record.
'''
base_url = 'http://techport.nasa.gov/xml-api/'
if not isinstance(Id, str):
        raise ValueError("The Id arg you provided is not of type str")
else:
base_url += Id
return dispatch_http_get(base_url)
|
In order to use this capability, queries can be issued to the system with the
following URI format:
    GET /xml-api/id_parameter
id_parameter (required):
    Type: String
    Default: None
    The id value of the TechPort record. TechPort values range from 0-20000.
    Not all values will yield results. Id values can be obtained through the
    standard TechPort search feature and are visible in the website URLs,
    e.g. http://techport.nasa.gov/view/0000, where 0000 is the id value.
Example usage:
    http://techport.nasa.gov/xml-api/4795
Output: an XML file with all field data of the TechPort record.
|
def validate(self, generator, axesToMove=None, **kwargs):
# type: (AGenerator, AAxesToMove, **Any) -> AConfigureParams
"""Validate configuration parameters and return validated parameters.
        Doesn't take device state into account, so it can be run in any state.
"""
iterations = 10
# We will return this, so make sure we fill in defaults
for k, default in self._block.configure.defaults.items():
if k not in kwargs:
kwargs[k] = default
# The validated parameters we will eventually return
params = ConfigureParams(generator, axesToMove, **kwargs)
# Make some tasks just for validate
part_contexts = self.create_part_contexts()
# Get any status from all parts
status_part_info = self.run_hooks(
ReportStatusHook(p, c) for p, c in part_contexts.items())
while iterations > 0:
# Try up to 10 times to get a valid set of parameters
iterations -= 1
# Validate the params with all the parts
validate_part_info = self.run_hooks(
ValidateHook(p, c, status_part_info, **kwargs)
for p, c, kwargs in self._part_params(part_contexts, params))
tweaks = ParameterTweakInfo.filter_values(validate_part_info)
if tweaks:
for tweak in tweaks:
deserialized = self._block.configure.takes.elements[
tweak.parameter].validate(tweak.value)
setattr(params, tweak.parameter, deserialized)
self.log.debug(
"Tweaking %s to %s", tweak.parameter, deserialized)
else:
# Consistent set, just return the params
return params
raise ValueError("Could not get a consistent set of parameters")
|
Validate configuration parameters and return validated parameters.
Doesn't take device state into account, so it can be run in any state.
|
def _watchdog_queue(self):
"""
        Fetch keystrokes from the queue and execute the corresponding commands
        """
        while not self.quit:
            k = self.queue.get()
            if k == self.keys['QUIT']:  # quit
                self.quit = True
                self.switch_queue.put('main')
            elif k == self.keys['BYE']:
                self.data.bye()
                self.player.start_queue(self)
            elif k == self.keys['LOOP']:  # loop current song
                self.set_loop()
            elif k == self.keys['RATE']:  # like/unlike the song
                self.set_rate()
            elif k == self.keys['OPENURL']:  # open the current song's Douban album page
                self.set_url()
            elif k == self.keys['HIGH']:  # high-quality audio
                self.set_high()
            elif k == self.keys['PAUSE']:  # pause
                self.set_pause()
            elif k == self.keys['NEXT']:  # next song
                self.player.next()
            elif k == '-' or k == '_':  # volume down
                self.set_volume(-1)
            elif k == '+' or k == '=':  # volume up
                self.set_volume(1)
            elif k == self.keys['MUTE']:  # mute
                self.set_mute()
            elif k in ['1', '2', '3', '4']:  # theme selection
                self.set_theme(k)
            elif k == self.keys['UP'] or k == 'B':  # move down the list
                self.up()
            elif k == self.keys['DOWN'] or k == 'A':  # move up the list
                self.down()
|
Fetch keystrokes from the queue and execute the corresponding commands
|