code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def train_model_from_file(parameter_filename: str,
                          serialization_dir: str,
                          overrides: str = "",
                          file_friendly_logging: bool = False,
                          recover: bool = False,
                          force: bool = False,
                          cache_directory: str = None,
                          cache_prefix: str = None) -> Model:
    """
    A wrapper around :func:`train_model` which loads the params from a file.

    Parameters
    ----------
    parameter_filename : ``str``
        A json parameter file specifying an AllenNLP experiment.
    serialization_dir : ``str``
        The directory in which to save results and logs. We just pass this along to
        :func:`train_model`.
    overrides : ``str``
        A JSON string that we will use to override values in the input parameter file.
    file_friendly_logging : ``bool``, optional (default=False)
        If ``True``, we make our output more friendly to saved model files. We just pass this
        along to :func:`train_model`.
    recover : ``bool``, optional (default=False)
        If ``True``, we will try to recover a training run from an existing serialization
        directory. This is only intended for use when something actually crashed during the middle
        of a run. For continuing training a model on new data, see the ``fine-tune`` command.
    force : ``bool``, optional (default=False)
        If ``True``, we will overwrite the serialization directory if it already exists.
    cache_directory : ``str``, optional
        For caching data pre-processing. See :func:`allennlp.training.util.datasets_from_params`.
    cache_prefix : ``str``, optional
        For caching data pre-processing. See :func:`allennlp.training.util.datasets_from_params`.
    """
    # Load the experiment configuration (applying any JSON overrides) and
    # delegate all actual work to train_model.
    experiment_params = Params.from_file(parameter_filename, overrides)
    return train_model(experiment_params,
                       serialization_dir,
                       file_friendly_logging,
                       recover,
                       force,
                       cache_directory,
                       cache_prefix)
|
A wrapper around :func:`train_model` which loads the params from a file.
Parameters
----------
parameter_filename : ``str``
A json parameter file specifying an AllenNLP experiment.
serialization_dir : ``str``
The directory in which to save results and logs. We just pass this along to
:func:`train_model`.
overrides : ``str``
A JSON string that we will use to override values in the input parameter file.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we make our output more friendly to saved model files. We just pass this
along to :func:`train_model`.
recover : ``bool``, optional (default=False)
If ``True``, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see the ``fine-tune`` command.
force : ``bool``, optional (default=False)
If ``True``, we will overwrite the serialization directory if it already exists.
cache_directory : ``str``, optional
For caching data pre-processing. See :func:`allennlp.training.util.datasets_from_params`.
cache_prefix : ``str``, optional
For caching data pre-processing. See :func:`allennlp.training.util.datasets_from_params`.
|
def statement(self):
    """
    statement : assign_statement
              | expression
              | control
              | empty
    Feature For Loop adds:
              | loop
    Feature Func adds:
              | func
              | return statement
    """
    if self.cur_token.type == TokenTypes.VAR:
        # A leading variable may begin either an assignment or a plain
        # expression.  Record the token stream, consume the variable to
        # peek at the token that follows it, then rewind the tokenizer
        # so the chosen production re-parses from the start.
        self.tokenizer.start_saving(self.cur_token)
        self.variable()
        peek_var = self.cur_token
        self.tokenizer.replay()
        self.eat()
        if peek_var.type == TokenTypes.ASSIGN:
            return self.assign_statement()
        else:
            return self.expression()
    elif self.cur_token.type in TokenTypes.control(self.features):
        return self.control()
    elif self.cur_token.type in TokenTypes.loop(self.features):
        return self.loop()
    elif self.cur_token.type in TokenTypes.func(self.features):
        # The Func feature contributes two statement forms; dispatch on
        # the concrete token type.
        if self.cur_token.type == TokenTypes.FUNC:
            return self.func()
        elif self.cur_token.type == TokenTypes.RETURN:
            return self.return_statement()
    # No production matched the current token.
    self.error("Invalid token or unfinished statement")
|
statement : assign_statement
| expression
| control
| empty
Feature For Loop adds:
| loop
Feature Func adds:
| func
| return statement
|
def try_write(wd_item, record_id, record_prop, login, edit_summary='', write=True):
    """
    Write a PBB_core item and log whether it was created, updated, or skipped.
    All errors are caught and logged.

    :param wd_item: A wikidata item that will be written
    :type wd_item: PBB_Core.WDItemEngine
    :param record_id: An external identifier, to be used for logging
    :type record_id: str
    :param record_prop: Property of the external identifier
    :type record_prop: str
    :param login: PBB_core login instance
    :type login: PBB_login.WDLogin
    :param edit_summary: passed directly to wd_item.write
    :type edit_summary: str
    :param write: If `False`, do not actually perform write. Action will be logged as if write had occurred
    :type write: bool
    :return: True if write did not throw an exception, returns the exception otherwise
    """
    # Decide up front what kind of action this write represents.
    if not wd_item.require_write:
        action = "SKIP"
    elif wd_item.create_new_item:
        action = "CREATE"
    else:
        action = "UPDATE"

    try:
        if write:
            wd_item.write(login=login, edit_summary=edit_summary)
        wdi_core.WDItemEngine.log(
            "INFO",
            format_msg(record_id, record_prop, wd_item.wd_item_id, action) + ";" + str(wd_item.lastrevid))
    except wdi_core.WDApiError as e:
        # API-level failures carry a structured error payload.
        print(e)
        wdi_core.WDItemEngine.log(
            "ERROR",
            format_msg(record_id, record_prop, wd_item.wd_item_id, json.dumps(e.wd_error_msg), type(e)))
        return e
    except Exception as e:
        print(e)
        wdi_core.WDItemEngine.log("ERROR", format_msg(record_id, record_prop, wd_item.wd_item_id, str(e), type(e)))
        return e
    return True
|
Write a PBB_core item. Log if item was created, updated, or skipped.
Catch and log all errors.
:param wd_item: A wikidata item that will be written
:type wd_item: PBB_Core.WDItemEngine
:param record_id: An external identifier, to be used for logging
:type record_id: str
:param record_prop: Property of the external identifier
:type record_prop: str
:param login: PBB_core login instance
:type login: PBB_login.WDLogin
:param edit_summary: passed directly to wd_item.write
:type edit_summary: str
:param write: If `False`, do not actually perform write. Action will be logged as if write had occurred
:type write: bool
:return: True if write did not throw an exception, returns the exception otherwise
|
def force_delete(self):
    """
    Force a hard delete on a soft deleted model.

    Temporarily raises the ``_force_deleting`` flag so that ``delete()``
    performs a hard delete, and always clears it again afterwards — even
    when ``delete()`` raises — so a failed delete cannot leave the
    instance permanently stuck in force-deleting mode.
    """
    self._force_deleting = True
    try:
        self.delete()
    finally:
        # Reset the flag on both success and failure so later delete()
        # calls behave as normal soft deletes again.
        self._force_deleting = False
|
Force a hard delete on a soft deleted model.
|
def rfft2d_freqs(h, w):
    """Computes the 2D spectrum frequency grid for an ``h`` x ``w`` image."""
    fy = np.fft.fftfreq(h)[:, None]
    # An rfft keeps w // 2 + 1 unique frequencies along the last axis; for
    # an odd width one additional frequency is retained here and one pixel
    # is cut off later downstream.
    n_fx = w // 2 + 2 if w % 2 == 1 else w // 2 + 1
    fx = np.fft.fftfreq(w)[:n_fx]
    return np.sqrt(fx * fx + fy * fy)
|
Computes 2D spectrum frequencies.
|
def patch_context(self, context):
    """
    Patches the context to add utility functions

    Sets up the base_url, and the get_url() utility function.
    """
    # Re-class the live context object in place so it picks up
    # PatchedContext's methods without copying any of its state.
    context.__class__ = PatchedContext
    # Simply setting __class__ directly doesn't work
    # because behave.runner.Context.__setattr__ is implemented wrongly.
    object.__setattr__(context, '__class__', PatchedContext)
|
Patches the context to add utility functions
Sets up the base_url, and the get_url() utility function.
|
def get_colours(color_group, color_name, reverse=False):
    """Return the list of hex colour strings for a named colour map.

    :param color_group: colour-map group name (case-insensitive).
    :param color_name: name of the colour map within the group.
    :param reverse: if True, request the reversed map from ``get_map``.
    :return: list of hex colour strings.
    """
    color_group = color_group.lower()
    # ``reverse`` is handled inside get_map(), so hex_colors is already in
    # the requested order.  (Removed: an unreachable string literal after
    # the return that contained a stale alternative implementation.)
    cmap = get_map(color_group, color_name, reverse=reverse)
    return cmap.hex_colors
|
if not reverse:
return cmap.hex_colors
else:
return cmap.hex_colors[::-1]
|
def _parse_args(cls):
"""
Method to parse command line arguments
"""
cls.parser = argparse.ArgumentParser()
cls.parser.add_argument(
"symbol", help="Symbol for horizontal line", nargs="*")
cls.parser.add_argument(
"--color", "-c", help="Color of the line", default=None, nargs=1)
cls.parser.add_argument(
"--version", "-v", action="version", version="0.13")
return cls.parser
|
Method to parse command line arguments
|
def get_as_map(self, key):
    """
    Converts the map element stored under ``key`` into an AnyValueMap,
    or returns an empty AnyValueMap if conversion is not possible.

    :param key: a key of element to get.
    :return: AnyValueMap value of the element or empty AnyValueMap if conversion is not supported.
    """
    # Delegate the actual conversion rules to AnyValueMap.from_value.
    return AnyValueMap.from_value(self.get(key))
|
Converts map element into an AnyValueMap or returns empty AnyValueMap if conversion is not possible.
:param key: a key of element to get.
:return: AnyValueMap value of the element or empty AnyValueMap if conversion is not supported.
|
def register_cli_argument(self, scope, dest, argtype, **kwargs):
    """ Add an argument to the argument registry

    :param scope: The command level to apply the argument registration (e.g. 'mygroup mycommand')
    :type scope: str
    :param dest: The parameter/destination that this argument is for
    :type dest: str
    :param argtype: The argument type for this command argument
    :type argtype: knack.arguments.CLIArgumentType
    :param kwargs: see knack.arguments.CLIArgumentType
    """
    # Layer any keyword overrides on top of the supplied argument type,
    # then store the result under its scope and destination.
    self.arguments[scope][dest] = CLIArgumentType(overrides=argtype, **kwargs)
|
Add an argument to the argument registry
:param scope: The command level to apply the argument registration (e.g. 'mygroup mycommand')
:type scope: str
:param dest: The parameter/destination that this argument is for
:type dest: str
:param argtype: The argument type for this command argument
:type argtype: knack.arguments.CLIArgumentType
:param kwargs: see knack.arguments.CLIArgumentType
|
def asbool(value):
    """Convert certain string values into an appropriate boolean value.

    If ``value`` is not a string, the built-in Python :func:`bool` is
    used to convert the passed parameter.

    :param value: an object to be converted to a boolean value
    :returns: A boolean value
    :raises ValueError: if ``value`` is a string that spells neither a
        recognised true word nor a recognised false word.
    """
    if not isinstance(value, string_types):
        return bool(value)
    # Normalise before comparing against the accepted spellings.
    normalized = value.strip().lower()
    if normalized in ('true', 'yes', 'on', 'y', 't', '1'):
        return True
    if normalized in ('false', 'no', 'off', 'n', 'f', '0'):
        return False
    raise ValueError("String is not true/false: %r" % normalized)
|
Function used to convert certain string values into an appropriate
boolean value. If value is not a string the built-in python
bool function will be used to convert the passed parameter
:param value: an object to be converted to a boolean value
:returns: A boolean value
|
def get_amplification_factors(self, imt, sctx, rctx, dists, stddev_types):
    """
    Returns the amplification factors for the given rupture and site
    conditions.

    :param imt:
        Intensity measure type as an instance of the :class:
        `openquake.hazardlib.imt`
    :param sctx:
        SiteCollection instance
    :param rctx:
        Rupture instance
    :param dists:
        Source to site distances (km)
    :param stddev_types:
        List of required standard deviation types
    :returns:
        * mean_amp - Amplification factors applied to the median ground
                     motion
        * sigma_amps - List of modification factors applied to the
                       standard deviations of ground motion
    """
    dist_level_table = self.get_mean_table(imt, rctx)
    sigma_tables = self.get_sigma_tables(imt, rctx, stddev_types)
    # Interpolate the mean table in log10 space along axis 1 (the
    # parameter-level axis); sigma tables are interpolated linearly.
    mean_interpolator = interp1d(self.values,
                                 numpy.log10(dist_level_table),
                                 axis=1)
    sigma_interpolators = [interp1d(self.values, sigma_table, axis=1)
                           for sigma_table in sigma_tables]
    if self.element == "Rupture":
        # Rupture-controlled amplification: one scalar factor derived from
        # the rupture's parameter value, broadcast across all distances.
        mean_amp = 10.0 ** mean_interpolator(
            getattr(rctx, self.parameter))[0] * numpy.ones_like(dists)
        sigma_amps = []
        for sig_interpolator in sigma_interpolators:
            sigma_amps.append(sig_interpolator(
                getattr(rctx, self.parameter))[0] * numpy.ones_like(dists))
    else:
        # Site-controlled amplification: one factor per site taken from
        # the site collection's parameter values.
        mean_amp = 10.0 ** mean_interpolator(
            getattr(sctx, self.parameter))[0, :]
        sigma_amps = []
        for sig_interpolator in sigma_interpolators:
            sigma_amps.append(sig_interpolator(
                getattr(sctx, self.parameter))[0, :] *
                numpy.ones_like(dists))
    return mean_amp, sigma_amps
|
Returns the amplification factors for the given rupture and site
conditions.
:param imt:
Intensity measure type as an instance of the :class:
`openquake.hazardlib.imt`
:param sctx:
SiteCollection instance
:param rctx:
Rupture instance
:param dists:
Source to site distances (km)
:param stddev_types:
List of required standard deviation types
:returns:
* mean_amp - Amplification factors applied to the median ground
motion
* sigma_amps - List of modification factors applied to the
standard deviations of ground motion
|
def label(self, input_grid):
    """
    Labels input grid using the enhanced watershed algorithm.

    Args:
        input_grid (numpy.ndarray): Grid to be labeled.
    Returns:
        Array of labeled pixels
    """
    # Binarise the local-maxima output: non-negative entries become 1.
    maxima = self.find_local_maxima(input_grid)
    binary_mask = np.where(maxima >= 0, 1, 0)
    # splabel returns a (labeled_array, num_features) tuple; only the
    # labeled array is needed here.
    labeled_array, _ = splabel(binary_mask)
    return labeled_array
|
Labels input grid using enhanced watershed algorithm.
Args:
input_grid (numpy.ndarray): Grid to be labeled.
Returns:
Array of labeled pixels
|
def url_equal(first, second, ignore_scheme=False, ignore_netloc=False, ignore_path=False, ignore_params=False,
              ignore_query=False, ignore_fragment=False):
    """
    Compare two URLs and return True if they are equal; individual URL
    components can be excluded from the comparison.

    :param first: URL
    :param second: URL
    :param ignore_scheme: ignore the scheme
    :param ignore_netloc: ignore the netloc
    :param ignore_path: ignore the path
    :param ignore_params: ignore the params
    :param ignore_query: ignore the query string
    :param ignore_fragment: ignore the fragment
    :return: result of comparison
    """
    # URL anatomy: <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    a = urlparse(first)
    b = urlparse(second)
    checks = (
        (a.scheme == b.scheme, ignore_scheme),
        (a.netloc == b.netloc, ignore_netloc),
        (a.path == b.path, ignore_path),
        (a.params == b.params, ignore_params),
        (a.query == b.query, ignore_query),
        (a.fragment == b.fragment, ignore_fragment),
    )
    # Each component must either match or be explicitly ignored.
    return all(equal or ignored for equal, ignored in checks)
|
Compare two URLs and return True if they are equal, some parts of the URLs can be ignored
:param first: URL
:param second: URL
:param ignore_scheme: ignore the scheme
:param ignore_netloc: ignore the netloc
:param ignore_path: ignore the path
:param ignore_params: ignore the params
:param ignore_query: ignore the query string
:param ignore_fragment: ignore the fragment
:return: result of comparison
|
def append(self, other):
    """Appends stars from another StarPopulation, in place.

    :param other:
        Another :class:`StarPopulation`; must have same columns as ``self``.
    """
    if not isinstance(other, StarPopulation):
        raise TypeError('Only StarPopulation objects can be appended to a StarPopulation.')
    if not np.all(self.stars.columns == other.stars.columns):
        raise ValueError('Two populations must have same columns to combine them.')

    if len(self.constraints) > 0:
        logging.warning('All constraints are cleared when appending another population.')

    # Concatenate the star tables; orbit populations are combined only
    # when both sides actually have one.
    self.stars = pd.concat((self.stars, other.stars))
    if self.orbpop is not None and other.orbpop is not None:
        self.orbpop = self.orbpop + other.orbpop
|
Appends stars from another StarPopulations, in place.
:param other:
Another :class:`StarPopulation`; must have same columns as ``self``.
|
def persist(name, value, config=None):
    '''
    Assign and persist a simple sysctl parameter for this minion. If ``config``
    is not specified, a sensible default will be chosen using
    :mod:`sysctl.default_config <salt.modules.linux_sysctl.default_config>`.

    CLI Example:

    .. code-block:: bash

        salt '*' sysctl.persist net.ipv4.ip_forward 1
    '''
    if config is None:
        config = default_config()
    # Tracks whether an existing line in the config file was rewritten.
    edited = False
    # If the sysctl.conf is not present, add it
    if not os.path.isfile(config):
        sysctl_dir = os.path.dirname(config)
        if not os.path.exists(sysctl_dir):
            os.makedirs(sysctl_dir)
        try:
            with salt.utils.files.fopen(config, 'w+') as _fh:
                _fh.write('#\n# Kernel sysctl configuration\n#\n')
        except (IOError, OSError):
            msg = 'Could not write to file: {0}'
            raise CommandExecutionError(msg.format(config))
    # Read the existing sysctl.conf
    nlines = []
    try:
        with salt.utils.files.fopen(config, 'r') as _fh:
            # Use readlines because this should be a small file
            # and it seems unnecessary to indent the below for
            # loop since it is a fairly large block of code.
            config_data = salt.utils.data.decode(_fh.readlines())
    except (IOError, OSError):
        msg = 'Could not read from file: {0}'
        raise CommandExecutionError(msg.format(config))
    for line in config_data:
        # Comments and non key=value lines are passed through unchanged.
        if line.startswith('#'):
            nlines.append(line)
            continue
        if '=' not in line:
            nlines.append(line)
            continue
        # Strip trailing whitespace and split the k,v
        comps = [i.strip() for i in line.split('=', 1)]
        # On Linux procfs, files such as /proc/sys/net/ipv4/tcp_rmem or any
        # other sysctl with whitespace in it consistently uses 1 tab. Lets
        # allow our users to put a space or tab between multi-value sysctls
        # and have salt not try to set it every single time.
        if isinstance(comps[1], string_types) and ' ' in comps[1]:
            comps[1] = re.sub(r'\s+', '\t', comps[1])
        # Do the same thing for the value 'just in case'
        if isinstance(value, string_types) and ' ' in value:
            value = re.sub(r'\s+', '\t', value)
        # NOTE(review): split('=', 1) on a line containing '=' always yields
        # two parts, so this branch looks unreachable — preserved as-is.
        if len(comps) < 2:
            nlines.append(line)
            continue
        if name == comps[0]:
            # This is the line to edit
            if six.text_type(comps[1]) == six.text_type(value):
                # It is correct in the config, check if it is correct in /proc
                if six.text_type(get(name)) != six.text_type(value):
                    assign(name, value)
                    return 'Updated'
                else:
                    return 'Already set'
            nlines.append('{0} = {1}\n'.format(name, value))
            edited = True
            continue
        else:
            nlines.append(line)
    # Key was not present in the file at all: append it.
    if not edited:
        nlines.append('{0} = {1}\n'.format(name, value))
    try:
        with salt.utils.files.fopen(config, 'wb') as _fh:
            _fh.writelines(salt.utils.data.encode(nlines))
    except (IOError, OSError):
        msg = 'Could not write to file: {0}'
        raise CommandExecutionError(msg.format(config))
    # Apply the value to the running kernel as well as the config file.
    assign(name, value)
    return 'Updated'
|
Assign and persist a simple sysctl parameter for this minion. If ``config``
is not specified, a sensible default will be chosen using
:mod:`sysctl.default_config <salt.modules.linux_sysctl.default_config>`.
CLI Example:
.. code-block:: bash
salt '*' sysctl.persist net.ipv4.ip_forward 1
|
def verify_psd_options(opt, parser):
    """Parses the CLI options and verifies that they are consistent and
    reasonable.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes (psd_model, psd_file, asd_file, psd_estimation,
        psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
    parser : object
        OptionParser instance.
    """
    # Treat a missing attribute the same as an unset option.
    psd_estimation = getattr(opt, 'psd_estimation', None) is not None

    for opt_group in ensure_one_opt_groups:
        ensure_one_opt(opt, parser, opt_group)

    if psd_estimation:
        required_opts(opt, parser,
                      ['--psd-segment-stride', '--psd-segment-length'],
                      required_by="--psd-estimation")
|
Parses the CLI options and verifies that they are consistent and
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
parser : object
OptionParser instance.
|
def send_screen_to_connection(self, screen):
    """
    Hand the (curses) screen over to the connection that runs the core.

    :param screen: curses screen object
    :return: void
    """
    core_runner = self.get_connection_who_have_to_run_core()
    if not core_runner:
        raise Exception('Need Terminal object to do that')
    core_runner.initialize_screen(screen)
|
Actually used for Curses
:param screen:
:return:void
|
def build_acl(self, tenant_name, rule):
    """Build the ACL. """
    # TODO(padkrish) actions that is not deny or allow, throw error
    action = 'permit' if rule['action'] == 'allow' else 'deny'
    acl = "access-list %(tenant)s extended %(action)s %(prot)s " % {
        'tenant': tenant_name,
        'action': action,
        'prot': rule.get('protocol')}
    # Source address and port clauses.
    acl += self.build_acl_ip(self.get_ip_address(rule.get('source_ip_address')))
    acl += self.build_acl_port(rule.get('source_port'))
    # Destination address and port clauses; only the destination-port
    # fragment receives the rule's 'enabled' flag.
    acl += self.build_acl_ip(self.get_ip_address(rule.get('destination_ip_address')))
    acl += self.build_acl_port(rule.get('destination_port'),
                               enabled=rule.get('enabled'))
    return acl
|
Build the ACL.
|
def fullscreen(self):
    ''' Context Manager that enters full-screen mode and restores normal
        mode on exit.

        ::

            with screen.fullscreen():
                print('Hello, world!')
    '''
    out = self._stream
    # Enter the alternate screen buffer and save the window/icon title.
    out.write(self.alt_screen_enable)
    out.write(str(self.save_title(0)))  # 0 = both icon, title
    out.flush()
    try:
        yield self
    finally:
        # Restore normal mode even if the body raised.
        out.write(self.alt_screen_disable)
        out.write(str(self.restore_title(0)))  # 0 = icon & title
        out.flush()
|
Context Manager that enters full-screen mode and restores normal
mode on exit.
::
with screen.fullscreen():
print('Hello, world!')
|
def pair(args):
    """
    %prog pair samfile

    Parses the sam file and retrieve in pairs format,
    query:pos ref:pos
    """
    parser = OptionParser(pair.__doc__)
    opts, args = parser.parse_args(args)

    if len(args) != 1:
        sys.exit(parser.print_help())

    samfile, = args

    def emit(rec):
        # Print one pair line per SAM record.
        print(rec.pairline)

    Sam(samfile, callback=emit)
|
%prog pair samfile
Parses the sam file and retrieve in pairs format,
query:pos ref:pos
|
def _apply_local_transforms(p, ts):
    """
    Given a 2d array of single shot results (outer axis iterates over shots, inner axis over bits)
    and a list of assignment probability matrices (one for each bit in the readout, ordered like
    the inner axis of results) apply local 2x2 matrices to each bit index.

    :param np.array p: An array that enumerates a function indexed by bitstrings::

        f(ijk...) = p[i,j,k,...]

    :param Sequence[np.array] ts: A sequence of 2x2 transform-matrices, one for each bit.
    :return: ``p_transformed`` an array with as many dimensions as there are bits with the result of
        contracting p along each axis by the corresponding bit transformation.

            p_transformed[ijk...] = f'(ijk...) = sum_lmn... ts[0][il] ts[1][jm] ts[2][kn] f(lmn...)

    :rtype: np.array
    """
    # Reshape so there is one tensor axis per qubit; each bit can then be
    # contracted independently.
    p_corrected = _bitstring_probs_by_qubit(p)
    nq = p_corrected.ndim
    for idx, trafo_idx in enumerate(ts):
        # this contraction pattern looks like
        # 'ij,abcd...jklm...->abcd...iklm...' so it properly applies a "local"
        # transformation to a single tensor-index without changing the order of
        # indices
        einsum_pat = ('ij,' + _CHARS[:idx] + 'j' + _CHARS[idx:nq - 1]
                      + '->' + _CHARS[:idx] + 'i' + _CHARS[idx:nq - 1])
        p_corrected = np.einsum(einsum_pat, trafo_idx, p_corrected)
    return p_corrected
|
Given a 2d array of single shot results (outer axis iterates over shots, inner axis over bits)
and a list of assignment probability matrices (one for each bit in the readout, ordered like
the inner axis of results) apply local 2x2 matrices to each bit index.
:param np.array p: An array that enumerates a function indexed by bitstrings::
f(ijk...) = p[i,j,k,...]
:param Sequence[np.array] ts: A sequence of 2x2 transform-matrices, one for each bit.
:return: ``p_transformed`` an array with as many dimensions as there are bits with the result of
contracting p along each axis by the corresponding bit transformation.
p_transformed[ijk...] = f'(ijk...) = sum_lmn... ts[0][il] ts[1][jm] ts[2][kn] f(lmn...)
:rtype: np.array
|
def generateImplicitParameters(obj):
    """
    Generate a UID if one does not exist.

    This is just a dummy implementation, for now.
    """
    if hasattr(obj, 'uid'):
        return
    # Build a reasonably unique identifier: "<timestamp> - <random>@<host>".
    rand = int(random.random() * 100000)
    timestamp = dateTimeToString(datetime.datetime.now(utc))
    host = socket.gethostname()
    obj.add(ContentLine('UID', [], "{0} - {1}@{2}".format(timestamp, rand, host)))
|
Generate a UID if one does not exist.
This is just a dummy implementation, for now.
|
def find_xref(self, parser):
    """Internal function used to locate the first XRef."""
    # search the last xref table by scanning the file backwards.
    prev = None
    for line in parser.revreadlines():
        line = line.strip()
        if self.debug:
            logging.debug('find_xref: %r' % line)
        if line == b'startxref':
            break
        if line:
            # Reading backwards, the most recent non-empty line before
            # 'startxref' is the byte offset that follows it in the file.
            prev = line
    else:
        # Ran off the start of the file without seeing 'startxref'.
        raise PDFNoValidXRef('Unexpected EOF')
    if self.debug:
        logging.info('xref found: pos=%r' % prev)
    # NOTE: Python 2 code — ``long`` converts the offset bytes to an integer.
    return long(prev)
|
Internal function used to locate the first XRef.
|
def getch():
    """
    get character. waiting for key
    """
    # Switch the terminal to the pre-built raw settings so a single
    # keypress can be read without waiting for Enter; always restore the
    # saved settings afterwards, even if the read is interrupted.
    try:
        termios.tcsetattr(_fd, termios.TCSANOW, _new_settings)
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(_fd, termios.TCSADRAIN, _old_settings)
    return ch
|
get character. waiting for key
|
def RunValidation(feed, options, problems):
  """Validate feed, returning the loaded Schedule and exit code.

  Args:
    feed: GTFS file, either path of the file as a string or a file object
    options: options object returned by optparse
    problems: transitfeed.ProblemReporter instance

  Returns:
    a transitfeed.Schedule object, exit code and plain text string of other
    problems
    Exit code is 2 if an extension is provided but can't be loaded, 1 if
    problems are found and 0 if the Schedule is problem free.
    plain text string is '' if no other problems are found.
  """
  util.CheckVersion(problems, options.latest_version)
  # TODO: Add tests for this flag in testfeedvalidator.py
  if options.extension:
    # An extension is a Python module providing its own GtfsFactory.
    try:
      __import__(options.extension)
      extension_module = sys.modules[options.extension]
    except ImportError:
      # TODO: Document extensions in a wiki page, place link here
      print("Could not import extension %s! Please ensure it is a proper "
            "Python module." % options.extension)
      exit(2)
  else:
    extension_module = transitfeed
  gtfs_factory = extension_module.GetGtfsFactory()
  print('validating %s' % feed)
  print('FeedValidator extension used: %s' % options.extension)
  loader = gtfs_factory.Loader(feed, problems=problems, extra_validation=False,
                               memory_db=options.memory_db,
                               check_duplicate_trips=\
                               options.check_duplicate_trips,
                               gtfs_factory=gtfs_factory)
  schedule = loader.Load()
  # Start validation: children are already validated by the loader.
  schedule.Validate(service_gap_interval=options.service_gap_interval,
                    validate_children=False)
  # Magic filename used by the test suite to exercise the crash handler.
  if feed == 'IWantMyvalidation-crash.txt':
    # See tests/testfeedvalidator.py
    raise Exception('For testing the feed validator crash handler.')
  accumulator = problems.GetAccumulator()
  if accumulator.HasIssues():
    print('ERROR: %s found' % accumulator.FormatCount())
    return schedule, 1
  else:
    print('feed validated successfully')
    return schedule, 0
|
Validate feed, returning the loaded Schedule and exit code.
Args:
feed: GTFS file, either path of the file as a string or a file object
options: options object returned by optparse
problems: transitfeed.ProblemReporter instance
Returns:
a transitfeed.Schedule object, exit code and plain text string of other
problems
Exit code is 2 if an extension is provided but can't be loaded, 1 if
problems are found and 0 if the Schedule is problem free.
plain text string is '' if no other problems are found.
|
def lineincustcols (inlist,colsizes):
    """
    Returns a string composed of elements in inlist, with each element
    right-aligned in a column of width specified by a sequence colsizes. The
    length of colsizes must be greater than or equal to the number of columns
    in inlist.

    Usage:   lineincustcols (inlist,colsizes)
    Returns: formatted string created from inlist
    """
    outstr = ''
    for i in range(len(inlist)):
        # Coerce non-string items to str (Python 2: StringType is str).
        if type(inlist[i]) != StringType:
            item = str(inlist[i])
        else:
            item = inlist[i]
        size = len(item)
        if size <= colsizes[i]:
            # Left-pad with spaces to right-align the item in its column.
            for j in range(colsizes[i]-size):
                outstr = outstr + ' '
            outstr = outstr + item
        else:
            # Item wider than the column: truncate.  NOTE(review): the slice
            # keeps colsizes[i] + 1 characters — one more than the column
            # width; looks like an off-by-one but is preserved as-is.
            outstr = outstr + item[0:colsizes[i]+1]
    return outstr
|
Returns a string composed of elements in inlist, with each element
right-aligned in a column of width specified by a sequence colsizes. The
length of colsizes must be greater than or equal to the number of columns
in inlist.
Usage: lineincustcols (inlist,colsizes)
Returns: formatted string created from inlist
|
def parse_args(self, args=None, values=None):
    """
    parse_args(args : [string] = sys.argv[1:],
               values : Values = None)
    -> (values : Values, args : [string])

    Parse the command-line options found in 'args' (default:
    sys.argv[1:]).  Any errors result in a call to 'error()', which
    by default prints the usage message to stderr and calls
    sys.exit() with an error message.  On success returns a pair
    (values, args) where 'values' is a Values instance (with all
    your option values) and 'args' is the list of arguments left
    over after parsing options.
    """
    rargs = self._get_args(args)
    if values is None:
        values = self.get_default_values()

    # Store the halves of the argument list as attributes for the
    # convenience of callbacks:
    #   rargs
    #     the rest of the command-line (the "r" stands for
    #     "remaining" or "right-hand")
    #   largs
    #     the leftover arguments -- ie. what's left after removing
    #     options and their arguments (the "l" stands for "leftover"
    #     or "left-hand")
    self.rargs = rargs
    self.largs = largs = []
    self.values = values

    try:
        stop = self._process_args(largs, rargs, values)
    # NOTE: Python 2 'except ..., err' syntax — this code targets
    # Python 2 only.
    except (BadOptionError, OptionValueError), err:
        self.error(str(err))

    args = largs + rargs
    return self.check_values(values, args)
|
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
|
def plot_covariance(self, corr=False, param_slice=None, tick_labels=None, tick_params=None):
    """
    Plots the covariance matrix of the posterior as a Hinton diagram.

    .. note::

        This function requires that mpltools is installed.

    :param bool corr: If `True`, the covariance matrix is first normalized
        by the outer product of the square root diagonal of the covariance matrix
        such that the correlation matrix is plotted instead.
    :param slice param_slice: Slice of the modelparameters to
        be plotted.
    :param list tick_labels: List of tick labels for each component;
        by default, these are drawn from the model itself.
    :param tick_params: Optional dict of keyword arguments forwarded to
        matplotlib's ``xticks``/``yticks`` calls.
    """
    if mpls is None:
        raise ImportError("Hinton diagrams require mpltools.")

    if param_slice is None:
        param_slice = np.s_[:]

    # Tick positions are the indices of the sliced parameters; labels fall
    # back to LaTeX-formatted model parameter names when not supplied.
    tick_labels = (
        list(range(len(self.model.modelparam_names[param_slice]))),
        tick_labels
        if tick_labels is not None else
        list(map(u"${}$".format, self.model.modelparam_names[param_slice]))
    )

    cov = self.est_covariance_mtx(corr=corr)[param_slice, param_slice]

    retval = mpls.hinton(cov)
    plt.xticks(*tick_labels, **(tick_params if tick_params is not None else {}))
    plt.yticks(*tick_labels, **(tick_params if tick_params is not None else {}))
    # Put the x-axis ticks on top, the conventional place for matrix plots.
    plt.gca().xaxis.tick_top()

    return retval
|
Plots the covariance matrix of the posterior as a Hinton diagram.
.. note::
This function requires that mpltools is installed.
:param bool corr: If `True`, the covariance matrix is first normalized
by the outer product of the square root diagonal of the covariance matrix
such that the correlation matrix is plotted instead.
:param slice param_slice: Slice of the modelparameters to
be plotted.
:param list tick_labels: List of tick labels for each component;
by default, these are drawn from the model itself.
|
def maskname(mask):
    """
    Returns the event name associated to mask. IN_ISDIR is appended to
    the result when appropriate. Note: only one event is returned, because
    only one event can be raised at a given time.

    @param mask: mask.
    @type mask: int
    @return: event name.
    @rtype: str
    """
    if mask & IN_ISDIR:
        # Strip the directory flag before the lookup, then tag the name.
        return '%s|IN_ISDIR' % EventsCodes.ALL_VALUES[mask - IN_ISDIR]
    return '%s' % EventsCodes.ALL_VALUES[mask]
|
Returns the event name associated to mask. IN_ISDIR is appended to
the result when appropriate. Note: only one event is returned, because
only one event can be raised at a given time.
@param mask: mask.
@type mask: int
@return: event name.
@rtype: str
|
async def on_raw_privmsg(self, message):
    """Modify PRIVMSG to redirect CTCP messages.

    CTCP-encoded messages are parsed and dispatched to a dedicated
    ``on_ctcp_<type>`` handler when one exists, then always to the
    global ``on_ctcp`` handler; plain messages fall through to the
    superclass implementation.
    """
    nick, metadata = self._parse_user(message.source)
    target, msg = message.params

    if is_ctcp(msg):
        self._sync_user(nick, metadata)
        # Renamed from `type` to avoid shadowing the builtin.
        ctcp_type, contents = parse_ctcp(msg)

        # Find dedicated handler if it exists.
        attr = 'on_ctcp_' + pydle.protocol.identifierify(ctcp_type)
        if hasattr(self, attr):
            await getattr(self, attr)(nick, target, contents)
        # Invoke global handler.
        await self.on_ctcp(nick, target, ctcp_type, contents)
    else:
        await super().on_raw_privmsg(message)
|
Modify PRIVMSG to redirect CTCP messages.
|
def cancel_reason(self, cancel_reason):
    """
    Sets the cancel_reason of this OrderFulfillmentPickupDetails.
    A description of why the pickup was canceled. Max length is 100 characters.

    :param cancel_reason: The cancel_reason of this OrderFulfillmentPickupDetails.
    :type: str
    :raises ValueError: if cancel_reason is None or exceeds 100 characters.
    """
    if cancel_reason is None:
        raise ValueError("Invalid value for `cancel_reason`, must not be `None`")
    if len(cancel_reason) > 100:
        # The limit is inclusive (exactly 100 characters is allowed), so the
        # message now matches the check; it previously said "less than `100`".
        raise ValueError("Invalid value for `cancel_reason`, length must be less than or equal to `100`")

    self._cancel_reason = cancel_reason
|
Sets the cancel_reason of this OrderFulfillmentPickupDetails.
A description of why the pickup was canceled. Max length is 100 characters.
:param cancel_reason: The cancel_reason of this OrderFulfillmentPickupDetails.
:type: str
|
def add_task(self, subject, status, **attrs):
    """
    Create a new :class:`Task` attached to this :class:`UserStory` and
    return it.

    :param subject: subject of the :class:`Task`
    :param status: status of the :class:`Task`
    :param attrs: optional attributes for :class:`Task`
    """
    tasks_api = Tasks(self.requester)
    return tasks_api.create(self.project, subject, status,
                            user_story=self.id, **attrs)
|
Add a :class:`Task` to the current :class:`UserStory` and return it.
:param subject: subject of the :class:`Task`
:param status: status of the :class:`Task`
:param attrs: optional attributes for :class:`Task`
|
def get_class_by_id(self, ac_id: int) -> "AssetClass | None":
    """Find an asset class by its numeric id.

    :param ac_id: id of the asset class to look up (must be an int).
    :return: the matching AssetClass, or None when no class has that id.
    """
    assert isinstance(ac_id, int)
    # NOTE(review): the original comment said "iterate recursively", but
    # this is a flat scan of self.asset_classes — confirm nested classes
    # are flattened into that list by the caller.
    return next((ac for ac in self.asset_classes if ac.id == ac_id), None)
|
Finds the asset class by id
|
def notes(self, item_type, item_id):
    """Get the notes of an item, following pagination."""
    params = {
        'order_by': 'updated_at',
        'sort': 'asc',
        'per_page': PER_PAGE,
    }
    notes_path = urijoin(item_type, str(item_id), GitLabClient.NOTES)
    return self.fetch_items(notes_path, params)
|
Get the notes from pagination
|
def SeqN(n, *inner_rules, **kwargs):
    """
    A rule that accepts a sequence of tokens satisfying ``rules`` and returns
    the value returned by rule number ``n``, or None if the first rule was not satisfied.
    """
    seq_rule = Seq(*inner_rules)

    @action(seq_rule, loc=kwargs.get("loc"))
    def rule(parser, *values):
        # Project only the n-th matched value out of the sequence.
        return values[n]

    return rule
|
A rule that accepts a sequence of tokens satisfying ``rules`` and returns
the value returned by rule number ``n``, or None if the first rule was not satisfied.
|
def defaults(cls, *options, **kwargs):
    """Set default options for a session.

    Set default options for a session, whether in a Python script or
    a Jupyter notebook.

    Args:
        *options: Option objects used to specify the defaults.
        backend: The plotting extension the options apply to

    Raises:
        Exception: if any keyword argument other than ``backend`` is given.
    """
    # Reject anything but a lone 'backend' keyword. The original test
    # (kwargs and len(kwargs) != 1 and first-key != 'backend') let a single
    # misspelled keyword through silently; compare the key set instead.
    if kwargs and set(kwargs) != {'backend'}:
        raise Exception('opts.defaults only accepts "backend" keyword argument')
    cls._linemagic(cls._expand_options(merge_options_to_dict(options)),
                   backend=kwargs.get('backend'))
|
Set default options for a session.
Set default options for a session. whether in a Python script or
a Jupyter notebook.
Args:
*options: Option objects used to specify the defaults.
backend: The plotting extension the options apply to
|
def reverse_media_url(target_type, url_string, *args, **kwargs):
    '''
    Given a target type and a resource URL, build an externally reachable
    /media/ URL for that resource, signing it with an HMAC digest when the
    security settings require one.
    '''
    # Fold positional and keyword arguments into the resource URL string.
    positional = '<%s>' % '><'.join(args)
    keyword = '<%s>' % '><'.join('%s:%s' % pair for pair in kwargs.items())
    normalized_url = str(ResourceURL(''.join([url_string, positional, keyword])))

    query_tuples = []
    if singletons.settings.SECURITY and 'Sha1' in singletons.settings.SECURITY:
        digest = get_hmac_sha1_digest(singletons.settings.HMAC_SECRET,
                                      normalized_url, target_type)
        query_tuples.append(('digest', digest))
    # The resource URL must remain the last querystring argument.
    query_tuples.append(('url', normalized_url))
    querystring = urlencode(query_tuples)

    scheme = singletons.settings.EXTERNAL_SCHEME
    # Fall back to the internal host/port when no external one is set.
    host = singletons.settings.EXTERNAL_HOST or singletons.settings.HOST
    port = singletons.settings.EXTERNAL_PORT or singletons.settings.PORT
    port_suffix = ':%s' % port if port != 80 else ''
    typestring_normalized = str(TypeString(target_type))
    return '%s://%s%s/media/%s/?%s' % (
        scheme,
        host,
        port_suffix,
        typestring_normalized,
        querystring,
    )
|
Given a target type and a resource URL, generates a valid URL to this resource via the media endpoint
|
def _slice_mostly_sorted(array, keep, rest, ind=None):
    """Slice dask array `array` that is almost entirely sorted already.

    We perform approximately `2 * len(keep)` slices on `array`.
    This is OK, since `keep` is small. Individually, each of these slices
    is entirely sorted.

    Parameters
    ----------
    array : dask.array.Array
    keep : ndarray[Int]
        This must be sorted.
    rest : ndarray[Bool]
    ind : ndarray[Int], optional

    Returns
    -------
    sliced : dask.array.Array
    """
    if ind is None:
        ind = np.arange(len(array))
    # Concatenate the kept positions with the remaining positions, then
    # argsort so `idx` maps sorted output positions back to input positions.
    idx = np.argsort(np.concatenate([keep, ind[rest]]))
    slices = []
    if keep[0] > 0:  # avoid creating empty slices
        slices.append(slice(None, keep[0]))
    slices.append([keep[0]])
    # Walk consecutive pairs of kept indices; everything strictly between a
    # pair is an already-sorted run.
    windows = zip(keep[:-1], keep[1:])
    for l, r in windows:
        if r > l + 1:  # avoid creating empty slices
            slices.append(slice(l + 1, r))
        slices.append([r])
    if keep[-1] < len(array) - 1:  # avoid creating empty slices
        slices.append(slice(keep[-1] + 1, None))
    # Each piece is individually sorted, so concatenating them in order
    # yields the fully sorted array.
    result = da.concatenate([array[idx[slice_]] for slice_ in slices])
    return result
|
Slice dask array `array` that is almost entirely sorted already.
We perform approximately `2 * len(keep)` slices on `array`.
This is OK, since `keep` is small. Individually, each of these slices
is entirely sorted.
Parameters
----------
array : dask.array.Array
keep : ndarray[Int]
This must be sorted.
rest : ndarray[Bool]
ind : ndarray[Int], optional
Returns
-------
sliced : dask.array.Array
|
def setdict(self, D=None):
    """Set the dictionary array and refresh derived quantities.

    If *D* is given it replaces the current dictionary; in either case the
    DFT of the dictionary is recomputed, and the quantity used by the
    high-memory solve path is rebuilt (or cleared when that path is off).
    """
    if D is not None:
        self.D = np.asarray(D, dtype=self.dtype)
    # Frequency-domain representation of the dictionary.
    self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
    use_cached_solve = self.opt['HighMemSolve'] and self.cri.Cd == 1
    if not use_cached_solve:
        self.c = None
    else:
        self.c = sl.solvedbd_sm_c(
            self.Df, np.conj(self.Df),
            (self.mu / self.rho) * self.GHGf + 1.0, self.cri.axisM)
|
Set dictionary array.
|
async def sort(self, name, start=None, num=None, by=None, get=None,
               desc=False, alpha=False, store=None, groups=False):
    """
    Sort and return the list, set or sorted set at ``name``.

    ``start`` and ``num`` allow for paging through the sorted data

    ``by`` allows using an external key to weight and sort the items.
        Use an "*" to indicate where in the key the item value is located

    ``get`` allows for returning items from external keys rather than the
        sorted data itself. Use an "*" to indicate where in the key
        the item value is located

    ``desc`` allows for reversing the sort

    ``alpha`` allows for sorting lexicographically rather than numerically

    ``store`` allows for storing the result of the sort into
        the key ``store``

    ``groups`` if set to True and if ``get`` contains at least two
        elements, sort will return a list of tuples, each containing the
        values fetched from the arguments to ``get``.
    """
    # Redis LIMIT takes offset and count together, so paging requires both.
    if (start is not None and num is None) or \
            (num is not None and start is None):
        raise RedisError("``start`` and ``num`` must both be specified")

    pieces = [name]
    if by is not None:
        pieces.append(b('BY'))
        pieces.append(by)
    if start is not None and num is not None:
        pieces.append(b('LIMIT'))
        pieces.append(start)
        pieces.append(num)
    if get is not None:
        # If get is a string assume we want to get a single value.
        # Otherwise assume it's an iterable and we want to get multiple
        # values. We can't just iterate blindly because strings are
        # iterable.
        if isinstance(get, str):
            pieces.append(b('GET'))
            pieces.append(get)
        else:
            for g in get:
                pieces.append(b('GET'))
                pieces.append(g)
    if desc:
        pieces.append(b('DESC'))
    if alpha:
        pieces.append(b('ALPHA'))
    if store is not None:
        pieces.append(b('STORE'))
        pieces.append(store)

    if groups:
        if not get or isinstance(get, str) or len(get) < 2:
            raise DataError('when using "groups" the "get" argument '
                            'must be specified and contain at least '
                            'two keys')

    # 'groups' tells the response parser to regroup the flat reply into
    # tuples of len(get) values per sorted element.
    options = {'groups': len(get) if groups else None}
    return await self.execute_command('SORT', *pieces, **options)
|
Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where in the key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
|
def getJobStore(cls, locator):
    """
    Create an instance of the concrete job store implementation that matches
    the given locator.

    :param str locator: The location of the job store to be represented by
        the instance
    :return: an instance of a concrete subclass of AbstractJobStore
    :rtype: toil.jobStores.abstractJobStore.AbstractJobStore
    """
    scheme, suffix = cls.parseLocator(locator)
    # Imports stay local so only the selected backend's module is loaded.
    if scheme == 'file':
        from toil.jobStores.fileJobStore import FileJobStore
        return FileJobStore(suffix)
    if scheme == 'aws':
        from toil.jobStores.aws.jobStore import AWSJobStore
        return AWSJobStore(suffix)
    if scheme == 'azure':
        from toil.jobStores.azureJobStore import AzureJobStore
        return AzureJobStore(suffix)
    if scheme == 'google':
        from toil.jobStores.googleJobStore import GoogleJobStore
        return GoogleJobStore(suffix)
    raise RuntimeError("Unknown job store implementation '%s'" % scheme)
|
Create an instance of the concrete job store implementation that matches the given locator.
:param str locator: The location of the job store to be represented by the instance
:return: an instance of a concrete subclass of AbstractJobStore
:rtype: toil.jobStores.abstractJobStore.AbstractJobStore
|
def delete_hc(kwargs=None, call=None):
    '''
    Permanently delete a health check.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f delete_hc gce name=hc
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The delete_hc function must be called with -f or --function.'
        )

    if not kwargs or 'name' not in kwargs:
        log.error(
            'A name must be specified when deleting a health check.'
        )
        return False

    name = kwargs['name']
    conn = get_conn()

    def _notify(event_name, event_tag):
        # Fire a salt-cloud event announcing progress of the deletion.
        __utils__['cloud.fire_event'](
            'event',
            event_name,
            event_tag,
            args={
                'name': name,
            },
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )

    _notify('delete health_check', 'salt/cloud/healthcheck/deleting')

    try:
        result = conn.ex_destroy_healthcheck(
            conn.ex_get_healthcheck(name)
        )
    except ResourceNotFoundError as exc:
        log.error(
            'Health check %s was not found. Exception was: %s',
            name, exc, exc_info_on_loglevel=logging.DEBUG
        )
        return False

    _notify('deleted health_check', 'salt/cloud/healthcheck/deleted')
    return result
|
Permanently delete a health check.
CLI Example:
.. code-block:: bash
salt-cloud -f delete_hc gce name=hc
|
def getComponentState(self, pchRenderModelName, pchComponentName):
    """Deprecated variant of GetComponentState that takes a controller state
    block instead of an action origin; prefer the new input system and
    GetComponentStateForDevicePath instead."""
    fn = self.function_table.getComponentState
    # Output structures filled in by the native call.
    controller_state = VRControllerState_t()
    mode_state = RenderModel_ControllerMode_State_t()
    component_state = RenderModel_ComponentState_t()
    result = fn(pchRenderModelName, pchComponentName,
                byref(controller_state), byref(mode_state),
                byref(component_state))
    return result, controller_state, mode_state, component_state
|
This version of GetComponentState takes a controller state block instead of an action origin. This function is deprecated. You should use the new input system and GetComponentStateForDevicePath instead.
|
def deflections_from_grid(self, grid):
    """
    Calculate the deflection angles at a given set of arc-second gridded coordinates.

    Parameters
    ----------
    grid : grids.RegularGrid
        The grid of (y,x) arc-second coordinates the deflection angles are computed on.
    """
    def calculate_deflection_component(npow, index):
        # index selects the grid axis for this component (0 = y, 1 = x).
        deflection_grid = self.axis_ratio * grid[:, index]
        # quad_grid appears to numerically integrate deflection_func over
        # [0, 1] for every grid point; only element [0] of its return is
        # used here — presumably the integral values. TODO confirm.
        deflection_grid *= quad_grid(self.deflection_func, 0.0, 1.0, grid,
                                     args=(npow, self.axis_ratio, self.kappa_s,
                                           self.scale_radius))[0]
        return deflection_grid
    deflection_y = calculate_deflection_component(1.0, 0)
    deflection_x = calculate_deflection_component(0.0, 1)
    # Stack into (N, 2) (y, x) pairs and rotate back to the original frame.
    return self.rotate_grid_from_profile(np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T))
|
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
|
def create_shot(self):
    """Create a shot and store it in self.shot.

    Reads the name and description from the dialog widgets; if the name is
    empty, prompts the user via the placeholder text and returns without
    creating anything.

    :returns: None
    :rtype: None
    :raises: None
    """
    name = self.name_le.text()
    if not name:
        self.name_le.setPlaceholderText("Please enter a name!")
        return
    desc = self.desc_pte.toPlainText()
    try:
        shot = djadapter.models.Shot(sequence=self.sequence, project=self.sequence.project, name=name, description=desc)
        shot.save()
        self.shot = shot
        self.accept()
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; catch Exception so those still propagate.
        log.exception("Could not create new shot")
|
Create a shot and store it in the self.shot
:returns: None
:rtype: None
:raises: None
|
def rebin_image(bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask):
    """
    Rebin the image and its companion maps by merging ``bin_size`` pixels
    per axis, trimming trailing pixels that do not fill a complete bin.

    :param bin_size: number of pixels (per axis) to merge
    :return: rebinned (image, wht_map, sigma_bkg, ra_coords, dec_coords,
        idex_mask)
    """
    numPix = int(len(image) / bin_size)
    numPix_precut = numPix * bin_size
    factor = int(len(image) / numPix)
    # Crop to a size divisible by the bin size before resizing.
    if numPix * bin_size == len(image):
        image_precut = image
    else:
        image_precut = image[0:numPix_precut, 0:numPix_precut]
    image_resized = re_size(image_precut, factor)
    # Preserve total flux: each output pixel sums bin_size**2 input pixels.
    image_resized *= bin_size ** 2
    wht_map_resized = re_size(wht_map[0:numPix_precut, 0:numPix_precut], factor)
    sigma_bkg_resized = bin_size * sigma_bkg
    ra_coords_resized = re_size(ra_coords[0:numPix_precut, 0:numPix_precut], factor)
    dec_coords_resized = re_size(dec_coords[0:numPix_precut, 0:numPix_precut], factor)
    idex_mask_resized = re_size(idex_mask[0:numPix_precut, 0:numPix_precut], factor)
    # Any partially-masked rebinned pixel counts as masked.
    idex_mask_resized[idex_mask_resized > 0] = 1
    return (image_resized, wht_map_resized, sigma_bkg_resized,
            ra_coords_resized, dec_coords_resized, idex_mask_resized)
|
rebins pixels, updates cutout image, wht_map, sigma_bkg, coordinates, PSF
:param bin_size: number of pixels (per axis) to merge
:return:
|
def log_with_color(level):
    """Return a logging function for *level* that colors its output."""
    def wrapper(text):
        # Resolve the color and logger method at call time so later
        # configuration changes are honored.
        color = log_colors_config[level.upper()]
        log_method = getattr(logger, level.lower())
        log_method(coloring(text, color))
    return wrapper
|
log with color by different level
|
def mv(i):
    """
    Move (rename) one or more CK entries; implemented as copy with move=yes.

    Input:  {
              (repo_uoa)       - repo UOA
              module_uoa       - module UOA
              data_uoa         - data UOA
              xcids[0]         - {'repo_uoa', 'module_uoa', 'data_uoa'} - new CID
                 or
              (new_repo_uoa)   - new repo UOA
              (new_module_uoa) - new module UOA
              (new_data_uoa)   - new data alias
              (new_data_uid)   - new data UID (leave empty to generate new one)
            }

    Output: {
              return       - return code = 0, if successful
                                         > 0, if error
              (error)      - error text if return > 0

              Output of 'copy' function
            }
    """
    # Check if global writing is allowed
    r=check_writing({'delete':'yes'})
    if r['return']>0: return r

    # Check if wild cards
    ruoa=i.get('repo_uoa','')
    muoa=i.get('module_uoa','')
    duoa=i.get('data_uoa','')
    nduoa=i.get('new_data_uoa','')
    nduid=i.get('new_data_uid','')

    # The new CID may also be given as the first extra CID argument.
    xcids=i.get('xcids',[])
    if len(xcids)>0:
        xcid=xcids[0]
        nduoa=xcid.get('data_uoa','')

    # Wildcards with no explicit new name: move every matching entry.
    if (duoa.find('*')>=0 or duoa.find('?')>=0) and nduoa=='' and nduid=='':
        r=list_data({'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa})
        if r['return']>0: return r

        lst=r['lst']
    else:
        lst=[{'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa}]

    # Delegate to 'copy' in move mode, preserving the original UID.
    i['move']='yes'
    i['keep_old_uid']='yes'

    r={'return':0}
    for ll in lst:
        i['repo_uoa']=ll['repo_uoa']
        i['module_uoa']=ll['module_uoa']
        i['data_uoa']=ll['data_uoa']

        r=copy(i)
        if r['return']>0: return r

    return r
|
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
xcids[0] - {'repo_uoa', 'module_uoa', 'data_uoa'} - new CID
or
(new_repo_uoa) - new repo UOA
(new_module_uoa) - new module UOA
(new_data_uoa) - new data alias
(new_data_uid) - new data UID (leave empty to generate new one)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of 'copy' function
}
|
def interpolate(text, global_dict=None, local_dict=None):
    '''Evaluate expressions in `text` by treating it as an f-string.

    :param text: template text containing ``{...}`` expressions.
    :param global_dict: globals passed to ``eval``.
    :param local_dict: locals passed to ``eval``.
    :raises ValueError: if evaluation fails; the original exception is
        chained as the cause.
    '''
    # step 1, make it an f-string (add quotation marks and the f prefix)
    # step 2, evaluate it as an expression
    # SECURITY NOTE: eval() executes arbitrary code — `text` must come from
    # a trusted source.
    try:
        return eval(as_fstring(text), global_dict, local_dict)
    except Exception as e:
        # Chain the cause so the original traceback is preserved.
        raise ValueError(f'Failed to interpolate {text}: {e}') from e
|
Evaluate expressions in `text`
|
def infix_handle(tokens):
    """Process infix calls."""
    func, args = get_infix_items(tokens, callback=infix_handle)
    # Wrap the function expression and splice in the comma-joined arguments.
    return "({})({})".format(func, ", ".join(args))
|
Process infix calls.
|
def get_tag(self, tag):
    """
    ::

        GET /:login/machines/:id/tags/:tag

    :Returns: the value for a single tag
    :rtype: :py:class:`basestring`
    """
    # NOTE(review): the original built an {'Accept': 'text/plain'} headers
    # dict but never passed it to request(); removed as dead code. Confirm
    # whether request() should accept and forward it.
    j, _ = self.datacenter.request('GET', self.path + '/tags/' + tag)
    return j
|
::
GET /:login/machines/:id/tags/:tag
:Returns: the value for a single tag
:rtype: :py:class:`basestring`
|
def ports(self):
    '''The list of ports involved in this connection.

    The result is a list of tuples, (port name, port object). Each port
    name is a full path to the port (e.g. /localhost/Comp0.rtc:in) if
    this Connection object is owned by a Port, which is in turn owned by
    a Component in the tree. Otherwise, only the port's name will be used
    (in which case it will be the full port name, which will include the
    component name, e.g. 'ConsoleIn0.in'). The full path can be used to
    find ports in the tree.

    If, for some reason, the owner node of a port cannot be found, that
    entry in the list will contain ('Unknown', None). This typically means
    that a component's name has been clobbered on the name server.

    This list will be created at the first reference to this property.
    This means that the first reference may be delayed by CORBA calls,
    but others will return quickly (unless a delayed reparse has been
    triggered).
    '''
    def has_port(node, args):
        # Tree-iteration visitor: return the node when it owns the port.
        if node.get_port_by_ref(args):
            return node
        return None
    with self._mutex:
        if not self._ports:
            # Lazily build the cached list on first access.
            self._ports = []
            for p in self._obj.ports:
                # My owner's owner is a component node in the tree
                if self.owner and self.owner.owner:
                    root = self.owner.owner.root
                    # Search the tree for the component that owns port p.
                    owner_nodes = [n for n in root.iterate(has_port,
                        args=p, filter=['is_component']) if n]
                    if not owner_nodes:
                        self._ports.append(('Unknown', None))
                    else:
                        port_owner = owner_nodes[0]
                        port_owner_path = port_owner.full_path_str
                        port_name = p.get_port_profile().name
                        # Strip the 'InstanceName.' prefix from the port name
                        # before appending it to the owner's full path.
                        prefix = port_owner.instance_name + '.'
                        if port_name.startswith(prefix):
                            port_name = port_name[len(prefix):]
                        self._ports.append((port_owner_path + ':' + \
                                port_name, parse_port(p, self.owner.owner)))
                else:
                    # No owning component available: fall back to the raw
                    # port profile name.
                    self._ports.append((p.get_port_profile().name,
                                        parse_port(p, None)))
        return self._ports
|
The list of ports involved in this connection.
The result is a list of tuples, (port name, port object). Each port
name is a full path to the port (e.g. /localhost/Comp0.rtc:in) if
this Connection object is owned by a Port, which is in turn owned by
a Component in the tree. Otherwise, only the port's name will be used
(in which case it will be the full port name, which will include the
component name, e.g. 'ConsoleIn0.in'). The full path can be used to
find ports in the tree.
If, for some reason, the owner node of a port cannot be found, that
entry in the list will contain ('Unknown', None). This typically means
that a component's name has been clobbered on the name server.
This list will be created at the first reference to this property.
This means that the first reference may be delayed by CORBA calls,
but others will return quickly (unless a delayed reparse has been
triggered).
|
def lose():
    """Enables access to websites that are defined as 'distractors'"""
    kept_lines = []
    inside_block = False
    modified = False
    with open(settings.HOSTS_FILE, "r") as hosts_file:
        for line in hosts_file:
            stripped = line.strip()
            if inside_block:
                # Drop every line until (and including) the end token.
                if stripped == settings.END_TOKEN:
                    inside_block = False
                    modified = True
            elif stripped == settings.START_TOKEN:
                inside_block = True
            else:
                kept_lines.append(line)
    if modified:
        with open(settings.HOSTS_FILE, "w") as hosts_file:
            hosts_file.write("".join(kept_lines))
        reset_network("Concentration is now lost :(.")
|
Enables access to websites that are defined as 'distractors'
|
def register_pubkey(self):
    """
    Decode the server's ECDH public point and store it on the TLS session.

    XXX Support compressed point format.
    XXX Check that the pubkey received is on the curve.
    """
    # point_format = 0
    # if self.point[0] in [b'\x02', b'\x03']:
    #     point_format = 1

    # Resolve the negotiated named curve to a cryptography curve object.
    curve_name = _tls_named_curves[self.named_curve]
    curve = ec._CURVE_TYPES[curve_name]()
    # Build public numbers from the encoded point (uncompressed only, per
    # the XXX note above).
    import_point = ec.EllipticCurvePublicNumbers.from_encoded_point
    pubnum = import_point(curve, self.point)
    s = self.tls_session
    s.server_kx_pubkey = pubnum.public_key(default_backend())

    # Record the curve as the client's key-exchange parameters if unset.
    if not s.client_kx_ecdh_params:
        s.client_kx_ecdh_params = curve
|
XXX Support compressed point format.
XXX Check that the pubkey received is on the curve.
|
def update_checkplotdict_nbrlcs(
        checkplotdict,
        timecol, magcol, errcol,
        lcformat='hat-sql',
        lcformatdir=None,
        verbose=True,
):
    '''For all neighbors in a checkplotdict, make LCs and phased LCs.

    Parameters
    ----------

    checkplotdict : dict
        This is the checkplot to process. The light curves for the neighbors to
        the object here will be extracted from the stored file paths, and this
        function will make plots of these time-series. If the object has 'best'
        periods and epochs generated by period-finder functions in this
        checkplotdict, phased light curve plots of each neighbor will be made
        using these to check the effects of blending.

    timecol,magcol,errcol : str
        The timecol, magcol, and errcol keys used to generate this object's
        checkplot. This is used to extract the correct times-series from the
        neighbors' light curves.

    lcformat : str
        This is the `formatkey` associated with your light curve format, which
        you previously passed in to the `lcproc.register_lcformat`
        function. This will be used to look up how to find and read the light
        curves specified in `basedir` or `use_list_of_filenames`.

    lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
        your lcformat description JSONs, other than the usual directories lcproc
        knows to search for them in. Use this along with `lcformat` to specify
        an LC format JSON file that's not currently registered with lcproc.

    Returns
    -------

    dict
        The input checkplotdict is returned with the neighbor light curve plots
        added in.

    '''

    try:

        # get the description of the light curve format for reading/normalizing
        formatinfo = get_lcformat(lcformat,
                                  use_lcformat_dir=lcformatdir)
        if formatinfo:
            (dfileglob, readerfunc,
             dtimecols, dmagcols, derrcols,
             magsarefluxes, normfunc) = formatinfo
        else:
            LOGERROR("can't figure out the light curve format")
            return checkplotdict
    except Exception as e:
        LOGEXCEPTION("can't figure out the light curve format")
        return checkplotdict

    # bail out early if there are no neighbors to process
    if not ('neighbors' in checkplotdict and
            checkplotdict['neighbors'] and
            len(checkplotdict['neighbors']) > 0):

        LOGERROR('no neighbors for %s, not updating...' %
                 (checkplotdict['objectid']))
        return checkplotdict

    # get our object's magkeys to compare to the neighbor
    objmagkeys = {}

    # handle diff generations of checkplots
    if 'available_bands' in checkplotdict['objectinfo']:
        mclist = checkplotdict['objectinfo']['available_bands']
    else:
        mclist = ('bmag','vmag','rmag','imag','jmag','hmag','kmag',
                  'sdssu','sdssg','sdssr','sdssi','sdssz')

    for mc in mclist:
        if (mc in checkplotdict['objectinfo'] and
            checkplotdict['objectinfo'][mc] is not None and
            np.isfinite(checkplotdict['objectinfo'][mc])):

            objmagkeys[mc] = checkplotdict['objectinfo'][mc]

    # if there are actually neighbors, go through them in order
    for nbr in checkplotdict['neighbors']:

        objectid, lcfpath = (nbr['objectid'],
                             nbr['lcfpath'])

        # get the light curve
        if not os.path.exists(lcfpath):
            LOGERROR('objectid: %s, neighbor: %s, '
                     'lightcurve: %s not found, skipping...' %
                     (checkplotdict['objectid'], objectid, lcfpath))
            continue

        lcdict = readerfunc(lcfpath)

        # this should handle lists/tuples being returned by readerfunc
        # we assume that the first element is the actual lcdict
        # FIXME: figure out how to not need this assumption
        if ( (isinstance(lcdict, (list, tuple))) and
             (isinstance(lcdict[0], dict)) ):
            lcdict = lcdict[0]

        # 0. get this neighbor's magcols and get the magdiff and colordiff
        # between it and the object
        nbrmagkeys = {}

        for mc in objmagkeys:
            if (('objectinfo' in lcdict) and
                (isinstance(lcdict['objectinfo'], dict)) and
                (mc in lcdict['objectinfo']) and
                (lcdict['objectinfo'][mc] is not None) and
                (np.isfinite(lcdict['objectinfo'][mc]))):

                nbrmagkeys[mc] = lcdict['objectinfo'][mc]

        # now calculate the magdiffs
        magdiffs = {}
        for omc in objmagkeys:
            if omc in nbrmagkeys:
                magdiffs[omc] = objmagkeys[omc] - nbrmagkeys[omc]

        # calculate colors and colordiffs
        colordiffs = {}

        # generate the list of colors to get
        # NOTE: here, we don't really bother with new/old gen checkplots
        # maybe change this later to handle arbitrary colors
        for ctrio in (['bmag','vmag','bvcolor'],
                      ['vmag','kmag','vkcolor'],
                      ['jmag','kmag','jkcolor'],
                      ['sdssi','jmag','ijcolor'],
                      ['sdssg','kmag','gkcolor'],
                      ['sdssg','sdssr','grcolor']):

            m1, m2, color = ctrio

            if (m1 in objmagkeys and
                m2 in objmagkeys and
                m1 in nbrmagkeys and
                m2 in nbrmagkeys):

                objcolor = objmagkeys[m1] - objmagkeys[m2]
                nbrcolor = nbrmagkeys[m1] - nbrmagkeys[m2]
                colordiffs[color] = objcolor - nbrcolor

        # finally, add all the color and magdiff info to the nbr dict
        nbr.update({'magdiffs':magdiffs,
                    'colordiffs':colordiffs})

        #
        # process magcols
        #

        # normalize using the special function if specified
        if normfunc is not None:
            lcdict = normfunc(lcdict)

        try:

            # get the times, mags, and errs
            # dereference the columns and get them from the lcdict
            # dotted column names address nested dicts inside the lcdict
            if '.' in timecol:
                timecolget = timecol.split('.')
            else:
                timecolget = [timecol]
            times = _dict_get(lcdict, timecolget)

            if '.' in magcol:
                magcolget = magcol.split('.')
            else:
                magcolget = [magcol]
            mags = _dict_get(lcdict, magcolget)

            if '.' in errcol:
                errcolget = errcol.split('.')
            else:
                errcolget = [errcol]
            errs = _dict_get(lcdict, errcolget)

        except KeyError:

            LOGERROR('LC for neighbor: %s (target object: %s) does not '
                     'have one or more of the required columns: %s, '
                     'skipping...' %
                     (objectid, checkplotdict['objectid'],
                      ', '.join([timecol, magcol, errcol])))
            continue

        # filter the input times, mags, errs; do sigclipping and normalization
        stimes, smags, serrs = sigclip_magseries(times,
                                                 mags,
                                                 errs,
                                                 magsarefluxes=magsarefluxes,
                                                 sigclip=4.0)

        # normalize here if not using special normalization
        if normfunc is None:
            ntimes, nmags = normalize_magseries(
                stimes, smags,
                magsarefluxes=magsarefluxes
            )
            xtimes, xmags, xerrs = ntimes, nmags, serrs
        else:
            xtimes, xmags, xerrs = stimes, smags, serrs

        # check if this neighbor has enough finite points in its LC
        # fail early if not enough light curve points
        if ((xtimes is None) or (xmags is None) or (xerrs is None) or
            (xtimes.size < 49) or (xmags.size < 49) or (xerrs.size < 49)):

            LOGERROR("one or more of times, mags, errs appear to be None "
                     "after sig-clipping. are the measurements all nan? "
                     "can't make neighbor light curve plots "
                     "for target: %s, neighbor: %s, neighbor LC: %s" %
                     (checkplotdict['objectid'],
                      nbr['objectid'],
                      nbr['lcfpath']))
            continue

        #
        # now we can start doing stuff if everything checks out
        #

        # make an unphased mag-series plot
        nbrdict = _pkl_magseries_plot(xtimes,
                                      xmags,
                                      xerrs,
                                      magsarefluxes=magsarefluxes)
        # update the nbr
        nbr.update(nbrdict)

        # for each lspmethod in the checkplot, make a corresponding plot for
        # this neighbor

        # figure out the period finder methods present
        if 'pfmethods' in checkplotdict:
            pfmethods = checkplotdict['pfmethods']
        else:
            pfmethods = []
            for cpkey in checkplotdict:
                for pfkey in PFMETHODS:
                    if pfkey in cpkey:
                        pfmethods.append(pfkey)

        for lspt in pfmethods:

            # initialize this lspmethod entry
            nbr[lspt] = {}

            # we only care about the best period and its options
            operiod, oepoch = (checkplotdict[lspt][0]['period'],
                               checkplotdict[lspt][0]['epoch'])
            (ophasewrap, ophasesort, ophasebin,
             ominbinelems, oplotxlim) = (
                 checkplotdict[lspt][0]['phasewrap'],
                 checkplotdict[lspt][0]['phasesort'],
                 checkplotdict[lspt][0]['phasebin'],
                 checkplotdict[lspt][0]['minbinelems'],
                 checkplotdict[lspt][0]['plotxlim'],
             )

            # make the phasedlc plot for this period
            nbr = _pkl_phased_magseries_plot(
                nbr,
                lspt.split('-')[1],  # this splits '<pfindex>-<pfmethod>'
                0,
                xtimes, xmags, xerrs,
                operiod, oepoch,
                phasewrap=ophasewrap,
                phasesort=ophasesort,
                phasebin=ophasebin,
                minbinelems=ominbinelems,
                plotxlim=oplotxlim,
                magsarefluxes=magsarefluxes,
                verbose=verbose,
                override_pfmethod=lspt
            )

    # at this point, this neighbor's dict should be up to date with all
    # info, magseries plot, and all phased LC plots

    # return the updated checkplotdict
    return checkplotdict
|
For all neighbors in a checkplotdict, make LCs and phased LCs.
Parameters
----------
checkplotdict : dict
This is the checkplot to process. The light curves for the neighbors to
the object here will be extracted from the stored file paths, and this
function will make plots of these time-series. If the object has 'best'
periods and epochs generated by period-finder functions in this
checkplotdict, phased light curve plots of each neighbor will be made
using these to check the effects of blending.
timecol,magcol,errcol : str
The timecol, magcol, and errcol keys used to generate this object's
checkplot. This is used to extract the correct times-series from the
neighbors' light curves.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
dict
The input checkplotdict is returned with the neighbor light curve plots
added in.
|
def remove_plugin(self, name, force=False):
    """
    Remove an installed plugin.

    Args:
        name (string): Name of the plugin to remove. The ``:latest``
            tag is optional, and is the default if omitted.
        force (bool): Disable the plugin before removing. This may
            result in issues if the plugin is in use by a container.

    Returns:
        ``True`` if successful
    """
    endpoint = self._url('/plugins/{0}', name)
    response = self._delete(endpoint, params={'force': force})
    self._raise_for_status(response)
    return True
|
Remove an installed plugin.
Args:
name (string): Name of the plugin to remove. The ``:latest``
tag is optional, and is the default if omitted.
force (bool): Disable the plugin before removing. This may
result in issues if the plugin is in use by a container.
Returns:
``True`` if successful
|
def DeregisterHelper(cls, analyzer_helper):
    """Deregisters a format analyzer helper.

    Args:
      analyzer_helper (AnalyzerHelper): analyzer helper.

    Raises:
      KeyError: if analyzer helper object is not set for the corresponding
          type indicator.
    """
    type_indicator = analyzer_helper.type_indicator
    if type_indicator not in cls._analyzer_helpers:
        raise KeyError(
            'Analyzer helper object not set for type indicator: {0:s}.'.format(
                type_indicator))

    # Flush caches for the helper actually registered under this indicator,
    # which may differ from the object passed in.
    registered_helper = cls._analyzer_helpers[type_indicator]
    cls._FlushCache(registered_helper.format_categories)
    del cls._analyzer_helpers[type_indicator]
|
Deregisters a format analyzer helper.
Args:
analyzer_helper (AnalyzerHelper): analyzer helper.
Raises:
KeyError: if analyzer helper object is not set for the corresponding
type indicator.
|
def has_reset(self):
    """Checks the grizzly to see if it reset itself because of
    voltage sag or other reasons. Useful to reinitialize acceleration or
    current limiting."""
    # A reset zeroes the uptime counter, so an uptime at or below the last
    # observed tick count means the board restarted since our last check.
    uptime = self._read_as_int(Addr.Uptime, 4)
    reset_detected = uptime <= self._ticks
    self._ticks = uptime
    return reset_detected
|
Checks the grizzly to see if it reset itself because of
voltage sag or other reasons. Useful to reinitialize acceleration or
current limiting.
|
def create_datacenter(self, datacenter):
    """
    Creates a data center -- both simple and complex are supported.

    Builds the request payload from the datacenter description object
    (name, optional location/description, and any nested servers,
    volumes, load balancers and LANs) and POSTs it to the API.
    """
    properties = {"name": datacenter.name}
    # Omit 'location', if not provided, to receive
    # a meaningful error message.
    if datacenter.location:
        properties['location'] = datacenter.location
    # Optional Properties
    if datacenter.description:
        properties['description'] = datacenter.description

    entities = {}
    # Servers
    if datacenter.servers:
        entities["servers"] = {
            "items": [self._create_server_dict(srv)
                      for srv in datacenter.servers]
        }
    # Volumes
    if datacenter.volumes:
        entities["volumes"] = {
            "items": [self._create_volume_dict(vol)
                      for vol in datacenter.volumes]
        }
    # Load Balancers
    if datacenter.loadbalancers:
        entities["loadbalancers"] = {
            "items": [self._create_loadbalancer_dict(balancer)
                      for balancer in datacenter.loadbalancers]
        }
    # LANs
    if datacenter.lans:
        entities["lans"] = {
            "items": [self._create_lan_dict(lan)
                      for lan in datacenter.lans]
        }

    raw = {"properties": properties}
    if entities:
        raw["entities"] = entities

    data = json.dumps(raw)
    return self._perform_request(
        url='/datacenters',
        method='POST',
        data=data)
|
Creates a data center -- both simple and complex are supported.
|
def replace_label(self, oldLabel, newLabel):
    """Replaces every standalone occurrence of ``oldLabel`` in
    ``self.asm`` with ``newLabel``.

    Matches are whole-word (``\\b`` boundaries), so identifiers that
    merely contain ``oldLabel`` as a substring are left untouched. The
    scan resumes just past each inserted label, so replacement text is
    never re-matched.
    """
    if oldLabel == newLabel:
        return  # nothing to do

    # Bug fix: escape the label before embedding it in the pattern.
    # Labels containing regex metacharacters (e.g. '.', '$') previously
    # produced wrong matches or pattern errors.
    pattern = re.compile(r'\b' + re.escape(oldLabel) + r'\b')
    pos = 0
    new_len = len(newLabel)
    while True:
        match = pattern.search(self.asm[pos:])
        if not match:
            break
        text = self.asm
        self.asm = (text[:pos + match.start()] + newLabel +
                    text[pos + match.end():])
        # Skip past the freshly inserted label.
        pos += match.start() + new_len
|
Replaces old label with a new one
|
def format_stats(self, stats:TensorOrNumList)->None:
    "Format stats before printing."
    formatted = []
    for name, stat in zip(self.names, stats):
        if stat is None:
            formatted.append('#na#')
        elif isinstance(stat, int):
            formatted.append(str(stat))
        else:
            formatted.append(f'{stat:.6f}')
    if self.add_time:
        formatted.append(format_time(time() - self.start_epoch))
    if not self.silent:
        self.pbar.write(formatted, table=True)
|
Format stats before printing.
|
def check_object_permissions(self, request, obj):
    """
    Check if the request should be permitted for a given object.
    Raises an appropriate exception if the request is not permitted.

    :param request: Pyramid Request object.
    :param obj: The SQLAlchemy model instance that permissions will be evaluated against.
    """
    for permission in self.get_permissions():
        if permission.has_object_permission(request, self, obj):
            continue
        # Delegate to the view's denial hook, passing along the
        # permission's custom message when one is defined.
        self.permission_denied(
            request, message=getattr(permission, 'message', None))
|
Check if the request should be permitted for a given object.
Raises an appropriate exception if the request is not permitted.
:param request: Pyramid Request object.
:param obj: The SQLAlchemy model instance that permissions will be evaluated against.
|
def binaryorbit(orbit, comp1, comp2, envelope=None):
    """
    Build the string representation of a hierarchy containing a binary
    orbit with 2 components.

    Generally, this will be used as an input to the kind argument in
    :meth:`phoebe.frontend.bundle.Bundle.set_hierarchy`

    :parameter comp1: an existing hierarchy string, Parameter, or ParameterSet
    :parameter comp2: an existing hierarchy string, Parameter, or ParameterSet
    :return: the string representation of the hierarchy
    """
    children = [_to_component(comp1), _to_component(comp2)]
    if envelope:
        # The envelope, like the orbit, is rendered without the
        # component-flag applied.
        children.append(_to_component(envelope, False))
    return '{}({})'.format(_to_component(orbit, False), ', '.join(children))
|
Build the string representation of a hierarchy containing a binary
orbit with 2 components.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.set_hierarchy`
:parameter comp1: an existing hierarchy string, Parameter, or ParameterSet
:parameter comp2: an existing hierarchy string, Parameter, or ParameterSet
:return: the string representation of the hierarchy
|
def need_summary(self, now, max_updates, max_age):
    """
    Helper method to determine if a "summarize" record should be
    added.

    :param now: The current time.
    :param max_updates: Maximum number of updates before a
                        summarize is required.
    :param max_age: Maximum age of the last summarize record.
                    This is used in the case where a summarize
                    request has been lost by the compactor.
    :returns: True if a "summarize" record should be added, False
              otherwise.
    """
    if self.summarized is True:
        # A summarize was already requested; re-issue only if the last
        # request looks lost (older than max_age).
        return self.last_summarize_ts + max_age <= now
    # Otherwise require an explicit not-summarized state plus enough
    # accumulated updates.
    return self.summarized is False and self.updates >= max_updates
|
Helper method to determine if a "summarize" record should be
added.
:param now: The current time.
:param max_updates: Maximum number of updates before a
summarize is required.
:param max_age: Maximum age of the last summarize record.
This is used in the case where a summarize
request has been lost by the compactor.
:returns: True if a "summarize" record should be added, False
otherwise.
|
def record_modify_subfield(rec, tag, subfield_code, value, subfield_position,
                           field_position_global=None,
                           field_position_local=None):
    """Modify subfield at specified position.

    Specify the subfield by tag, field number and subfield position.
    Raises InvenioBibRecordFieldError when the position does not exist.
    """
    subfields = record_get_subfields(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    try:
        # Negative positions index from the end, as with any list.
        subfields[subfield_position] = (subfield_code, value)
    except IndexError:
        raise InvenioBibRecordFieldError(
            "There is no subfield with position '%d'." % subfield_position)
|
Modify subfield at specified position.
Specify the subfield by tag, field number and subfield position.
|
def replace_widgets(self, widgets, team_context, dashboard_id, eTag=None):
    """ReplaceWidgets.
    [Preview API] Replace the widgets on specified dashboard with the supplied widgets.
    :param [Widget] widgets: Revised state of widgets to store for the dashboard.
    :param :class:`<TeamContext> <azure.devops.v5_0.dashboard.models.TeamContext>` team_context: The team context for the operation
    :param str dashboard_id: ID of the Dashboard to modify.
    :param String eTag: Dashboard Widgets Version
    :rtype: :class:`<WidgetsVersionedList> <azure.devops.v5_0.dashboard.models.WidgetsVersionedList>`
    """
    # NOTE(review): eTag is accepted but never attached to the outgoing
    # request below — confirm whether it should be sent (e.g. If-Match).
    # Resolve project and team from the team context, preferring the
    # stable ids over the display names when both are present.
    project = None
    team = None
    if team_context is not None:
        if team_context.project_id:
            project = team_context.project_id
        else:
            project = team_context.project
        if team_context.team_id:
            team = team_context.team_id
        else:
            team = team_context.team
    # URL route parameters for the dashboard REST endpoint.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'string')
    if team is not None:
        route_values['team'] = self._serialize.url('team', team, 'string')
    if dashboard_id is not None:
        route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
    content = self._serialize.body(widgets, '[Widget]')
    # PUT replaces the dashboard's entire widget collection.
    response = self._send(http_method='PUT',
                          location_id='bdcff53a-8355-4172-a00a-40497ea23afc',
                          version='5.0-preview.2',
                          route_values=route_values,
                          content=content)
    # Bundle the returned widgets with the server-reported version (ETag).
    response_object = models.WidgetsVersionedList()
    response_object.widgets = self._deserialize('[Widget]', self._unwrap_collection(response))
    response_object.eTag = response.headers.get('ETag')
    return response_object
|
ReplaceWidgets.
[Preview API] Replace the widgets on specified dashboard with the supplied widgets.
:param [Widget] widgets: Revised state of widgets to store for the dashboard.
:param :class:`<TeamContext> <azure.devops.v5_0.dashboard.models.TeamContext>` team_context: The team context for the operation
:param str dashboard_id: ID of the Dashboard to modify.
:param String eTag: Dashboard Widgets Version
:rtype: :class:`<WidgetsVersionedList> <azure.devops.v5_0.dashboard.models.WidgetsVersionedList>`
|
def save_json(obj, filename, **kwargs):
    """
    Save an object as a JSON file.

    Args:
        obj: The object to save. Must be JSON-serializable.
        filename: Path to the output file.
        **kwargs: Additional arguments to `json.dump`.
    """
    # Always write UTF-8 so the output is portable across platforms.
    with open(filename, 'w', encoding='utf-8') as handle:
        json.dump(obj, handle, **kwargs)
|
Save an object as a JSON file.
Args:
obj: The object to save. Must be JSON-serializable.
filename: Path to the output file.
**kwargs: Additional arguments to `json.dump`.
|
def render_layout(layout_name, content, **context):
    """Uses a jinja template to wrap the content inside a layout.

    Wraps the content inside a block and adds the extend statement before
    rendering it with jinja. The block name can be specified in
    ``layout_name`` after the filename, separated by a colon (e.g.
    ``"layout.html:body"``). The default block name is "content".
    """
    layout_block = "content"
    if ":" in layout_name:
        # Split on the first colon only: layout names that themselves
        # contain ':' previously raised "too many values to unpack".
        layout_name, layout_block = layout_name.split(":", 1)
    tpl = '{%% extends "%s" %%}{%% block %s %%}%s{%% endblock %%}' % (layout_name, layout_block, content)
    return render_template_string(tpl, **context)
|
Uses a jinja template to wrap the content inside a layout.
Wraps the content inside a block and adds the extend statement before rendering it
with jinja. The block name can be specified in the layout_name after the filename separated
by a colon. The default block name is "content".
|
def update_notify_on_sample_invalidation(portal):
    """The name of the Setup field was NotifyOnARRetract, so it was
    confusing. There was also two fields "NotifyOnRejection"
    """
    setup = api.get_setup()
    # Migrate NotifyOnARRetract --> NotifyOnSampleInvalidation,
    # preserving the stored value (default True when never set).
    setup.setNotifyOnSampleInvalidation(
        setup.__dict__.get("NotifyOnARRetract", True))
    # Migrate NotifyOnRejection --> NotifyOnSampleRejection,
    # preserving the stored value (default False when never set).
    setup.setNotifyOnSampleRejection(
        setup.__dict__.get("NotifyOnRejection", False))
|
The name of the Setup field was NotifyOnARRetract, so it was
confusing. There were also two fields named "NotifyOnRejection"
|
def make_iaf_stack(total_event_size,
                   num_hidden_layers=2,
                   seed=None,
                   dtype=tf.float32):
    """Creates a stacked IAF bijector.

    This bijector operates on vector-valued events.

    Args:
      total_event_size: Number of dimensions to operate over.
      num_hidden_layers: How many hidden layers to use in each IAF.
      seed: Random seed for the initializers.
      dtype: DType for the variables.

    Returns:
      bijector: The created bijector.
    """
    # SeedStream hands out a fresh deterministic seed per initializer call.
    seed = tfd.SeedStream(seed, 'make_iaf_stack')

    def make_iaf():
        """Create an IAF."""
        initializer = tf.compat.v2.keras.initializers.VarianceScaling(
            2 * 0.01, seed=seed() % (2**31 - 1))

        made = tfb.AutoregressiveLayer(
            params=2,
            event_shape=[total_event_size],
            hidden_units=[total_event_size] * num_hidden_layers,
            activation=tf.nn.elu,
            kernel_initializer=initializer,
            dtype=dtype)

        def shift_and_scale(x):
            # TODO(siege): Something is losing the static shape.
            x.set_shape(
                x.shape.merge_with([None] * (x.shape.ndims - 1) + [total_event_size]))
            return tf.unstack(made(x), num=2, axis=-1)

        return tfb.Invert(tfb.MaskedAutoregressiveFlow(shift_and_scale))

    def make_swap():
        """Create a swap (dimension-reversing permutation)."""
        # Reversing the dimension order between IAF layers lets every
        # dimension eventually condition on every other one.
        permutation = list(reversed(range(total_event_size)))
        return tfb.Permute(permutation)

    # Three IAF layers interleaved with swaps, composed via bijector calls.
    bijector = make_iaf()
    bijector = make_swap()(bijector)
    bijector = make_iaf()(bijector)
    bijector = make_swap()(bijector)
    bijector = make_iaf()(bijector)
    bijector = make_swap()(bijector)

    return bijector
|
Creates a stacked IAF bijector.
This bijector operates on vector-valued events.
Args:
total_event_size: Number of dimensions to operate over.
num_hidden_layers: How many hidden layers to use in each IAF.
seed: Random seed for the initializers.
dtype: DType for the variables.
Returns:
bijector: The created bijector.
|
def last_position(self):
    """
    Returns the last position of this Sprite as a tuple
    (x, y, width, height).
    """
    # The _old_* attributes hold the values from before the last move.
    return (self._old_x, self._old_y,
            self._old_width, self._old_height)
|
Returns the last position of this Sprite as a tuple
(x, y, width, height).
|
def set_dns(name, dnsservers=None, searchdomains=None, path=None):
    '''
    .. versionchanged:: 2015.5.0
        The ``dnsservers`` and ``searchdomains`` parameters can now be passed
        as a comma-separated list.

    Update /etc/resolv.conf

    path
        path to the container parent
        default: /var/lib/lxc (system default)

        .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt myminion lxc.set_dns ubuntu "['8.8.8.8', '4.4.4.4']"
    '''
    # Normalize both parameters to lists; comma-separated strings are
    # split, anything else raises SaltInvocationError.
    if dnsservers is None:
        dnsservers = ['8.8.8.8', '4.4.4.4']
    elif not isinstance(dnsservers, list):
        try:
            dnsservers = dnsservers.split(',')
        except AttributeError:
            raise SaltInvocationError(
                'Invalid input for \'dnsservers\' parameter'
            )
    if searchdomains is None:
        searchdomains = []
    elif not isinstance(searchdomains, list):
        try:
            searchdomains = searchdomains.split(',')
        except AttributeError:
            raise SaltInvocationError(
                'Invalid input for \'searchdomains\' parameter'
            )
    # Render the resolv.conf payload: nameserver lines then search lines.
    dns = ['nameserver {0}'.format(x) for x in dnsservers]
    dns.extend(['search {0}'.format(x) for x in searchdomains])
    dns = '\n'.join(dns) + '\n'
    # we may be using resolvconf in the container
    # We need to handle that case with care:
    # - we create the resolv.conf runtime directory (the
    # linked directory) as anyway it will be shadowed when the real
    # runned tmpfs mountpoint will be mounted.
    # ( /etc/resolv.conf -> ../run/resolvconf/resolv.conf)
    # Indeed, it can save us in any other case (running, eg, in a
    # bare chroot when repairing or preparing the container for
    # operation.
    # - We also teach resolvconf to use the aforementioned dns.
    # - We finally also set /etc/resolv.conf in all cases
    rstr = __salt__['test.random_hash']()
    # no tmp here, apparmor won't let us execute !
    script = '/sbin/{0}_dns.sh'.format(rstr)
    DNS_SCRIPT = "\n".join([
        # 'set -x',
        '#!/usr/bin/env bash',
        'if [ -h /etc/resolv.conf ];then',
        ' if [ "x$(readlink /etc/resolv.conf)"'
        ' = "x../run/resolvconf/resolv.conf" ];then',
        ' if [ ! -d /run/resolvconf/ ];then',
        ' mkdir -p /run/resolvconf',
        ' fi',
        ' cat > /etc/resolvconf/resolv.conf.d/head <<EOF',
        dns,
        'EOF',
        '',
        ' fi',
        'fi',
        'cat > /etc/resolv.conf <<EOF',
        dns,
        'EOF',
        ''])
    # Copy the setter script into the container via tee ...
    result = run_all(
        name, 'tee {0}'.format(script), path=path,
        stdin=DNS_SCRIPT, python_shell=True)
    if result['retcode'] == 0:
        # ... then mark it executable and run it inside the container.
        result = run_all(
            name, 'sh -c "chmod +x {0};{0}"'.format(script),
            path=path, python_shell=True)
    # blindly delete the setter file
    run_all(name,
            'sh -c \'if [ -f "{0}" ];then rm -f "{0}";fi\''.format(script),
            path=path, python_shell=True)
    if result['retcode'] != 0:
        error = ('Unable to write to /etc/resolv.conf in container \'{0}\''
                 .format(name))
        if result['stderr']:
            error += ': {0}'.format(result['stderr'])
        raise CommandExecutionError(error)
    return True
|
.. versionchanged:: 2015.5.0
The ``dnsservers`` and ``searchdomains`` parameters can now be passed
as a comma-separated list.
Update /etc/resolv.confo
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion lxc.set_dns ubuntu "['8.8.8.8', '4.4.4.4']"
|
def rehook_symbol(self, new_address, symbol_name):
    """
    Move the hook for a symbol to a specific address

    :param new_address: the new address that will trigger the SimProc execution
    :param symbol_name: the name of the symbol (f.i. strcmp )
    :return: None
    """
    # Rebuild the mapping, re-keying every matching SimProcedure onto the
    # new address while leaving all other hooks in place.
    self._sim_procedures = {
        (new_address if proc.display_name == symbol_name else addr): proc
        for addr, proc in self._sim_procedures.items()
    }
|
Move the hook for a symbol to a specific address
:param new_address: the new address that will trigger the SimProc execution
:param symbol_name: the name of the symbol (f.i. strcmp )
:return: None
|
def write8(self, value, char_mode=False):
    """Write 8-bit value in character or data mode. Value should be an int
    value from 0-255, and char_mode is True if character data or False if
    non-character data (default).
    """
    # One millisecond delay to prevent writing too quickly.
    self._delay_microseconds(1000)
    # Set character / data bit.
    self._gpio.output(self._rs, char_mode)
    # The display bus is 4 bits wide: send the high nibble first, then
    # the low nibble, pulsing the enable line after each.
    for shift in (4, 0):
        nibble = (value >> shift) & 0x0F
        self._gpio.output_pins({
            self._d4: (nibble & 0x01) > 0,
            self._d5: (nibble & 0x02) > 0,
            self._d6: (nibble & 0x04) > 0,
            self._d7: (nibble & 0x08) > 0,
        })
        self._pulse_enable()
|
Write 8-bit value in character or data mode. Value should be an int
value from 0-255, and char_mode is True if character data or False if
non-character data (default).
|
def show_progress(self):
    """If we are in a progress scope, and no log messages have been
    shown, write out another '.'"""
    if not self.in_progress_hanging:
        return
    # Flush immediately so the dot appears even without a newline.
    sys.stdout.write('.')
    sys.stdout.flush()
|
If we are in a progress scope, and no log messages have been
shown, write out another '.'
|
def show_warnings(self):
    """Return detailed information about warnings as a
    sequence of tuples of (Level, Code, Message). This
    is only supported in MySQL-4.1 and up. If your server
    is an earlier version, an empty sequence is returned."""
    # SHOW WARNINGS only exists from MySQL 4.1 onwards.
    if self._server_version < (4, 1):
        return ()
    self.query("SHOW WARNINGS")
    result = self.store_result()
    return result.fetch_row(0)
|
Return detailed information about warnings as a
sequence of tuples of (Level, Code, Message). This
is only supported in MySQL-4.1 and up. If your server
is an earlier version, an empty sequence is returned.
|
def from_pcount(nevents):
    """We assume a Poisson process. nevents is the number of events in
    some interval. The distribution of values is the distribution of the
    Poisson rate parameter given this observed number of events, where the
    "rate" is in units of events per interval of the same duration. The
    max-likelihood value is nevents, but the mean value is nevents + 1.
    The gamma distribution is obtained by assuming an improper, uniform
    prior for the rate between 0 and infinity."""
    if nevents < 0:
        raise ValueError('Poisson parameter `nevents` must be nonnegative')
    # Gamma(k+1, 1) is the posterior for a Poisson rate with a uniform
    # prior, given k observed events.
    samples = np.random.gamma(nevents + 1, size=uval_nsamples)
    return Uval(samples)
|
We assume a Poisson process. nevents is the number of events in
some interval. The distribution of values is the distribution of the
Poisson rate parameter given this observed number of events, where the
"rate" is in units of events per interval of the same duration. The
max-likelihood value is nevents, but the mean value is nevents + 1.
The gamma distribution is obtained by assuming an improper, uniform
prior for the rate between 0 and infinity.
|
def calc_qdb_v1(self):
    """Calculate direct runoff released from the soil.

    Required control parameters:
      |NHRU|
      |Lnk|
      |NFk|
      |BSf|

    Required state sequence:
      |BoWa|

    Required flux sequence:
      |WaDa|

    Calculated flux sequence:
      |QDB|

    Basic equations:
      :math:`QDB = \\Bigl \\lbrace
      {
      {max(Exz, 0) \\ | \\ SfA \\leq 0}
      \\atop
      {max(Exz + NFk \\cdot SfA^{BSf+1}, 0) \\ | \\ SfA > 0}
      }`

      :math:`SFA = (1 - \\frac{BoWa}{NFk})^\\frac{1}{BSf+1} -
      \\frac{WaDa}{(BSf+1) \\cdot NFk}`

      :math:`Exz = (BoWa + WaDa) - NFk`

    Examples:
        For water areas (|FLUSS| and |SEE|), sealed areas (|VERS|), and
        areas without any soil storage capacity, all water is completely
        routed as direct runoff |QDB| (see the first four HRUs). No
        principal distinction is made between the remaining land use
        classes (arable land |ACKER| has been selected for the last five
        HRUs arbitrarily):

        >>> from hydpy.models.lland import *
        >>> parameterstep('1d')
        >>> simulationstep('12h')
        >>> nhru(9)
        >>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER, ACKER, ACKER)
        >>> bsf(0.4)
        >>> nfk(100.0, 100.0, 100.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0)
        >>> fluxes.wada = 10.0
        >>> states.bowa = (
        ...     100.0, 100.0, 100.0, 0.0, -0.1, 0.0, 50.0, 100.0, 100.1)
        >>> model.calc_qdb_v1()
        >>> fluxes.qdb
        qdb(10.0, 10.0, 10.0, 10.0, 0.142039, 0.144959, 1.993649, 10.0, 10.1)

        With the common |BSf| value of 0.4, the discharge coefficient
        increases more or less exponentially with soil moisture.
        For soil moisture values slightly below zero or above usable
        field capacity, plausible amounts of generated direct runoff
        are ensured.
    """
    # fastaccess aliases avoid repeated attribute lookups per HRU.
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    aid = self.sequences.aides.fastaccess
    for k in range(con.nhru):
        if con.lnk[k] == WASSER:
            # Open water of type WASSER generates no direct runoff here.
            flu.qdb[k] = 0.
        elif ((con.lnk[k] in (VERS, FLUSS, SEE)) or
                (con.nfk[k] <= 0.)):
            # Sealed areas, streams, lakes, or soils without storage
            # capacity route all incoming water directly.
            flu.qdb[k] = flu.wada[k]
        else:
            if sta.bowa[k] < con.nfk[k]:
                # Saturated area fraction SfA (see basic equations above).
                aid.sfa[k] = (
                    (1.-sta.bowa[k]/con.nfk[k])**(1./(con.bsf[k]+1.)) -
                    (flu.wada[k]/((con.bsf[k]+1.)*con.nfk[k])))
            else:
                aid.sfa[k] = 0.
            # Excess over usable field capacity.
            aid.exz[k] = sta.bowa[k]+flu.wada[k]-con.nfk[k]
            flu.qdb[k] = aid.exz[k]
            if aid.sfa[k] > 0.:
                flu.qdb[k] += aid.sfa[k]**(con.bsf[k]+1.)*con.nfk[k]
            # Direct runoff must never be negative.
            flu.qdb[k] = max(flu.qdb[k], 0.)
|
Calculate direct runoff released from the soil.
Required control parameters:
|NHRU|
|Lnk|
|NFk|
|BSf|
Required state sequence:
|BoWa|
Required flux sequence:
|WaDa|
Calculated flux sequence:
|QDB|
Basic equations:
:math:`QDB = \\Bigl \\lbrace
{
{max(Exz, 0) \\ | \\ SfA \\leq 0}
\\atop
{max(Exz + NFk \\cdot SfA^{BSf+1}, 0) \\ | \\ SfA > 0}
}`
:math:`SFA = (1 - \\frac{BoWa}{NFk})^\\frac{1}{BSf+1} -
\\frac{WaDa}{(BSf+1) \\cdot NFk}`
:math:`Exz = (BoWa + WaDa) - NFk`
Examples:
For water areas (|FLUSS| and |SEE|), sealed areas (|VERS|), and
areas without any soil storage capacity, all water is completely
routed as direct runoff |QDB| (see the first four HRUs). No
principal distinction is made between the remaining land use
classes (arable land |ACKER| has been selected for the last five
HRUs arbitrarily):
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> simulationstep('12h')
>>> nhru(9)
>>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER, ACKER, ACKER)
>>> bsf(0.4)
>>> nfk(100.0, 100.0, 100.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0)
>>> fluxes.wada = 10.0
>>> states.bowa = (
... 100.0, 100.0, 100.0, 0.0, -0.1, 0.0, 50.0, 100.0, 100.1)
>>> model.calc_qdb_v1()
>>> fluxes.qdb
qdb(10.0, 10.0, 10.0, 10.0, 0.142039, 0.144959, 1.993649, 10.0, 10.1)
With the common |BSf| value of 0.4, the discharge coefficient
increases more or less exponentially with soil moisture.
For soil moisture values slightly below zero or above usable
field capacity, plausible amounts of generated direct runoff
are ensured.
|
def register(lifter, arch_name):
    """
    Registers a Lifter or Postprocessor to be used by pyvex. Lifters are are given priority based on the order
    in which they are registered. Postprocessors will be run in registration order.

    :param lifter: The Lifter or Postprocessor to register
    :vartype lifter: :class:`Lifter` or :class:`Postprocessor`
    """
    # A single class may be both a Lifter and a Postprocessor, so the two
    # checks are independent (not elif).
    name = lifter.__name__
    if issubclass(lifter, Lifter):
        l.debug("Registering lifter %s for architecture %s.", name, arch_name)
        lifters[arch_name].append(lifter)
    if issubclass(lifter, Postprocessor):
        l.debug("Registering postprocessor %s for architecture %s.", name, arch_name)
        postprocessors[arch_name].append(lifter)
|
Registers a Lifter or Postprocessor to be used by pyvex. Lifters are are given priority based on the order
in which they are registered. Postprocessors will be run in registration order.
:param lifter: The Lifter or Postprocessor to register
:vartype lifter: :class:`Lifter` or :class:`Postprocessor`
|
def _clean_accents(self, text):
    """Remove most accent marks.

    Note that the circumflexes over alphas and iotas are kept in the
    text, since they determine vocalic quantity.

    :param text: raw text
    :return: clean text with minimum accent marks
    :rtype : string
    """
    accents = {
        'ὲέἐἑἒἓἕἔ': 'ε',
        'ὺύὑὐὒὓὔὕ': 'υ',
        'ὸόὀὁὂὃὄὅ': 'ο',
        'ὶίἰἱἲἳἵἴ': 'ι',
        'ὰάἁἀἂἃἅἄᾳᾂᾃ': 'α',
        'ὴήἠἡἢἣἥἤἧἦῆῄῂῇῃᾓᾒᾗᾖᾑᾐ': 'η',
        'ὼώὠὡὢὣὤὥὦὧῶῲῴῷῳᾧᾦᾢᾣᾡᾠ': 'ω',
        'ἶἷ': 'ῖ',
        'ἆἇᾷᾆᾇ': 'ᾶ',
        'ὖὗ': 'ῦ',
    }
    # Flatten the groups into a single char -> replacement table and do
    # one O(n) pass with str.translate, instead of scanning every accent
    # group for every character of the input.
    table = {}
    for chars, plain in accents.items():
        for char in chars:
            table[ord(char)] = plain
    return self._clean_text(text).translate(table)
|
Remove most accent marks.
Note that the circumflexes over alphas and iotas are kept in the text, since
they determine vocalic quantity.
:param text: raw text
:return: clean text with minimum accent marks
:rtype : string
|
def runfile(filename, args=None, wdir=None, namespace=None, post_mortem=False):
    """
    Run filename
    args: command line arguments (string)
    wdir: working directory
    namespace: dict of globals to run the file in (defaults to the
        interactive namespace)
    post_mortem: boolean, whether to enter post-mortem mode on error
    """
    try:
        filename = filename.decode('utf-8')
    except (UnicodeError, TypeError, AttributeError):
        # UnicodeError, TypeError --> eventually raised in Python 2
        # AttributeError --> systematically raised in Python 3
        pass
    if __umr__.enabled:
        # Run the User Module Reloader so edited modules get re-imported.
        __umr__.run()
    if args is not None and not isinstance(args, basestring):
        raise TypeError("expected a character buffer object")
    if namespace is None:
        namespace = _get_globals()
    namespace['__file__'] = filename
    # Rebuild sys.argv as if the file had been launched from the shell.
    sys.argv = [filename]
    if args is not None:
        for arg in shlex.split(args):
            sys.argv.append(arg)
    if wdir is not None:
        try:
            wdir = wdir.decode('utf-8')
        except (UnicodeError, TypeError, AttributeError):
            # UnicodeError, TypeError --> eventually raised in Python 2
            # AttributeError --> systematically raised in Python 3
            pass
        os.chdir(wdir)
    if post_mortem:
        set_post_mortem()
    if __umr__.has_cython:
        # Cython files
        with io.open(filename, encoding='utf-8') as f:
            ipython_shell = get_ipython()
            ipython_shell.run_cell_magic('cython', '', f.read())
    else:
        execfile(filename, namespace)
    clear_post_mortem()
    # Restore a pristine argv once execution finishes.
    sys.argv = ['']
    # Avoid error when running `%reset -f` programmatically
    # See issue spyder-ide/spyder-kernels#91
    try:
        namespace.pop('__file__')
    except KeyError:
        pass
|
Run filename
args: command line arguments (string)
wdir: working directory
post_mortem: boolean, whether to enter post-mortem mode on error
|
def check_stripe_api_host(app_configs=None, **kwargs):
    """
    Check that STRIPE_API_HOST is not being used in production.
    """
    from django.conf import settings

    messages = []
    # A custom API host is only legitimate for local/debug testing.
    if not settings.DEBUG and hasattr(settings, "STRIPE_API_HOST"):
        warning = checks.Warning(
            "STRIPE_API_HOST should not be set in production! This is most likely unintended.",
            hint="Remove STRIPE_API_HOST from your Django settings.",
            id="djstripe.W002",
        )
        messages.append(warning)
    return messages
|
Check that STRIPE_API_HOST is not being used in production.
|
def _class_type(klass, ancestors=None):
    """return a ClassDef node type to differ metaclass and exception
    from 'regular' classes
    """
    # XXX we have to store ancestors in case we have an ancestor loop
    if klass._type is not None:
        return klass._type
    if _is_metaclass(klass):
        klass._type = "metaclass"
        return klass._type
    if klass.name.endswith("Exception"):
        klass._type = "exception"
        return klass._type
    if ancestors is None:
        ancestors = set()
    klass_name = klass.qname()
    if klass_name in ancestors:
        # XXX we are in loop ancestors, and have found no type
        klass._type = "class"
        return "class"
    ancestors.add(klass_name)
    for base in klass.ancestors(recurs=False):
        base_type = _class_type(base, ancestors)
        if base_type == "class":
            continue
        if base_type == "metaclass" and not _is_metaclass(klass):
            # don't propagate it if the current class
            # can't be a metaclass
            continue
        klass._type = base.type
        break
    if klass._type is None:
        klass._type = "class"
    return klass._type
|
return a ClassDef node type to differ metaclass and exception
from 'regular' classes
|
def _is_redundant(self, matrix, cutoff=None):
    """Identify redundant rows in a matrix that can be removed.

    Two rows count as redundant when the absolute value of their
    correlation coefficient exceeds ``cutoff``.

    Parameters
    ----------
    matrix : numpy.ndarray
        2D array whose rows are tested for redundancy.
    cutoff : float, optional
        Correlation threshold. Defaults to ``1.0 - self.feasibility_tol``.
        (Bug fix: previously the passed-in value was unconditionally
        overwritten by the default, so the parameter was ignored.)

    Returns
    -------
    numpy.ndarray
        Boolean vector marking redundant rows.
    """
    if cutoff is None:
        cutoff = 1.0 - self.feasibility_tol
    # Avoid zero variances
    extra_col = matrix[:, 0] + 1
    # Avoid zero rows being correlated with constant rows
    extra_col[matrix.sum(axis=1) == 0] = 2
    corr = np.corrcoef(np.c_[matrix, extra_col])
    # Only consider each pair once (strict lower triangle).
    corr = np.tril(corr, -1)
    return (np.abs(corr) > cutoff).any(axis=1)
|
Identify redundant rows in a matrix that can be removed.
|
def authenticate(self, req, resp, resource):
    """
    Extract basic auth token from request `authorization` header, decode the
    token, verify the username/password, and return a ``user`` object if
    successful; otherwise raise a `falcon.HTTPUnauthorized` exception.
    """
    username, password = self._extract_credentials(req)
    user = self.user_loader(username, password)
    if user:
        return user
    raise falcon.HTTPUnauthorized(
        description='Invalid Username/Password')
|
Extract basic auth token from request `authorization` header, decode the
token, verify the username/password and return either a ``user``
object if successful else raise a `falcon.HTTPUnauthorized` exception
|
def region(self, start=0, end=None):
    '''
    Returns a region of ``Sequence.sequence``, in FASTA format.

    If called without kwargs, the entire sequence will be returned.

    Args:
        start (int): Start position of the region to be returned. Default
            is 0.
        end (int): End position of the region to be returned. Negative values
            will function as they do when slicing strings.

    Returns:
        str: A region of ``Sequence.sequence``, in FASTA format
    '''
    # ``end=None`` means "to the end of the sequence".
    subseq = self.sequence[start:end] if end is not None else self.sequence[start:]
    return '>{}\n{}'.format(self.id, subseq)
|
Returns a region of ``Sequence.sequence``, in FASTA format.
If called without kwargs, the entire sequence will be returned.
Args:
start (int): Start position of the region to be returned. Default
is 0.
end (int): End position of the region to be returned. Negative values
will function as they do when slicing strings.
Returns:
str: A region of ``Sequence.sequence``, in FASTA format
|
def expire_file(filepath):
    """ Expire a record for a missing file """
    # Drop the cached loader results so stale content is not served.
    load_message.cache_clear()
    # Delete path aliases first: each alias references an entry
    # (pa.entry), so they must go before the entries themselves.
    orm.delete(pa for pa in model.PathAlias if pa.entry.file_path == filepath)
    orm.delete(item for item in model.Entry if item.file_path == filepath)
    orm.commit()
|
Expire a record for a missing file
|
def add_events(self, names, send_event=True, event_factory=None):
    """
    Add events by name.

    This is called for you as needed if you allow auto creation of events
    (see __init__). Upon an event being added, all handlers are searched
    for if they have this event, and if they do, they are added to the
    Event's list of callables.

    :param tuple names: Names of the events to create.
    :param bool send_event: When True, fire ``on_add_event`` for each name.
    :param event_factory: Callable producing a new event object; defaults
        to ``self.event_factory``.
    """
    if not event_factory:
        event_factory = self.event_factory
    # Create events
    self.events.update({name: event_factory() for name in names})
    # Inspect handlers to see if they should be attached to the new events.
    # Plain loops (not list comprehensions): these calls are executed
    # purely for their side effects.
    for handler in self.handlers:
        self._attach_handler_events(handler, events=names)
    if send_event:
        for name in names:
            self.on_add_event(name)
|
Add event by name.
This is called for you as needed if you allow auto creation of events (see __init__).
Upon an event being added, all handlers are searched for if they have this event,
and if they do, they are added to the Event's list of callables.
:param tuple names: Names
|
def __execute_str(self, instr):
    """Execute STR instruction.

    Reads the value of the first operand and stores it into the third
    operand (the second operand is unused by this handler).
    """
    value = self.read_operand(instr.operands[0])
    self.write_operand(instr.operands[2], value)
    return None
|
Execute STR instruction.
|
def to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):
    """
    Convert Euler angles in degrees to a quaternion [w, x, y, z].
    """
    # Half angles in radians, one cos/sin pair per axis.
    half_yaw = math.radians(yaw) * 0.5
    half_roll = math.radians(roll) * 0.5
    half_pitch = math.radians(pitch) * 0.5
    cy, sy = math.cos(half_yaw), math.sin(half_yaw)
    cr, sr = math.cos(half_roll), math.sin(half_roll)
    cp, sp = math.cos(half_pitch), math.sin(half_pitch)
    return [
        cy * cr * cp + sy * sr * sp,  # w
        cy * sr * cp - sy * cr * sp,  # x
        cy * cr * sp + sy * sr * cp,  # y
        sy * cr * cp - cy * sr * sp,  # z
    ]
|
Convert degrees to quaternions
|
def hexblock_dword(cls, data, address = None,
                   bits = None,
                   separator = ' ',
                   width = 4):
    """
    Dump a block of hexadecimal DWORDs from binary data.

    @type  data: str
    @param data: Binary data.

    @type  address: str
    @param address: Memory address where the data was read from.

    @type  bits: int
    @param bits:
        (Optional) Number of bits of the target architecture.
        The default is platform dependent. See: L{HexDump.address_size}

    @type  separator: str
    @param separator:
        Separator between the hexadecimal representation of each DWORD.

    @type  width: int
    @param width:
        (Optional) Maximum number of DWORDs to convert per text line.

    @rtype:  str
    @return: Multiline output text.
    """
    # Each DWORD is 4 bytes wide, so convert the per-line DWORD count
    # into the per-line byte count expected by hexblock_cb.
    bytes_per_line = width * 4
    return cls.hexblock_cb(cls.hexa_dword, data, address, bits,
                           bytes_per_line,
                           cb_kwargs={'separator': separator})
|
Dump a block of hexadecimal DWORDs from binary data.
@type data: str
@param data: Binary data.
@type address: str
@param address: Memory address where the data was read from.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@type separator: str
@param separator:
Separator between the hexadecimal representation of each DWORD.
@type width: int
@param width:
(Optional) Maximum number of DWORDs to convert per text line.
@rtype: str
@return: Multiline output text.
|
def add_action_view(self, name, url, actions, **kwargs):
    """Creates an ActionsView instance and registers it.

    :param name: name of the view.
    :param url: URL the view is mounted at.
    :param actions: either a flat list of actions, or a dict mapping a
        group name to a list of actions for that group.
    :returns: the registered ActionsView instance.
    """
    view = ActionsView(name, url=url, self_var=self, **kwargs)
    if isinstance(actions, dict):
        # items() works on both Python 2 and 3 (iteritems() is Py2-only).
        # Use a distinct loop variable: the original rebound `actions`
        # while iterating over it.
        for group, group_actions in actions.items():
            view.actions.extend(load_actions(group_actions, group=group or None))
    else:
        view.actions.extend(load_actions(actions))
    self.add_view(view)
    return view
|
Creates an ActionsView instance and registers it.
|
def register_standard (id, source_types, target_types, requirements = None):
    """ Creates new instance of the 'generator' class and registers it.
        Returns the created instance.
        Rationale: the instance is returned so that it's possible to first register
        a generator and then call 'run' method on that generator, bypassing all
        generator selection.

        Note: `requirements` defaults to None instead of a mutable `[]` so the
        default list cannot be shared (and mutated) across calls.
    """
    if requirements is None:
        requirements = []
    g = Generator (id, False, source_types, target_types, requirements)
    register (g)
    return g
|
Creates new instance of the 'generator' class and registers it.
Returns the created instance.
Rationale: the instance is returned so that it's possible to first register
a generator and then call 'run' method on that generator, bypassing all
generator selection.
|
def _GetAPFSVolumeIdentifiers(self, scan_node):
    """Determines the APFS volume identifiers.

    Args:
      scan_node (dfvfs.SourceScanNode): scan node.

    Returns:
      list[str]: APFS volume identifiers, or an empty list if the volume
          system contains no volumes.

    Raises:
      SourceScannerError: if the format of or within the source is not
          supported or the scan node is invalid.
      UserAbort: if the user requested to abort.
    """
    if not scan_node or not scan_node.path_spec:
      raise errors.SourceScannerError('Invalid scan node.')

    volume_system = apfs_volume_system.APFSVolumeSystem()
    volume_system.Open(scan_node.path_spec)

    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
      return []

    # TODO: refactor self._volumes to use scan options.
    if self._volumes:
      # 'all' selects every volume in the volume system; otherwise
      # self._volumes holds an explicit volume selection.
      if self._volumes == 'all':
        volumes = range(1, volume_system.number_of_volumes + 1)
      else:
        volumes = self._volumes

      selected_volume_identifiers = self._NormalizedVolumeIdentifiers(
          volume_system, volumes, prefix='apfs')

      # Only honor the preselection when every selected volume actually
      # exists in the volume system; otherwise fall through to prompting.
      if not set(selected_volume_identifiers).difference(volume_identifiers):
        return selected_volume_identifiers

    if len(volume_identifiers) > 1:
      # Multiple volumes and no (valid) preselection: ask the user.
      try:
        volume_identifiers = self._PromptUserForAPFSVolumeIdentifiers(
            volume_system, volume_identifiers)
      except KeyboardInterrupt:
        raise errors.UserAbort('File system scan aborted.')

    return self._NormalizedVolumeIdentifiers(
        volume_system, volume_identifiers, prefix='apfs')
|
Determines the APFS volume identifiers.
Args:
scan_node (dfvfs.SourceScanNode): scan node.
Returns:
list[str]: APFS volume identifiers.
Raises:
SourceScannerError: if the format of or within the source is not
supported or the scan node is invalid.
UserAbort: if the user requested to abort.
|
def calc_mass_2(mh,cm,nm,teff,logg):
    """Evaluate the polynomial mass fit of Table A2 in Martig et al. (2016).

    Args:
        mh: [M/H] metallicity.
        cm: [C/M] carbon abundance.
        nm: [N/M] nitrogen abundance.
        teff: effective temperature in K (scaled by 4000 K inside the fit).
        logg: surface gravity, log g.

    Returns:
        float: the fitted mass -- presumably in solar masses per Martig
        et al. (2016) Table A2; TODO confirm units against the paper.
    """
    # Combined C+N abundance; several cross terms below depend on it.
    CplusN = calc_sum(mh,cm,nm)
    # Temperature scaled to the fit's 4000 K reference value.
    t = teff/4000.
    # Coefficients copied verbatim from Table A2 of Martig et al. (2016).
    return (95.8689 - 10.4042*mh - 0.7266*mh**2
        + 41.3642*cm - 5.3242*cm*mh - 46.7792*cm**2
        + 15.0508*nm - 0.9342*nm*mh - 30.5159*nm*cm - 1.6083*nm**2
        - 67.6093*CplusN + 7.0486*CplusN*mh + 133.5775*CplusN*cm + 38.9439*CplusN*nm - 88.9948*CplusN**2
        - 144.1765*t + 5.1180*t*mh - 73.7690*t*cm - 15.2927*t*nm + 101.7482*t*CplusN + 27.7690*t**2
        - 9.4246*logg + 1.5159*logg*mh + 16.0412*logg*cm + 1.3549*logg*nm - 18.6527*logg*CplusN + 28.8015*logg*t - 4.0982*logg**2)
|
Table A2 in Martig 2016
|
def mach2cas(Mach, H):
    """Convert a Mach number at altitude H to Calibrated Airspeed.

    Goes via True Airspeed: Mach -> TAS -> CAS.
    """
    return tas2cas(mach2tas(Mach, H), H)
|
Mach number to Calibrated Airspeed
|
def compute_composite_distance(distance, x, y):
    """
    Compute the value of a composite distance function on two dictionaries,
    typically SFrame rows.

    Parameters
    ----------
    distance : list[list]
        A composite distance function. Composite distance functions are a
        weighted sum of standard distance functions, each of which applies to
        its own subset of features. Composite distance functions are specified
        as a list of distance components, each of which is itself a list
        containing three items:

        1. list or tuple of feature names (strings)

        2. standard distance name (string)

        3. scaling factor (int or float)

    x, y : dict
        Individual observations, typically rows of an SFrame, in dictionary
        form. Must include the features specified by `distance`.

    Returns
    -------
    out : float
        The distance between `x` and `y`, as specified by `distance`.

    Examples
    --------
    >>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
    ...                         'X2': [0.69, 0.58, 0.36],
    ...                         'species': ['cat', 'dog', 'fossa']})
    ...
    >>> dist_spec = [[('X1', 'X2'), 'euclidean', 2],
    ...              [('species',), 'levenshtein', 0.4]]
    ...
    >>> d = turicreate.distances.compute_composite_distance(dist_spec, sf[0], sf[1])
    >>> print d
    1.95286120899
    """
    ## Validate inputs
    _validate_composite_distance(distance)
    distance = _convert_distance_names_to_functions(distance)

    if not isinstance(x, dict) or not isinstance(y, dict):
        raise TypeError("Inputs 'x' and 'y' must be in dictionary form. " +
                        "Selecting individual rows of an SFrame yields the " +
                        "correct format.")

    ans = 0.

    for d in distance:
        ftrs, dist, weight = d

        ## Special check for multiple columns with levenshtein distance.
        if dist == _tc.distances.levenshtein and len(ftrs) > 1:
            # Fixed: the original message concatenated to "multiplecolumns"
            # (missing space between the two string fragments).
            raise ValueError("levenshtein distance cannot be used with multiple " +
                             "columns. Please concatenate strings into a single " +
                             "column before computing the distance.")

        ## Extract values for specified features.
        a = {}
        b = {}

        for ftr in ftrs:
            if type(x[ftr]) != type(y[ftr]):
                # Mixed int/float is tolerated; any other type mismatch is an error.
                if not isinstance(x[ftr], (int, float)) or not isinstance(y[ftr], (int, float)):
                    raise ValueError("Input data has different types.")

            if isinstance(x[ftr], (int, float, str)):
                a[ftr] = x[ftr]
                b[ftr] = y[ftr]

            elif isinstance(x[ftr], dict):
                # Flatten dict features into "feature.key" entries.
                for key, val in _six.iteritems(x[ftr]):
                    a['{}.{}'.format(ftr, key)] = val

                for key, val in _six.iteritems(y[ftr]):
                    b['{}.{}'.format(ftr, key)] = val

            elif isinstance(x[ftr], (list, _array.array)):
                # Flatten sequence features into index-keyed entries.
                for i, val in enumerate(x[ftr]):
                    a[i] = val

                for i, val in enumerate(y[ftr]):
                    b[i] = val

            else:
                raise TypeError("Type of feature '{}' not understood.".format(ftr))

        ## Pull out the raw values for levenshtein
        if dist == _tc.distances.levenshtein:
            a = list(a.values())[0]
            b = list(b.values())[0]

        ## Compute component distance and add to the total distance.
        ans += weight * dist(a, b)

    return ans
|
Compute the value of a composite distance function on two dictionaries,
typically SFrame rows.
Parameters
----------
distance : list[list]
A composite distance function. Composite distance functions are a
weighted sum of standard distance functions, each of which applies to
its own subset of features. Composite distance functions are specified
as a list of distance components, each of which is itself a list
containing three items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
x, y : dict
Individual observations, typically rows of an SFrame, in dictionary
form. Must include the features specified by `distance`.
Returns
-------
out : float
The distance between `x` and `y`, as specified by `distance`.
Examples
--------
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'species': ['cat', 'dog', 'fossa']})
...
>>> dist_spec = [[('X1', 'X2'), 'euclidean', 2],
... [('species',), 'levenshtein', 0.4]]
...
>>> d = turicreate.distances.compute_composite_distance(dist_spec, sf[0], sf[1])
>>> print d
1.95286120899
|
def hellinger(Ks, dim, required, clamp=True, to_self=False):
    r'''
    Estimate the Hellinger distance between distributions, based on kNN
    distances: \sqrt{1 - \int \sqrt{p q}}

    Always enforces 0 <= H, to be able to sqrt; if clamp, also enforces
    H <= 1.

    Returns a vector: one element for each K.

    Ks, dim and to_self are unused by this estimator; `required` carries
    the precomputed values the estimate is built from.
    '''
    squared = 1 - required
    if clamp:
        # Restrict the squared distance to [0, 1] before the sqrt.
        squared = np.clip(squared, 0, 1)
    else:
        # Only the lower bound is needed for the sqrt to be well-defined.
        squared = np.maximum(squared, 0)
    return np.sqrt(squared)
|
r'''
Estimate the Hellinger distance between distributions, based on kNN
distances: \sqrt{1 - \int \sqrt{p q}}
Always enforces 0 <= H, to be able to sqrt; if clamp, also enforces
H <= 1.
Returns a vector: one element for each K.
|
def configure(self, ext):
    """Configures the given Extension object using this build configuration.

    Appends this configuration's values to the corresponding list
    attributes of `ext`.
    """
    for attr in ('include_dirs', 'library_dirs', 'libraries',
                 'extra_compile_args', 'extra_link_args', 'extra_objects'):
        # Reproduce `ext.attr += self.attr` semantics exactly:
        # in-place extend (for lists) followed by a rebind.
        value = getattr(ext, attr)
        value += getattr(self, attr)
        setattr(ext, attr, value)
|
Configures the given Extension object using this build configuration.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.