code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def add(self, chassis):
    """Add a chassis to the chassis chain and connect to it.

    :param chassis: chassis IP address.
    """
    slot = len(self.chassis_chain) + 1
    new_chassis = IxeChassis(self.session, chassis, slot)
    self.chassis_chain[chassis] = new_chassis
    new_chassis.connect()
def elapsed():
    """Display the elapsed time since the current step started running."""
    environ.abort_thread()
    step = _cd.project.get_internal_project().current_step
    report = _get_report()
    report.append_body(render.elapsed_time(step.elapsed_time))
    message = '[ELAPSED]: {}\n'.format(timedelta(seconds=step.elapsed_time))
    report.stdout_interceptor.write_source(message)
def record_little_endian(self):
    """A method to generate a string representing the little endian version
    of this Path Table Record.

    Returns:
        A string representing the little endian version of this Path Table Record.

    Raises:
        PyCdlibInternalError: if the record has not been initialized yet.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
    # Delegate the actual serialization; the little-endian variant stores
    # extent location and parent directory number as-is.
    return self._record(self.extent_location, self.parent_directory_num)
def schwefelmult(self, x, pen_fac=1e4):
    """Multimodal Schwefel function with domain -500..500.

    Accepts a single point or a sequence of points; coordinates outside
    the domain incur a quadratic penalty scaled by ``pen_fac``.
    """
    population = [x] if isscalar(x[0]) else x
    dim = len(population[0])
    values = []
    for point in population:
        base = (418.9829 * dim - 1.27275661e-5 * dim
                - sum(point * np.sin(np.abs(point)**0.5)))
        penalty = pen_fac * sum((abs(point) > 500) * (abs(point) - 500)**2)
        values.append(base + penalty)
    f = array(values)
    return f if len(f) > 1 else f[0]
def coerce(cls, arg):
    """Given an arg, return the appropriate value for the class.

    :raises InvalidParameterDatatype: when the conversion fails.
    """
    try:
        return cls(arg).value
    except (ValueError, TypeError):
        message = "%s coerce error" % (cls.__name__,)
        raise InvalidParameterDatatype(message)
def breadcrumb(self):
    """Get the category hierarchy leading up to this category, including
    root and self.

    For example, path/to/long/category returns a list containing
    Category('path'), Category('path/to'), and Category('path/to/long').
    """
    chain = []
    node = self
    while node:
        chain.append(node)
        node = node.parent
    chain.reverse()
    return chain
def rm_known_host(user=None, hostname=None, config=None, port=None):
    """Remove all keys belonging to hostname from a known_hosts file.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.rm_known_host <user> <hostname>
    """
    if not hostname:
        return {'status': 'error', 'error': 'hostname argument required'}
    known_hosts = _get_known_hosts_file(config=config, user=user)
    if isinstance(known_hosts, dict):
        # _get_known_hosts_file signals errors by returning a dict.
        return known_hosts
    if not os.path.isfile(known_hosts):
        return {'status': 'error',
                'error': 'Known hosts file {0} does not exist'.format(known_hosts)}
    ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port)
    output = __salt__['cmd.run'](
        ['ssh-keygen', '-R', ssh_hostname, '-f', known_hosts],
        python_shell=False)
    if not salt.utils.platform.is_windows() and os.geteuid() == 0 and user:
        # ssh-keygen rewrote the file as root; restore the user's ownership.
        uinfo = __salt__['user.info'](user)
        os.chown(known_hosts, uinfo['uid'], uinfo['gid'])
    return {'status': 'removed', 'comment': output}
def list(self):
    """List all session ids present in the store.

    .. versionadded:: 0.6
    """
    prefix, suffix = self.filename_template.split('%s', 1)
    pattern = re.compile(r'%s(.{5,})%s$' % (re.escape(prefix),
                                            re.escape(suffix)))
    sids = []
    for name in os.listdir(self.path):
        if name.endswith(_fs_transaction_suffix):
            # Skip files belonging to in-flight transactional writes.
            continue
        m = pattern.match(name)
        if m is not None:
            sids.append(m.group(1))
    return sids
def warn(self, msg):
    """Record a docutils warning for the current line.

    :param msg: The warning message to add.
    :type msg: str
    """
    warning = self.state.document.reporter.warning(msg, line=self.lineno)
    self.warnings.append(warning)
def instances(self):
    """Iterate over the instances of the class."""
    handle = lib.EnvGetNextInstanceInClass(self._env, self._cls, ffi.NULL)
    while handle != ffi.NULL:
        yield Instance(self._env, handle)
        handle = lib.EnvGetNextInstanceInClass(self._env, self._cls, handle)
def to_networkx_graph(self, node_attribute_name='bias', edge_attribute_name='bias'):
    """Convert a binary quadratic model to NetworkX graph format.

    Args:
        node_attribute_name (hashable, optional, default='bias'):
            Attribute name for linear biases.
        edge_attribute_name (hashable, optional, default='bias'):
            Attribute name for quadratic biases.

    Returns:
        :class:`networkx.Graph`: A NetworkX graph with biases stored as
        node/edge attributes; the model's ``offset`` and ``vartype`` are
        attached as graph attributes.
    """
    import networkx as nx

    graph = nx.Graph()
    # Dict payloads (rather than **kwargs) keep non-string attribute
    # names working.
    graph.add_nodes_from(
        (v, {node_attribute_name: bias, 'vartype': self.vartype})
        for v, bias in iteritems(self.linear))
    graph.add_edges_from(
        (u, v, {edge_attribute_name: bias})
        for (u, v), bias in iteritems(self.quadratic))
    graph.offset = self.offset
    graph.vartype = self.vartype
    return graph
def name_check(self, original, loc, tokens):
    """Check the given base name."""
    internal_assert(len(tokens) == 1, "invalid name tokens", tokens)
    name = tokens[0]
    if self.strict:
        # A used name is no longer an unused import in strict mode.
        self.unused_imports.discard(name)
    if name == "exec":
        return self.check_py("3", "exec function", original, loc, tokens)
    if name.startswith(reserved_prefix):
        raise self.make_err(
            CoconutSyntaxError,
            "variable names cannot start with reserved prefix " + reserved_prefix,
            original, loc)
    return name
def _get_projection(self):
try:
proj_str = self.nc.attrs['gdal_projection']
except TypeError:
proj_str = self.nc.attrs['gdal_projection'].decode()
radius_a = proj_str.split('+a=')[-1].split()[0]
if float(radius_a) > 10e3:
units = 'm'
scale = 1.0
else:
units = 'km'
scale = 1e3
if 'units' not in proj_str:
proj_str = proj_str + ' +units=' + units
area_extent = (float(self.nc.attrs['gdal_xgeo_up_left']) / scale,
float(self.nc.attrs['gdal_ygeo_low_right']) / scale,
float(self.nc.attrs['gdal_xgeo_low_right']) / scale,
float(self.nc.attrs['gdal_ygeo_up_left']) / scale)
return proj_str, area_extent | Get projection from the NetCDF4 attributes |
def stream_decode_response_unicode(iterator, r):
    """Stream-decode an iterator of byte chunks using the response encoding.

    Chunks are yielded unchanged when the headers carry no encoding.
    """
    encoding = get_encoding_from_headers(r.headers)
    if encoding is None:
        for chunk in iterator:
            yield chunk
        return
    decoder = codecs.getincrementaldecoder(encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush any buffered partial character at end of stream.
    # NOTE(review): on Python 3 this final decode should probably receive
    # b'' rather than '' -- confirm the supported Python versions.
    tail = decoder.decode('', final=True)
    if tail:
        yield tail
def get_zones(self):
    """Get all zones across every receiver; [] when the home query failed."""
    home_data = self.get_home()
    if not home_data['isSuccess']:
        return []
    return [zone
            for receiver in home_data['data']['receivers']
            for zone in receiver['zones']]
def labels_to_onehots(labels, num_classes):
    """Convert a vector of integer class labels to one-hot target vectors.

    :param labels: integer labels in [0, num_classes); shape (batch_size,).
    :param num_classes: the total number of classes.
    :return: tensor of shape (batch_size, num_classes).
    """
    batch_size = labels.get_shape().as_list()[0]
    with tf.name_scope("one_hot"):
        # Build (row, label) index pairs for the sparse scatter.
        # NOTE(review): tf.concat(axis_first) and tf.sparse_to_dense are
        # pre-1.0 TensorFlow APIs -- confirm the project's TF version.
        expanded_labels = tf.expand_dims(labels, 1)
        row_indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
        sparse_ptrs = tf.concat(1, [row_indices, expanded_labels], name="ptrs")
        return tf.sparse_to_dense(sparse_ptrs, [batch_size, num_classes],
                                  1.0, 0.0)
def equiv(self, other):
    """Return True if ``other`` is an equivalent weighting.

    Returns
    -------
    equivalent : bool
        ``True`` if ``other`` is a `Weighting` instance with the same
        exponent that yields the same result as this weighting for any
        input, checked by entry-wise comparison of arrays/constants.
    """
    if self == other:
        return True
    if not isinstance(other, Weighting) or self.exponent != other.exponent:
        return False
    if isinstance(other, MatrixWeighting):
        # Matrix weightings implement the (more expensive) comparison.
        return other.equiv(self)
    if isinstance(other, ConstWeighting):
        return np.array_equiv(self.array, other.const)
    return np.array_equal(self.array, other.array)
def check_data_port_connection(self, check_data_port):
    """Check the connection validity of a data port of a child state.

    Whenever the port participates in a data flow, the connected ports'
    data types must be compatible (or one side must be ``object``).

    :param rafcon.core.data_port.DataPort check_data_port: The port to check
    :return: (valid, message) tuple
    """
    for data_flow in self.data_flows.values():
        from_port = self.get_data_port(data_flow.from_state, data_flow.from_key)
        to_port = self.get_data_port(data_flow.to_state, data_flow.to_key)
        if check_data_port is not from_port and check_data_port is not to_port:
            continue
        if from_port.data_type is object or to_port.data_type is object:
            # 'object' is compatible with everything.
            continue
        if not type_inherits_of_type(from_port.data_type, to_port.data_type):
            return False, "Connection of two non-compatible data types"
    return True, "valid"
def required(self, method, _dict, require):
    """Ensure all required keys are present in ``_dict``.

    :raises LunrError: naming the first missing key.
    """
    for key in require:
        if key in _dict:
            continue
        raise LunrError("'%s' is required argument for method '%s'"
                        % (key, method))
def at_line(self, line: FileLine) -> Iterator[InsertionPoint]:
    """Iterate over all insertion points located at a given line."""
    logger.debug("finding insertion points at line: %s", str(line))
    target_num = line.num
    for point in self.in_file(line.filename):
        if point.location.line == target_num:
            logger.debug("found insertion point at line [%s]: %s",
                         str(line), point)
            yield point
def _access_token(self, request: Request=None, page_id: Text=''):
    """Guess the page access token for that specific request.

    :raises PlatformOperationError: when the page is not configured.
    """
    if not page_id:
        page_id = request.message.get_page_id()
    page = self.settings()
    if page['page_id'] == page_id:
        return page['page_token']
    raise PlatformOperationError('Trying to get access token of the '
                                 'page "{}", which is not configured.'
                                 .format(page_id))
def existing_path(value):
    """Argparse type check: return ``value`` unchanged if the path exists.

    :raises argparse.ArgumentTypeError: when the path does not exist.
    """
    if not os.path.exists(value):
        raise argparse.ArgumentTypeError("Path {0} not found".format(value))
    return value
def _textlist(self, _addtail=False):
result = []
if (not _addtail) and (self.text is not None):
result.append(self.text)
for elem in self:
result.extend(elem.textlist(True))
if _addtail and self.tail is not None:
result.append(self.tail)
return result | Returns a list of text strings contained within an element and its sub-elements.
Helpful for extracting text from prose-oriented XML (such as XHTML or DocBook). |
def modified_files(root, tracked_only=False, commit=None):
    """Return the files modified since the last commit in an hg repository.

    Args:
        root: absolute path to the repository root.
        tracked_only: exclude untracked files when True.
        commit: SHA1 of a commit; when None, inspect the working copy.

    Returns:
        dict mapping absolute filenames to their ``hg status`` mode.
    """
    assert os.path.isabs(root), "Root has to be absolute, got: %s" % root
    command = ['hg', 'status']
    if commit:
        command.append('--change=%s' % commit)
    output = subprocess.check_output(command).decode('utf-8')
    status_lines = output.split(os.linesep)
    modes = ['M', 'A'] if tracked_only else ['M', 'A', r'\?']
    pattern = r'(?P<mode>%s) (?P<filename>.+)' % '|'.join(modes)
    matches = utils.filter_lines(status_lines, pattern,
                                 groups=('filename', 'mode'))
    return dict((os.path.join(root, filename), mode)
                for filename, mode in matches)
def frontendediting_request_processor(page, request):
    """Set the frontend editing state cookie depending on the
    ``frontend_editing`` GET parameter and the user's permissions.

    Returns a redirect to the same path (dropping the GET parameter),
    or None when the parameter is absent.
    """
    if 'frontend_editing' not in request.GET:
        return
    response = HttpResponseRedirect(request.path)
    if request.user.has_module_perms('page'):
        # The guard above guarantees the parameter is present here; the
        # original re-checked it and carried an unreachable else branch.
        try:
            enable_fe = int(request.GET['frontend_editing']) > 0
        except ValueError:
            enable_fe = False
        if enable_fe:
            response.set_cookie(str('frontend_editing'), enable_fe)
        else:
            response.delete_cookie(str('frontend_editing'))
        clear_cache()
    else:
        # No permission: make sure any stale cookie is removed.
        response.delete_cookie(str('frontend_editing'))
    return response
def remove_raw(self, length_tag, value_tag):
    """Remove the tag pair registered for a raw data type field.

    :param length_tag: tag number of the length field.
    :param value_tag: tag number of the value field.

    Either private or standard data field definitions can be removed in
    case a particular application uses them for a field of a different
    type.
    """
    self.raw_len_tags.remove(length_tag)
    self.raw_data_tags.remove(value_tag)
def deltas(errors, epsilon, mean, std):
    """Compute mean and std deltas relative to below-epsilon errors.

    delta_mean = mean - mean(errors below epsilon)
    delta_std  = std  - std(errors below epsilon)

    Returns (0, 0) when no error falls at or below epsilon.
    """
    below = errors[errors <= epsilon]
    if len(below) == 0:
        return 0, 0
    return mean - below.mean(), std - below.std()
def validate_offset(reference_event, estimated_event, t_collar=0.200, percentage_of_length=0.5):
    """Validate an estimated event based on its offset.

    The estimated offset is valid when it lies within
    ``max(t_collar, percentage_of_length * annotated_length)`` of the
    reference offset.  Both 'event_onset'/'event_offset' and
    'onset'/'offset' key styles are supported.

    Parameters
    ----------
    reference_event : dict
        Reference event.
    estimated_event : dict
        Estimated event.
    t_collar : float > 0, seconds
        Time collar within which the estimated offset must fall.
        Default value 0.2
    percentage_of_length : float in [0, 1]
        Fraction of the annotated length used as an alternative tolerance.
        Default value 0.5

    Returns
    -------
    bool (None when neither key style is present in both events)
    """
    for onset_key, offset_key in (('event_onset', 'event_offset'),
                                  ('onset', 'offset')):
        if offset_key in reference_event and offset_key in estimated_event:
            annotated_length = (reference_event[offset_key]
                                - reference_event[onset_key])
            tolerance = max(t_collar, percentage_of_length * annotated_length)
            return math.fabs(reference_event[offset_key]
                             - estimated_event[offset_key]) <= tolerance
def displaceAbs(x, y, sourcePos_x, sourcePos_y):
    """Calculate a grid of radial distances to the source position.

    :param x: x coordinate(s)
    :param y: y coordinate(s)
    :param sourcePos_x: source position x component
    :param sourcePos_y: source position y component
    :returns: array of displacement magnitudes
    """
    dx = x - sourcePos_x
    dy = y - sourcePos_y
    return np.sqrt(dx**2 + dy**2)
def teardown(self):
if not self._torn:
self._expectations = []
self._torn = True
self._teardown() | Clean up all expectations and restore the original attribute of the
mocked object. |
def draw(self, milliseconds, surface):
    """Render the bounds of this collision object onto the specified surface.

    :param milliseconds: elapsed time, forwarded to the parent draw.
    :param surface: target surface to render onto.
    """
    # Delegate entirely to the superclass implementation.
    super(CollidableObj, self).draw(milliseconds, surface)
def clean_names(lines, ensure_unique_names=False, strip_prefix=False,
                make_database_safe=False):
    """Clean the 'name' fields of the given rows in place.

    Options to:
      - strip prefixes (up to the first '-') on names and indexed_by
      - enforce unique names by appending "-2", "-3", ...
      - make database safe names by converting - to _

    Returns the (mutated) input rows.
    """
    seen = {}
    for row in lines:
        if strip_prefix:
            row['name'] = row['name'][row['name'].find('-') + 1:]
            if row['indexed_by'] is not None:
                row['indexed_by'] = row['indexed_by'][
                    row['indexed_by'].find('-') + 1:]
        if ensure_unique_names:
            suffix = 1
            candidate = row['name']
            while candidate in seen:
                suffix += 1
                candidate = row['name'] + "-" + str(suffix)
            seen[candidate] = 1
            row['name'] = candidate
        if make_database_safe:
            row['name'] = row['name'].replace("-", "_")
    return lines
def parse_line(self, line):
    """Parse a single CRTF line, appending any parsed region to self.shapes."""
    if line == '':
        return
    if regex_comment.search(line):
        return
    global_parameters = regex_global.search(line)
    if global_parameters:
        self.parse_global_meta(global_parameters.group('parameters'))
        return
    crtf_line = regex_line.search(line)
    if not crtf_line:
        self._raise_error("Not a valid CRTF line: '{0}'.".format(line))
        return
    region = regex_region.search(crtf_line.group('region'))
    region_type = region.group('regiontype').lower()
    if region_type not in self.valid_definition:
        self._raise_error("Not a valid CRTF Region type: '{0}'.".format(region_type))
        return
    helper = CRTFRegionParser(self.global_meta,
                              region.group('include') or '+',
                              region.group('type') or 'reg',
                              region_type,
                              *crtf_line.group('region', 'parameters'))
    self.shapes.append(helper.shape)
def join(self):
    """Wait for all queued tasks to finish, running at most
    ``self._concurrency`` of them at a time.

    Raises one of the collected task exceptions (an arbitrary one) after
    everything has completed, if any task failed.
    """
    pending = set()
    exceptions = set()
    # Keep looping while work is queued or still in flight.
    while len(self._tasks) > 0 or len(pending) > 0:
        # Top up the in-flight set up to the concurrency limit.
        while len(self._tasks) > 0 and len(pending) < self._concurrency:
            task, args, kwargs = self._tasks.pop(0)
            pending.add(task(*args, **kwargs))
        # Block until at least one in-flight task completes.
        (done, pending) = yield from asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            if task.exception():
                exceptions.add(task.exception())
    if len(exceptions) > 0:
        # NOTE(review): only one collected exception is re-raised;
        # the others are discarded.
        raise exceptions.pop()
def dskrb2(vrtces, plates, corsys, corpar):
    """Determine range bounds for a set of triangular plates to be stored
    in a type 2 DSK segment.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskrb2_c.html

    :param vrtces: Vertices (NxM array of floats)
    :param plates: Plates (NxM array of ints)
    :param corsys: DSK coordinate system code
    :type corsys: int
    :param corpar: DSK coordinate system parameters
    :return: Lower and upper bound on the range of the third coordinate
    :rtype: tuple
    """
    vertex_count = ctypes.c_int(len(vrtces))
    vertex_matrix = stypes.toDoubleMatrix(vrtces)
    plate_count = ctypes.c_int(len(plates))
    plate_matrix = stypes.toIntMatrix(plates)
    corsys = ctypes.c_int(corsys)
    corpar = stypes.toDoubleVector(corpar)
    mncor3 = ctypes.c_double(0.0)
    mxcor3 = ctypes.c_double(0.0)
    libspice.dskrb2_c(vertex_count, vertex_matrix, plate_count, plate_matrix,
                      corsys, corpar,
                      ctypes.byref(mncor3), ctypes.byref(mxcor3))
    return mncor3.value, mxcor3.value
def check_install_conflicts(to_install):
    """Check whether the dependency graph would stay consistent after
    installing the given requirements.

    Returns the current package set plus the conflicts found among packages
    affected by the simulated install.
    """
    package_set, _ = create_package_set_from_installed()
    simulated = _simulate_installation_of(to_install, package_set)
    # Only packages touched by the install are interesting for conflicts.
    whitelist = _create_whitelist(simulated, package_set)
    conflicts = check_package_set(
        package_set, should_ignore=lambda name: name not in whitelist)
    return package_set, conflicts
def constrain_norms(self, srcNames, cov_scale=1.0):
    """Constrain source normalizations by adding gaussian priors with sigma
    equal to the parameter error times a scaling factor.

    Fixed parameters and parameters with zero error are skipped.
    """
    for name in srcNames:
        par = self.like.normPar(name)
        err = par.error()
        val = par.getValue()
        if err == 0.0 or not par.isFree():
            continue
        self.add_gauss_prior(name, par.getName(), val, err * cov_scale)
def RecursiveMultiListChildren(self, urns, limit=None, age=NEWEST_TIME):
    """Recursively list children for a set of urns.

    Args:
        urns: List of urns to list children.
        limit: Max number of children to list (NOTE: this is per urn).
        age: The age of the items to retrieve. Should be one of ALL_TIMES,
            NEWEST_TIME or a range.

    Yields:
        (subject, children urns) tuples, breadth-first: children of the
        initial urns first, then the children's children, and so on.
    """
    checked_urns = set()
    frontier = urns
    while True:
        discovered = []
        for subject, values in self.MultiListChildren(frontier, limit=limit,
                                                      age=age):
            discovered.extend(values)
            yield subject, values
        checked_urns.update(frontier)
        # Only descend into urns we have not listed yet.
        frontier = set(discovered) - checked_urns
        if not frontier:
            break
def parse(self, data):
    """Return a list of path template segments parsed from ``data``.

    Args:
        data: A path template string.

    Returns:
        A list of _Segment.

    Raises:
        ValidationException: if the template contains more than one '**'
            path wildcard.
    """
    self.binding_var_count = 0
    self.segment_count = 0
    segments = self.parser.parse(data)
    seen_path_wildcard = False
    for segment in segments:
        if segment.kind == _TERMINAL and segment.literal == '**':
            if seen_path_wildcard:
                raise ValidationException(
                    'validation error: path template cannot contain more '
                    'than one path wildcard')
            seen_path_wildcard = True
    return segments
def get_next_index(self, matrix, manipulation, indices_left):
    """Return the index expected to have the most negative effect on the
    matrix sum.

    ``manipulation`` is (factor, _, candidate indices).  A factor < 1
    shrinks rows, so the row with the largest sum is picked; otherwise the
    row with the smallest sum is picked.
    """
    factor = manipulation[0]
    candidates = list(indices_left.intersection(manipulation[2]))
    row_sums = np.sum(matrix[candidates], axis=1)
    if factor < 1:
        chosen = row_sums.argmax(axis=0)
    else:
        chosen = row_sums.argmin(axis=0)
    return candidates[chosen]
def run_deploy_website(restart_apache=False, restart_uwsgi=False,
                       restart_nginx=False):
    """Execute all tasks necessary to deploy the website on the given server.

    Usage::

        fab <server> run_deploy_website
    """
    run_git_pull()
    run_pip_install()
    run_rsync_project()
    run_syncdb()
    run_collectstatic()
    # Message catalogs are only rebuilt when enabled in the settings.
    if getattr(settings, 'MAKEMESSAGES_ON_DEPLOYMENT', False):
        run_makemessages()
    if getattr(settings, 'COMPILEMESSAGES_ON_DEPLOYMENT', False):
        run_compilemessages()
    if restart_apache:
        run_restart_apache()
    if restart_uwsgi:
        run_restart_uwsgi()
    if restart_nginx:
        run_restart_nginx()
    else:
        # NOTE(review): touching the WSGI file is skipped whenever nginx is
        # restarted -- confirm this if/else pairing is intended.
        run_touch_wsgi()
def get_or_create_environment(self, repo: str, branch: str, git_repo: Repo, repo_path: Path) -> str:
    """Return the path to the current Python executable.

    The repo/branch/git_repo/repo_path arguments are accepted for interface
    compatibility but are not used -- no separate environment is created.
    """
    return sys.executable
def _create_merge_filelist(bam_files, base_file, config):
    """Create the list-of-files input for a BAM merge, validating each file.

    Returns the path of the written list file.
    """
    list_file = "%s.list" % os.path.splitext(base_file)[0]
    samtools = config_utils.get_program("samtools", config)
    with open(list_file, "w") as out_handle:
        for bam_file in sorted(bam_files):
            # Fail fast on truncated/corrupt inputs before merging.
            do.run('{} quickcheck -v {}'.format(samtools, bam_file),
                   "Ensure integrity of input merge BAM files")
            out_handle.write("%s\n" % bam_file)
    return list_file
def put_and_track(self, url, payload, refresh_rate_sec=1):
    """PUT a procedure (or procedure run) and track progress with progress
    bars.

    May display the wrong progress if two clients post/put on the same
    procedure name at the same time.

    :raises Exception: if the url is not a /v1/procedures PUT of a
        procedure or a procedure run.
    """
    if not url.startswith('/v1/procedures'):
        raise Exception("The only supported route is /v1/procedures")
    parts = url.split('/')
    len_parts = len(parts)
    if len_parts not in [4, 6]:
        raise Exception(
            "You must either PUT a procedure or a procedure run")
    proc_id = parts[3]
    run_id = None
    if len_parts == 4:
        # Creating the procedure itself: make sure it runs on creation so
        # there is progress to monitor.
        payload.setdefault('params', {})['runOnCreation'] = True
    else:
        run_id = parts[-1]
    pm = ProgressMonitor(self, refresh_rate_sec, proc_id, run_id,
                         self.notebook)
    t = threading.Thread(target=pm.monitor_progress)
    t.start()
    try:
        return self.put(url, payload)
    except Exception as e:
        print(e)
    finally:
        # BUG FIX: the monitor was previously only stopped after an
        # exception -- on success the early return skipped event.set() and
        # join(), leaking the monitor thread.  Always stop and join it.
        pm.event.set()
        t.join()
def gpio_get(self, pins=None):
    """Return a list of states for the given GPIO pins.

    Defaults to the first four pins if an argument is not given.

    Args:
        self (JLink): the ``JLink`` instance
        pins (list): indices of the GPIO pins whose states are requested

    Returns:
        A list of states.

    Raises:
        JLinkException: on error.
    """
    if pins is None:
        pins = range(4)
    count = len(pins)
    indices = (ctypes.c_uint8 * count)(*pins)
    statuses = (ctypes.c_uint8 * count)()
    result = self._dll.JLINK_EMU_GPIO_GetState(ctypes.byref(indices),
                                               ctypes.byref(statuses),
                                               count)
    if result < 0:
        raise errors.JLinkException(result)
    return list(statuses)
def create_app(path=None, user_content=False, context=None, username=None,
               password=None, render_offline=False, render_wide=False,
               render_inline=False, api_url=None, title=None, text=None,
               autorefresh=None, quiet=None, grip_class=None):
    """Creates a Grip application with the specified overrides."""
    if grip_class is None:
        grip_class = Grip
    # Pick the content source: explicit text, stdin ('-'), or a directory.
    if text is not None:
        display_filename = DirectoryReader(path, True).filename_for(None)
        source = TextReader(text, display_filename)
    elif path == '-':
        source = StdinReader()
    else:
        source = DirectoryReader(path)
    # Pick the renderer; None lets the Grip class choose its default.
    if render_offline:
        renderer = OfflineRenderer(user_content, context)
    elif user_content or context or api_url:
        renderer = GitHubRenderer(user_content, context, api_url)
    else:
        renderer = None
    auth = (username, password) if username or password else None
    return grip_class(source, auth, renderer, None, render_wide,
                      render_inline, title, autorefresh, quiet)
def list_targets(Rule,
                 region=None, key=None, keyid=None, profile=None):
    """Given a rule name, list the targets of that rule.

    Returns a dictionary of interesting properties.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_cloudwatch_event.list_targets myrule
    """
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        targets = conn.list_targets_by_rule(Rule=Rule)
        if not targets or 'Targets' not in targets:
            return {'targets': None}
        keys = ('Id', 'Arn', 'Input', 'InputPath')
        ret = [dict((k, target.get(k)) for k in keys if k in target)
               for target in targets.get('Targets')]
        return {'targets': ret}
    except ClientError as e:
        # FIX: compute the error once -- the original bound `err` and then
        # left it unused, calling boto3.get_error a second time.
        err = __utils__['boto3.get_error'](e)
        if e.response.get('Error', {}).get('Code') == 'RuleNotFoundException':
            return {'error': "Rule {0} not found".format(Rule)}
        return {'error': err}
def _mean_of_runs(stats, key='runs'):
num_runs = len(stats[key])
first = stats[key][0]
mean = {}
for stat_key in first:
if isinstance(first[stat_key], numbers.Number):
mean[stat_key] = sum(run[stat_key]
for run in stats[key]) / float(num_runs)
return mean | Obtain the mean of stats.
Args:
stats: dict; A set of stats, structured as above.
key: str; Optional key to determine where list of runs is found in stats |
def zoom_reset(self):
    """Reset the zoom factor to its default and redraw the TimeLine."""
    if self._zoom_default == 0:
        self._zoom_factor = self._zoom_factors[0]
    else:
        self._zoom_factor = self._zoom_default
    if self._zoom_factors.index(self._zoom_factor) == 0:
        # At minimum zoom: cannot zoom out further.
        self._button_zoom_out.config(state=tk.DISABLED)
        self._button_zoom_in.config(state=tk.NORMAL)
    elif self._zoom_factors.index(self.zoom_factor) + 1 == len(self._zoom_factors):
        # At maximum zoom: cannot zoom in further.
        # NOTE(review): this branch reads `self.zoom_factor` while the one
        # above reads `self._zoom_factor` -- presumably a property mirror;
        # confirm against the class definition.
        self._button_zoom_out.config(state=tk.NORMAL)
        self._button_zoom_in.config(state=tk.DISABLED)
    self.draw_timeline()
def get_max_bitlen(self):
    """Return the total maximum bit length of the array, including the
    length field when the array is dynamic.

    Raises KeyError for an unknown mode (preserved behavior).
    """
    payload_bits = self.max_size * self.value_type.get_max_bitlen()
    sizes = {
        # Dynamic arrays are prefixed by a length field.
        self.MODE_DYNAMIC: payload_bits + self.max_size.bit_length(),
        self.MODE_STATIC: payload_bits,
    }
    return sizes[self.mode]
def _zom_name(lexer):
    """Return a tuple of zero or more dotted-name components."""
    tok = next(lexer)
    if not isinstance(tok, DOT):
        # Not part of a dotted name: push the token back for the caller.
        lexer.unpop_token(tok)
        return tuple()
    first = _expect_token(lexer, {NameToken}).value
    return (first,) + _zom_name(lexer)
def find(self, name, namespace=None):
    """Find a registered plugin object.

    Parameters
    ----------
    name : string
        A name of the object entry or full namespace.
    namespace : string, optional
        A period separated namespace, e.g. ``foo.bar.hogehoge``.

    Returns
    -------
    instance
        The registered instance.

    Raises
    ------
    KeyError
        If the named instance has not been registered.
    """
    if "." in name:
        namespace, name = name.rsplit(".", 1)
    node = self.raw
    if namespace:
        for term in namespace.split('.'):
            if term not in node:
                # Materialize intermediate namespaces on the fly.
                node[term] = Bunch()
            node = node[term]
    return node[name]
def scalar(name, data, step=None, description=None):
    """Write a scalar summary.

    Arguments:
        name: A name for this summary. The summary tag used for TensorBoard
            will be this name prefixed by any active name scopes.
        data: A real numeric scalar value, convertible to a `float32` Tensor.
        step: Explicit `int64`-castable monotonic step value for this
            summary. If omitted, this defaults to
            `tf.summary.experimental.get_step()`, which must not be None.
        description: Optional long-form description for this summary, as a
            constant `str`. Markdown is supported. Defaults to empty.

    Returns:
        True on success, or false if no summary was written because no
        default summary writer was available.

    Raises:
        ValueError: if a default writer exists, but no step was provided and
            `tf.summary.experimental.get_step()` is None.
    """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    # Fall back for TF versions where summary_scope is not yet exported
    # under tf.summary.experimental.
    summary_scope = (
        getattr(tf.summary.experimental, 'summary_scope', None) or
        tf.summary.summary_scope)
    with summary_scope(
        name, 'scalar_summary', values=[data, step]) as (tag, _):
        tf.debugging.assert_scalar(data)
        return tf.summary.write(tag=tag,
                                tensor=tf.cast(data, tf.float32),
                                step=step,
                                metadata=summary_metadata)
def get_all_unresolved(self):
    """Return a set of all unresolved imports across broken dependencies."""
    assert self.final, 'Call build() before using the graph.'
    unresolved = set()
    for deps in self.broken_deps.values():
        unresolved.update(deps)
    return unresolved
def _isdst(dt):
if type(dt) == datetime.date:
dt = datetime.datetime.combine(dt, datetime.datetime.min.time())
dtc = dt.replace(year=datetime.datetime.now().year)
if time.localtime(dtc.timestamp()).tm_isdst == 1:
return True
return False | Check if date is in dst. |
def _get_line_styles(marker_str):
    """Return line style, color and marker type decoded from a marker string.

    For example, if ``marker_str`` is 'g-o' the method returns
    ``('solid', 'green', 'circle')``.
    """
    def _first_match(code_dict):
        # The first code found in marker_str wins; None when nothing matches.
        for code, value in code_dict.items():
            if code in marker_str:
                return value
        return None

    return [_first_match(codes)
            for codes in (LINE_STYLE_CODES, COLOR_CODES, MARKER_CODES)]
def to_package(self, repo_url):
    """Return the package representation of this repo.

    :param repo_url: base URL the repo name is appended to.
    """
    return Package(name=self.name, url=repo_url + self.name)
def limits(self, variable):
    """Return minimum and maximum of ``variable`` across all rows of data."""
    query = 'min(%(variable)s), max(%(variable)s)' % {'variable': variable}
    (vmin, vmax), = self.SELECT(query)
    return vmin, vmax
def fetch_events(cursor, config, account_name):
    """Yield events from the trail database, annotated for indexing."""
    indexer = config['indexer']
    query = indexer.get(
        'query',
        'select * from events where user_agent glob \'*CloudCustodian*\'')
    for event in cursor.execute(query):
        event['account'] = account_name
        event['_index'] = indexer['idx_name']
        event['_type'] = indexer.get('idx_type', 'traildb')
        yield event
def calcMzFromMass(mass, charge):
    """Calculate the m/z value of a peptide from its mass and charge.

    :param mass: float, exact non protonated mass
    :param charge: int, charge state
    :returns: mass to charge ratio of the specified charge state
    """
    proton_mass = maspy.constants.atomicMassProton
    return (mass + proton_mass * charge) / charge
def collect(self):
    """Copy every app's tools directory into a single destination tree."""
    for app_name, tools_path in get_apps_tools().items():
        self.stdout.write("Copying files from '{}'.".format(tools_path))
        app_name = app_name.replace('.', '_')
        app_destination = os.path.join(self.destination_path, app_name)
        if not os.path.isdir(app_destination):
            os.mkdir(app_destination)
        for root, dirs, files in os.walk(tools_path):
            # Mirror the directory structure first, then copy the files.
            for dir_name in dirs:
                src = os.path.join(root, dir_name)
                dst = self.change_path_prefix(
                    src, tools_path, self.destination_path, app_name)
                if not os.path.isdir(dst):
                    os.mkdir(dst)
            for file_name in files:
                src = os.path.join(root, file_name)
                dst = self.change_path_prefix(
                    src, tools_path, self.destination_path, app_name)
                shutil.copy2(src, dst)
def _fetch(self, searchtype, fields, **kwargs):
    """Fetch and parse a response from the Geocoding API.

    Returns an AddressResult or GeographyResult; raises ValueError when
    the response cannot be parsed and re-raises any RequestException.
    """
    fields['vintage'] = self.vintage
    fields['benchmark'] = self.benchmark
    fields['format'] = 'json'
    if 'layers' in kwargs:
        fields['layers'] = kwargs['layers']
    url = self._geturl(searchtype, kwargs.get('returntype', 'geographies'))
    try:
        with requests.get(url, params=fields, timeout=kwargs.get('timeout')) as r:
            content = r.json()
            result = content.get('result', {})
            if "addressMatches" in result:
                return AddressResult(content)
            if "geographies" in result:
                return GeographyResult(content)
            # Unexpected payload: funnel into the uniform parse error below.
            raise ValueError()
    except (ValueError, KeyError):
        raise ValueError("Unable to parse response from Census")
    except RequestException as e:
        raise e
def diff(xi, yi, order=1) -> np.ndarray:
    """Take the numerical derivative of a 1D array.

    Output is mapped onto the original coordinates using linear
    interpolation. Expects monotonic xi values.

    Parameters
    ----------
    xi : 1D array-like
        Coordinates.
    yi : 1D array-like
        Values.
    order : positive integer (optional)
        Order of differentiation.

    Returns
    -------
    1D numpy array
        Numerical derivative. Has the same shape as the input arrays.
    """
    # Work on float arrays: integer input would otherwise break the
    # in-place true division below (numpy refuses `int_array /= ...`),
    # and plain lists would break the midpoint arithmetic on xi.
    xi = np.asarray(xi, dtype=float)
    yi = np.asarray(yi, dtype=float).copy()
    flip = False
    if xi[-1] < xi[0]:
        # Normalize to ascending coordinates; flip back at the end.
        xi = np.flipud(xi)
        yi = np.flipud(yi)
        flip = True
    midpoints = (xi[1:] + xi[:-1]) / 2
    for _ in range(order):
        d = np.diff(yi)
        d /= np.diff(xi)
        # Map the midpoint derivatives back onto the original grid.
        yi = np.interp(xi, midpoints, d)
    if flip:
        yi = np.flipud(yi)
    return yi
async def ack(self, msg):
    """Used to manually ack a message.

    :param msg: Message which is pending to be acked by client.
    """
    acknowledgement = protocol.Ack()
    acknowledgement.subject = msg.proto.subject
    acknowledgement.sequence = msg.proto.sequence
    await self._nc.publish(msg.sub.ack_inbox, acknowledgement.SerializeToString())
def generate_signed_url(
    self,
    expiration=None,
    api_access_endpoint=_API_ACCESS_ENDPOINT,
    method="GET",
    headers=None,
    query_parameters=None,
    client=None,
    credentials=None,
    version=None,
):
    """Generate a signed URL for this bucket.

    If you have a bucket that you want to allow access to for a set
    amount of time, you can use this method to generate a URL that is
    only valid within a certain time period. Useful when you don't
    want publicly accessible buckets but don't want to require users
    to explicitly log in.

    .. note::
        On Google Compute Engine you can't generate a signed URL with
        a GCE service account; use a standard service account from a
        JSON file instead. See
        https://github.com/GoogleCloudPlatform/google-auth-library-python/issues/50

    :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
    :param expiration: Point in time when the signed URL should expire.
    :type api_access_endpoint: str
    :param api_access_endpoint: Optional URI base.
    :type method: str
    :param method: The HTTP verb that will be used when requesting the URL.
    :type headers: dict
    :param headers: (Optional) Additional HTTP headers to be included as
        part of the signed URLs; requests using the URL must pass them.
    :type query_parameters: dict
    :param query_parameters: (Optional) Additional query parameters to be
        included as part of the signed URLs.
    :type client: :class:`~google.cloud.storage.client.Client` or NoneType
    :param client: (Optional) The client to use; falls back to the client
        stored on the blob's bucket.
    :type credentials: :class:`oauth2client.client.OAuth2Credentials` or
        NoneType
    :param credentials: (Optional) The OAuth2 credentials used to sign the
        URL; defaults to the credentials stored on the client used.
    :type version: str
    :param version: (Optional) The version of signed credential to create;
        must be one of 'v2' | 'v4'.
    :raises: :exc:`ValueError` when version is invalid.
    :raises: :exc:`TypeError` when expiration is not a valid type.
    :raises: :exc:`AttributeError` if credentials is not an instance of
        :class:`google.auth.credentials.Signing`.
    :rtype: str
    :returns: A signed URL you can use to access the resource until
        expiration.
    """
    if version is None:
        version = "v2"
    if version not in ("v2", "v4"):
        raise ValueError("'version' must be either 'v2' or 'v4'")
    resource = "/{}".format(self.name)
    if credentials is None:
        client = self._require_client(client)
        credentials = client._credentials
    signing_helpers = {
        "v2": generate_signed_url_v2,
        "v4": generate_signed_url_v4,
    }
    return signing_helpers[version](
        credentials,
        resource=resource,
        expiration=expiration,
        api_access_endpoint=api_access_endpoint,
        method=method.upper(),
        headers=headers,
        query_parameters=query_parameters,
    )
def nice_identifier():
    """Return a random 128-bit identifier as a 32-character hex string.

    Deliberately avoids uuid.uuid4 (reported to block on some
    platforms) and builds the UUID from os.urandom directly. Using the
    raw bytes keeps the value uniformly distributed, unlike the old
    approach of multiplying the four unpacked 32-bit words together
    (which was biased and could even yield zero).
    """
    return uuid.UUID(bytes=os.urandom(16)).hex
def _send_chunk(self, index, chunk):
self._pending_chunks += 1
self.outbox.put((index, chunk)) | Send the current chunk to the workers for processing.
Called when the _partial_chunk is complete.
Blocks when the outbox is full. |
def specialspaceless(parser, token):
    """Removes whitespace between HTML tags, and introduces a whitespace
    after buttons and inputs, necessary for Bootstrap to place them
    correctly in the layout.
    """
    inner_nodes = parser.parse(('endspecialspaceless',))
    parser.delete_first_token()
    return SpecialSpacelessNode(inner_nodes)
def dumpJson(obj, **kwargs):
    r"""Match JS's JSON.stringify.

    When using the default separators, base64 encoding JSON results in
    \n sequences in the output; Hawk barfs in your face if you have
    that in the text, so compact separators are forced here.
    """
    def jsDefault(value):
        # Bytes become text on py3 so they serialize like JS strings.
        if six.PY3 and isinstance(value, six.binary_type):
            value = value.decode()
        if isinstance(value, (datetime.datetime, datetime.date)):
            return stringDate(value)
        return value

    encoded = json.dumps(obj, separators=(',', ':'), default=jsDefault, **kwargs)
    assert '\n' not in encoded
    return encoded
def _lookup_generic_scalar(self,
obj,
as_of_date,
country_code,
matches,
missing):
result = self._lookup_generic_scalar_helper(
obj, as_of_date, country_code,
)
if result is not None:
matches.append(result)
else:
missing.append(obj) | Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing. |
def package_in_memory(cls, workflow_name, workflow_files):
    """Generates wf packages from workflow diagrams.

    Args:
        workflow_name: Name of wf
        workflow_files: Diagram file.
    Returns:
        Workflow package (file like) object
    """
    buffer = StringIO()
    packager = cls(buffer, workflow_name, meta_data=[])
    packager.add_bpmn_files_by_glob(workflow_files)
    packager.create_package()
    return buffer.getvalue()
def download_files_if_not_in_manifest(files_iterator, output_path):
    """Download the given files to the given path, unless in manifest.

    Files already recorded in the local manifest are skipped; each
    successful download is appended to the manifest so reruns resume.
    Download failures are logged and do not abort the remaining files.
    """
    local_manifest = read_local_manifest(output_path)
    with open(get_local_manifest_path(output_path), 'a') as manifest_fh:
        for (file_name, width) in files_iterator:
            if is_file_in_manifest(file_name, width, local_manifest):
                logging.info('Skipping file %s', file_name)
                continue
            try:
                download_file(file_name, output_path, width=width)
                write_file_to_manifest(file_name, width, manifest_fh)
            # 'except E, e' is Python-2-only syntax; 'as' works on both
            # 2.6+ and 3.x. str(e) replaces the py2-only e.message.
            except DownloadException as e:
                logging.error("Could not download %s: %s", file_name, str(e))
def _load_words(self):
with open(self._words_file, 'r') as f:
self._censor_list = [line.strip() for line in f.readlines()] | Loads the list of profane words from file. |
def PenForNode( self, node, depth=0 ):
    """Determine the pen to use to display the given node."""
    return self.SELECTED_PEN if node == self.selectedNode else self.DEFAULT_PEN
def cli(ctx, project_dir):
    """Launch the verilog simulation."""
    result = SCons(project_dir).sim()
    ctx.exit(result)
def search(self, keyword, children=None, arg=None):
    """Return list of receiver's substmts with `keyword`.

    If `arg` is given, only statements whose argument equals it match;
    `children` defaults to this statement's substatements.
    """
    if children is None:
        children = self.substmts
    hits = []
    for child in children:
        if child.keyword != keyword:
            continue
        if arg is not None and child.arg != arg:
            continue
        hits.append(child)
    return hits
def import_object(path):
    """Import an object given its fully qualified name.

    A bare module name returns the module. Otherwise the last dotted
    component is first tried as an attribute of its parent module; if
    that fails, the full path is imported as a module.
    """
    parts = path.split('.')
    if len(parts) == 1:
        return importlib.import_module(path)
    parent = importlib.import_module('.'.join(parts[:-1]))
    try:
        return getattr(parent, parts[-1])
    except AttributeError:
        return importlib.import_module(path)
def create_floatingip(floating_network, port=None, profile=None):
    """Creates a new floatingIP

    CLI Example:

    .. code-block:: bash

        salt '*' neutron.create_floatingip network-name port-name

    :param floating_network: Network name or ID to allocate floatingIP from
    :param port: Of the port to be associated with the floatingIP (Optional)
    :param profile: Profile to build on (Optional)
    :return: Created floatingIP information
    """
    return _auth(profile).create_floatingip(floating_network, port)
def _get_hba_type(hba_type):
if hba_type == "parallel":
return vim.host.ParallelScsiHba
elif hba_type == "block":
return vim.host.BlockHba
elif hba_type == "iscsi":
return vim.host.InternetScsiHba
elif hba_type == "fibre":
return vim.host.FibreChannelHba
raise ValueError('Unknown Host Bus Adapter Type') | Convert a string representation of a HostHostBusAdapter into an
object reference. |
def _list_to_seq(lst):
    """Return a scala.collection.Seq from a Python list."""
    mutable_list = autoclass('scala.collection.mutable.MutableList')()
    for item in lst:
        mutable_list.appendElem(item)
    return mutable_list
def dispatch(self):
    """Perform dispatch, using request embedded within flask global state."""
    import flask
    request_body = flask.request.get_json()
    return self.dispatch_with_args(request_body, argMap=dict())
def generate_take(out_f, steps, line_prefix):
    """Generate the take function and write its source to out_f."""
    conditionals = ''.join('n_ >= {0} ? {0} : ('.format(step) for step in steps)
    closing = ')' * len(steps)
    template = (
        '{0}constexpr inline int take(int n_)\n'
        '{0}{{\n'
        '{0} return {1} 0 {2};\n'
        '{0}}}\n'
        '\n'
    )
    out_f.write(template.format(line_prefix, conditionals, closing))
def get_eager_datasource(cls, session, datasource_type, datasource_id):
    """Returns datasource with columns and metrics eagerly loaded."""
    datasource_class = ConnectorRegistry.sources[datasource_type]
    query = session.query(datasource_class).options(
        subqueryload(datasource_class.columns),
        subqueryload(datasource_class.metrics),
    )
    return query.filter_by(id=datasource_id).one()
def teardown_global_logging():
    """Disable global logging of stdio, warnings, and exceptions.

    Undoes the corresponding setup: restores the real stdout/stderr
    streams, removes the installed excepthook, stops warning capture,
    and puts back the original raw_input/input builtin. No-op if the
    global logging was never started.
    """
    global global_logging_started
    if not global_logging_started:
        return
    stdout_logger = logging.getLogger(__name__ + '.stdout')
    stderr_logger = logging.getLogger(__name__ + '.stderr')
    # NOTE(review): assumes setup assigned these logger objects to
    # sys.stdout/sys.stderr and stashed the real stream on .stream.
    if sys.stdout is stdout_logger:
        sys.stdout = sys.stdout.stream
    if sys.stderr is stderr_logger:
        sys.stderr = sys.stderr.stream
    # Flush any in-flight exception through the (still installed) hook
    # before removing it, so it is not silently lost.
    exc_type, exc_value, exc_traceback = sys.exc_info()
    if exc_type is not None:
        sys.excepthook(exc_type, exc_value, exc_traceback)
    del exc_type
    del exc_value
    del exc_traceback
    if not PY3K:
        sys.exc_clear()
    # Deleting the module-level attribute re-exposes the interpreter's
    # built-in default excepthook.
    del sys.excepthook
    logging.captureWarnings(False)
    rawinput = 'input' if PY3K else 'raw_input'
    if hasattr(builtins, '_original_raw_input'):
        # Setup stashed the original input function here; put it back.
        setattr(builtins, rawinput, builtins._original_raw_input)
        del builtins._original_raw_input
    global_logging_started = False
def from_wms(cls, filename, vector, resolution, destination_file=None):
    """Create georaster from the web service definition file."""
    vrt_doc = wms_vrt(filename, bounds=vector, resolution=resolution).tostring()
    saved_path = cls._save_to_destination_file(vrt_doc, destination_file)
    return GeoRaster2.open(saved_path)
def find_package(name, installed, package=False):
    """Finds a package in the installed list.

    If `package` is true, match package names, otherwise, match import
    paths. Candidates are preferred in order: user installs, then local
    installs, then anything else. A candidate whose record is marked
    ``is_scan`` is never returned.

    :param name: package name or import name to look for
    :param installed: iterable of installed-package records
    :param package: match on ``.name`` (True) or ``.import_names`` (False)
    :returns: the best matching record, or None
    """
    if package:
        name = name.lower()
        tests = (
            lambda x: x.user and name == x.name.lower(),
            lambda x: x.local and name == x.name.lower(),
            lambda x: name == x.name.lower(),
        )
    else:
        tests = (
            lambda x: x.user and name in x.import_names,
            lambda x: x.local and name in x.import_names,
            lambda x: name in x.import_names,
        )
    for test in tests:
        # list(filter(...)) cannot raise StopIteration, so the old
        # try/except StopIteration around this was dead code; removed.
        found = [candidate for candidate in installed if test(candidate)]
        if found and not found[0].is_scan:
            return found[0]
    return None
def from_file(cls, filename):
    """Read an Fiesta input from a file. Currently tested to work with
    files generated from this class itself.

    Args:
        filename: Filename to parse.

    Returns:
        FiestaInput object
    """
    with zopen(filename) as file_handle:
        contents = file_handle.read()
    return cls.from_string(contents)
def deserialize_instance(model, data=None):
    """Translate raw data into a model instance.

    Date/time strings are parsed back into their Python types based on
    the model field; keys with no matching field are set as plain
    attributes; ``None`` values are skipped entirely.

    :param model: model class to instantiate
    :param data: mapping of field name -> raw value (None means empty)
    :returns: an unsaved model instance populated from ``data``
    """
    # A mutable default ({}) would be shared across all calls; use None.
    if data is None:
        data = {}
    instance = model()
    for key, value in data.items():
        if value is None:
            continue
        try:
            field = model._meta.get_field(key)
            # DateTimeField is checked before DateField because it
            # subclasses DateField in Django.
            if isinstance(field, DateTimeField):
                value = dateparse.parse_datetime(value)
            elif isinstance(field, TimeField):
                value = dateparse.parse_time(value)
            elif isinstance(field, DateField):
                value = dateparse.parse_date(value)
        except FieldDoesNotExist:
            pass
        setattr(instance, key, value)
    return instance
def _quantityToReal(self, quantity):
if not quantity:
return 1.0
try:
return float(quantity.replace(',', '.'))
except ValueError:
pass
try:
return float(self.ptc.numbers[quantity])
except KeyError:
pass
return 0.0 | Convert a quantity, either spelled-out or numeric, to a float
@type quantity: string
@param quantity: quantity to parse to float
@rtype: int
@return: the quantity as an float, defaulting to 0.0 |
def tablespace_create(name, location, options=None, owner=None, user=None,
                      host=None, port=None, maintenance_db=None, password=None,
                      runas=None):
    """Adds a tablespace to the Postgres server.

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.tablespace_create tablespacename '/path/datadir'

    .. versionadded:: 2015.8.0
    """
    owner_query = 'OWNER "{0}"'.format(owner) if owner else ''
    if options:
        pairs = ', '.join('{0} = {1}'.format(k, v)
                          for k, v in six.iteritems(options))
        options_query = 'WITH ( {0} )'.format(pairs)
    else:
        options_query = ''
    query = 'CREATE TABLESPACE "{0}" {1} LOCATION \'{2}\' {3}'.format(
        name, owner_query, location, options_query)
    result = _psql_prepare_and_run(['-c', query],
                                   user=user, host=host, port=port,
                                   maintenance_db=maintenance_db,
                                   password=password, runas=runas)
    return result['retcode'] == 0
def put_subsegment(self, subsegment):
    """Store the subsegment created by ``xray_recorder`` to the context.

    If you put a new subsegment while there is already an open
    subsegment, the new subsegment becomes the child of the existing
    subsegment.
    """
    parent = self.get_trace_entity()
    if not parent:
        log.warning("Active segment or subsegment not found. Discarded %s." % subsegment.name)
        return
    parent.add_subsegment(subsegment)
    self._local.entities.append(subsegment)
def procedures(self, *a, **kw):
    """Executes SQLProcedures and creates a result set of information
    about the procedures in the data source.
    """
    return self._run_operation(self._impl.procedures, *a, **kw)
def _build_signature(self):
    """Create the signature using the private key.

    Returns a (signature, key_id) tuple, both urlsafe-base64 encoded.
    """
    signed_parts = [
        self.payload,
        b64encode(b"application/xml").decode("ascii"),
        b64encode(b"base64url").decode("ascii"),
        b64encode(b"RSA-SHA256").decode("ascii"),
    ]
    digest = SHA256.new(".".join(signed_parts).encode("ascii"))
    signature = urlsafe_b64encode(PKCS1_v1_5.new(self.private_key).sign(digest))
    key_id = urlsafe_b64encode(bytes(self.author_handle, encoding="utf-8"))
    return signature, key_id
def _nbinom_ztrunc_p(mu, k_agg):
p_eq = lambda p, mu, k_agg: (k_agg * p) / (1 - (1 + p)**-k_agg) - mu
p = optim.brentq(p_eq, 1e-10, 1e10, args=(mu, k_agg))
return p | Calculates p parameter for truncated negative binomial
Function given in Sampford 1955, equation 4
Note that omega = 1 / 1 + p in Sampford |
def load_schema(filename, context=None):
    """Load schema from file in any of the supported formats.

    The table must have at least the fields `field_name` and
    `field_type`. `context` is a `dict` with field_type as key pointing
    to field class, like:
    {"text": rows.fields.TextField, "value": MyCustomField}
    """
    table = import_from_uri(filename)
    field_names = table.field_names
    assert "field_name" in field_names
    assert "field_type" in field_names
    # Default context: every concrete *Field class exposed by rows.fields,
    # keyed by its lowercased name without the "Field" suffix.
    context = context or {
        attr.replace("Field", "").lower(): getattr(rows.fields, attr)
        for attr in dir(rows.fields)
        if "Field" in attr and attr != "Field"
    }
    return OrderedDict(
        (row.field_name, context[row.field_type]) for row in table
    )
def do_imports(self):
    """Import all importable options.

    'worker_class' must be imported first: the later calls read
    attributes off self.options.worker_class, which the first
    do_import presumably sets — the order here matters.
    """
    self.do_import('worker_class', Worker)
    self.do_import('queue_model', self.options.worker_class.queue_model)
    self.do_import('error_model', self.options.worker_class.error_model)
    self.do_import('callback', self.options.worker_class.callback)
def add_connectionmanager_api(mock):
    """Add org.ofono.ConnectionManager API to a mock."""
    iface = 'org.ofono.ConnectionManager'
    properties = {
        'Attached': _parameters.get('Attached', True),
        'Bearer': _parameters.get('Bearer', 'gprs'),
        'RoamingAllowed': _parameters.get('RoamingAllowed', False),
        'Powered': _parameters.get('ConnectionPowered', True),
    }
    mock.AddProperties(iface, properties)
    set_property_code = (
        'self.Set("%(i)s", args[0], args[1]); '
        'self.EmitSignal("%(i)s", "PropertyChanged", "sv", [args[0], args[1]])' % {'i': iface}
    )
    methods = [
        ('GetProperties', '', 'a{sv}', 'ret = self.GetAll("%s")' % iface),
        ('SetProperty', 'sv', '', set_property_code),
        ('AddContext', 's', 'o', 'ret = "/"'),
        ('RemoveContext', 'o', '', ''),
        ('DeactivateAll', '', '', ''),
        ('GetContexts', '', 'a(oa{sv})', 'ret = dbus.Array([])'),
    ]
    mock.AddMethods(iface, methods)
def to_xarray(self) -> "xarray.Dataset":
    """Convert to xarray.Dataset."""
    import xarray as xr
    data_vars = {
        "frequencies": xr.DataArray(self.frequencies, dims="bin"),
        "errors2": xr.DataArray(self.errors2, dims="bin"),
        "bins": xr.DataArray(self.bins, dims=("bin", "x01")),
    }
    attrs = dict(
        underflow=self.underflow,
        overflow=self.overflow,
        inner_missed=self.inner_missed,
        keep_missed=self.keep_missed,
    )
    attrs.update(self._meta_data)
    return xr.Dataset(data_vars, {}, attrs)
def knit(self, input_file, opts_chunk='eval=FALSE'):
    """Use Knitr to convert the r-markdown input_file into markdown,
    returning a file object.
    """
    source_tmp = tempfile.NamedTemporaryFile(mode='w+')
    result_tmp = tempfile.NamedTemporaryFile(mode='w+')
    source_tmp.file.write(input_file.read())
    source_tmp.file.flush()
    source_tmp.file.seek(0)
    self._knit(source_tmp.name, result_tmp.name, opts_chunk)
    result_tmp.file.flush()
    return result_tmp
def download_files(file_list):
    """Download the latest data."""
    for _, source_data_file in file_list:
        sql_gz_name = source_data_file['name'].split('/')[-1]
        log.debug('Downloading: %s', sql_gz_name)
        new_data = objectstore.get_object(
            handelsregister_conn, source_data_file, 'handelsregister')
        destination = 'data/{}'.format(sql_gz_name)
        with open(destination, 'wb') as outputzip:
            outputzip.write(new_data)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.