| code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (3 classes) |
|---|---|---|
def get_mask_from_raster(rasterfile, outmaskfile, keep_nodata=False):
raster_r = RasterUtilClass.read_raster(rasterfile)
xsize = raster_r.nCols
ysize = raster_r.nRows
nodata_value = raster_r.noDataValue
srs = raster_r.srs
x_min = raster_r.xMin
y_max = raster_r.yMax
dx = raster_r.dx
data = raster_r.data
if not keep_nodata:
i_min = ysize - 1
i_max = 0
j_min = xsize - 1
j_max = 0
for i in range(ysize):
for j in range(xsize):
if abs(data[i][j] - nodata_value) > DELTA:
i_min = min(i, i_min)
i_max = max(i, i_max)
j_min = min(j, j_min)
j_max = max(j, j_max)
y_size_mask = i_max - i_min + 1
x_size_mask = j_max - j_min + 1
x_min_mask = x_min + j_min * dx
y_max_mask = y_max - i_min * dx
else:
y_size_mask = ysize
x_size_mask = xsize
x_min_mask = x_min
y_max_mask = y_max
i_min = 0
j_min = 0
print('%dx%d -> %dx%d' % (xsize, ysize, x_size_mask, y_size_mask))
mask = numpy.zeros((y_size_mask, x_size_mask))
for i in range(y_size_mask):
for j in range(x_size_mask):
if abs(data[i + i_min][j + j_min] - nodata_value) > DELTA:
mask[i][j] = 1
else:
mask[i][j] = DEFAULT_NODATA
mask_geotrans = [x_min_mask, dx, 0, y_max_mask, 0, -dx]
RasterUtilClass.write_gtiff_file(outmaskfile, y_size_mask, x_size_mask, mask,
mask_geotrans, srs, DEFAULT_NODATA, GDT_Int32)
return Raster(y_size_mask, x_size_mask, mask, DEFAULT_NODATA, mask_geotrans, srs)
|
Generate mask data from a given raster file.
Args:
rasterfile: raster file path.
outmaskfile: output mask file path.
keep_nodata: if False (the default), crop the mask to the bounding box of valid (non-nodata) cells; if True, keep the full raster extent.
Returns:
Raster object of mask data.
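A minimal usage sketch; the file paths are hypothetical and the behaviour described in the comments follows the keep_nodata handling in the code above:
.. code-block:: python
# crops to the bounding box of valid cells (keep_nodata=False is the default)
mask_raster = get_mask_from_raster('dem.tif', 'dem_mask.tif')
# keep the original raster extent instead of cropping
full_mask = get_mask_from_raster('dem.tif', 'dem_mask_full.tif', keep_nodata=True)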
|
juraj-google-style
|
def __setitem__(self, anchor_id, anchor):
with self._anchor_path(anchor_id).open(mode='wt') as f:
save_anchor(f, anchor, self.root)
|
Update an anchor.
This will update an existing anchor if it exists, or it will create new
storage if not.
Args:
anchor_id: The ID of the anchor to update.
anchor: The anchor to store.
|
juraj-google-style
|
def add_delegate(self, callback):
if (callback in self._delegate_methods):
return
self._delegate_methods.append(callback)
|
Registers a new delegate callback.
The callback prototype should be function(data), where data is the decoded JSON push.
Args:
callback (function): method to trigger when push center receives events
|
codesearchnet
|
def distance_to_angle(distance, units='metric'):
if units in ('km', 'metric'):
pass
elif units in ('sm', 'imperial', 'US customary'):
distance *= STATUTE_MILE
elif units in ('nm', 'nautical'):
distance *= NAUTICAL_MILE
else:
raise ValueError('Unknown units type %r' % units)
return math.degrees(distance / BODY_RADIUS)
|
Convert a distance into an angle along a great circle.
Args:
distance (float): Distance to convert to degrees
units (str): Unit type to be used for distances
Returns:
float: Angle in degrees
Raises:
ValueError: Unknown value for ``units``
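A small usage sketch, assuming BODY_RADIUS is the Earth's radius in kilometres, so one degree of arc is roughly 111 km, or 60 nautical miles:
.. code-block:: python
distance_to_angle(111.0)                   # ~1.0 degree (metric kilometres)
distance_to_angle(60, units='nautical')    # ~1.0 degree (60 nm per degree)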
|
juraj-google-style
|
def raw_value(self):
if (self.parent_setting is not None):
return self.parent_setting.raw_value[self.full_name]
else:
return getattr(settings, self.full_name)
|
Property to return the variable defined in ``django.conf.settings``.
Returns:
object: the variable defined in ``django.conf.settings``.
Raises:
AttributeError: if the variable is missing.
KeyError: if the item is missing from nested setting.
|
codesearchnet
|
def variance_inflation_factors(df):
corr = np.corrcoef(df, rowvar=0)
corr_inv = np.linalg.inv(corr)
vifs = np.diagonal(corr_inv)
return pd.Series(vifs, df.columns, name='VIF')
|
Computes the variance inflation factor (VIF) for each column in the df.
Returns a pandas Series of VIFs
Args:
df: pandas DataFrame with columns to run diagnostics on
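A brief sketch with a hypothetical design matrix; the near-collinear columns x1 and x2 produce large VIFs:
.. code-block:: python
import pandas as pd
df = pd.DataFrame({'x1': [1.0, 2.0, 3.0, 4.0], 'x2': [2.1, 3.9, 6.2, 7.8], 'x3': [1.0, 0.0, 1.0, 0.0]})
variance_inflation_factors(df)  # Series named 'VIF', indexed by column name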
|
juraj-google-style
|
def _getsize_from_header(self, header):
for key in self._SIZE_KEYS:
try:
return int(header.pop(key))
except KeyError:
continue
else:
raise UnsupportedOperation('getsize')
|
Return the size from the header.
Args:
header (dict): Object header.
Returns:
int: Size in bytes.
|
codesearchnet
|
def _ValidateCacheEntryHeader(self, cache_entry_header):
return ((cache_entry_header.request_size > 0) and (cache_entry_header.request_size < self._MAXIMUM_URL_LENGTH) and (cache_entry_header.major_format_version == 1) and (cache_entry_header.last_fetched_time > 0) and (cache_entry_header.fetch_count > 0))
|
Determines whether the values in the cache entry header are valid.
Args:
cache_entry_header (firefox_cache1_entry_header): cache entry header.
Returns:
bool: True if the cache entry header is valid.
|
codesearchnet
|
def set_enable(self, name, vrid, value=False, run=True):
if (value is False):
cmd = ('vrrp %d shutdown' % vrid)
elif (value is True):
cmd = ('no vrrp %d shutdown' % vrid)
else:
raise ValueError("vrrp property 'enable' must be True or False")
if run:
result = self.configure_interface(name, cmd)
if (result is False):
return self.error
return result
return cmd
|
Set the enable property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (boolean): True to enable the vrrp, False to disable.
run (boolean): True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure
If run is False, returns the formatted command string which can
be passed to the node
|
codesearchnet
|
def _fqdn(o, oset=True, recheck=False, pmodule=None):
if ((id(o) in _set_failures) or (o is None)):
return None
if (recheck or (not _safe_hasattr(o, '__fqdn__'))):
import inspect
if (not hasattr(o, '__name__')):
msg.warn('Skipped object {}: no __name__ attribute.'.format(o), 3)
return
result = None
if (hasattr(o, '__acornext__') and (o.__acornext__ is not None)):
otarget = o.__acornext__
else:
otarget = o
omod = (_safe_getmodule(otarget) or pmodule)
if ((omod is None) and hasattr(otarget, '__objclass__') and (otarget.__objclass__ is not None)):
omod = _safe_getmodule(otarget.__objclass__)
parts = (('<unknown>' if (omod is None) else omod.__name__), otarget.__objclass__.__name__, otarget.__name__)
result = '{}.{}.{}'.format(*parts)
elif ((omod is None) and hasattr(otarget, '__class__') and (otarget.__class__ is not None)):
omod = _safe_getmodule(otarget.__class__)
parts = (('<unknown>' if (omod is None) else omod.__name__), otarget.__class__.__name__, otarget.__name__)
result = '{}.{}.{}'.format(*parts)
elif (omod is not otarget):
parts = (_fqdn(omod, False), otarget.__name__)
result = '{}.{}'.format(*parts)
else:
result = otarget.__name__
if oset:
_safe_setattr(o, '__fqdn__', result)
return result
if _safe_hasattr(o, '__fqdn__'):
return o.__fqdn__
|
Returns the fully qualified name of the object.
Args:
o (type): instance of the object's type.
oset (bool): when True, the fqdn will also be set on the object as attribute
`__fqdn__`.
recheck (bool): for sub-classes, sometimes the super class has already had
its __fqdn__ attribute set; in that case, we want to recheck the
object's name. This usually only gets used during object extension.
|
codesearchnet
|
def _join_modules(module1, module2):
if not module1:
return module2
if not module2:
return module1
return '%s.%s' % (module1, module2)
|
Concatenate 2 module components.
Args:
module1: First module to join.
module2: Second module to join.
Returns:
Given two modules aaa.bbb and ccc.ddd, returns a joined
module aaa.bbb.ccc.ddd.
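A couple of illustrative calls, mirroring the example above:
.. code-block:: python
_join_modules('aaa.bbb', 'ccc.ddd')  # 'aaa.bbb.ccc.ddd'
_join_modules('', 'ccc.ddd')         # 'ccc.ddd' (empty components are passed through)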
|
github-repos
|
def write(self, data):
while data:
try:
n = self._socket.send(data)
except socket.error:
n = None
if not n:
raise EOFError('Socket closed')
data = data[n:]
|
Send all of *data* to the socket.
Args:
data(bytes): The data to send.
Raises:
EOFError: If the socket was closed.
|
juraj-google-style
|
def sub_map(self, counters_map):
for counter_name in counters_map.counters:
self.increment(counter_name, (- counters_map.counters[counter_name]))
|
Subtracts all counters from the map.
For each counter in the passed map, subtracts its value from the corresponding
counter in this map.
Args:
counters_map: CounterMap instance to subtract.
|
codesearchnet
|
def Add(self, request, callback=None):
handler = RequestResponseAndHandler(request, None, callback)
self.__request_response_handlers[self._NewId()] = handler
|
Add a new request.
Args:
request: A http_wrapper.Request to add to the batch.
callback: A callback to be called for this response, of the
form callback(response, exception). The first parameter is the
deserialized response object. The second is an
apiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no errors
occurred.
Returns:
None
|
codesearchnet
|
def run_model(self, op_list, num_steps, feed_vars=(), feed_data=None, print_every=100, allow_initialize=True):
feed_data = (feed_data or itertools.repeat(()))
ops = [bookkeeper.global_step()]
ops.extend(op_list)
sess = tf.get_default_session()
self.prepare_model(sess, allow_initialize=allow_initialize)
results = []
try:
if (num_steps is None):
counter = itertools.count(0)
elif (num_steps >= 0):
counter = xrange(num_steps)
else:
raise ValueError(('num_steps cannot be negative: %s' % num_steps))
for (i, data) in zip(counter, feed_data):
log_this_time = (print_every and ((i % print_every) == 0))
if (len(data) != len(feed_vars)):
raise ValueError(('feed_data and feed_vars must be the same length: %d vs %d' % (len(data), len(feed_vars))))
if self._coord.should_stop():
print('Coordinator stopped')
sys.stdout.flush()
self.stop_queues()
break
if (len(feed_vars) != len(data)):
raise ValueError('Feed vars must be the same length as data.')
if (log_this_time and self._summary_writer):
results = sess.run((ops + [self._summaries]), dict(zip(feed_vars, data)))
self._summary_writer.add_summary(results[(- 1)], results[0])
results = results[:(- 1)]
else:
results = sess.run(ops, dict(zip(feed_vars, data)))
if log_this_time:
self._log_and_save(sess, results)
if (print_every and (not log_this_time)):
self._log_and_save(sess, results)
except tf.errors.OutOfRangeError as ex:
print(('Done training -- epoch limit reached %s' % ex.message))
sys.stdout.flush()
self.stop_queues()
except BaseException as ex:
print(('Exception -- stopping threads: %s' % ex), file=sys.stderr)
sys.stdout.flush()
self.stop_queues()
raise
return results
|
Runs `op_list` for `num_steps`.
Args:
op_list: A list of ops to run.
num_steps: Number of steps to run this for. If feeds are used, this is a
maximum. `None` can be used to signal "forever".
feed_vars: The variables to feed.
feed_data: An iterator that feeds data tuples.
print_every: Print a log line and checkpoint every so many steps.
allow_initialize: If True, the model will be initialized if any variable
is uninitialized, if False the model will not be initialized.
Returns:
The final run result as a list.
Raises:
ValueError: If feed_data doesn't match feed_vars.
|
codesearchnet
|
def association(self, group_xid):
association = {'groupXid': group_xid}
self._indicator_data.setdefault('associatedGroups', []).append(association)
|
Add association using xid value.
Args:
group_xid (str): The external id of the Group to associate.
|
juraj-google-style
|
def read_cdx(file, encoding='utf8'):
with codecs.getreader(encoding)(file) as stream:
header_line = stream.readline()
separator = header_line[0]
field_keys = header_line.strip().split(separator)
if (field_keys.pop(0) != 'CDX'):
raise ValueError('CDX header not found.')
for line in stream:
(yield dict(zip(field_keys, line.strip().split(separator))))
|
Iterate CDX file.
Args:
file (str): A file object.
encoding (str): The encoding of the file.
Returns:
iterator: Each item is a dict that maps from field key to value.
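A minimal sketch, assuming an on-disk CDX index file; the file name is hypothetical and the keys of each record depend on the CDX header line:
.. code-block:: python
with open('index.cdx', 'rb') as f:
    for record in read_cdx(f):
        print(record)  # dict mapping CDX field keys to values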
|
codesearchnet
|
def _kl_normal_normal(n_a, n_b, name=None):
with tf.name_scope(name or "kl_normal_normal"):
one = tf.constant(1, dtype=n_a.dtype)
two = tf.constant(2, dtype=n_a.dtype)
half = tf.constant(0.5, dtype=n_a.dtype)
s_a_squared = tf.square(n_a.scale)
s_b_squared = tf.square(n_b.scale)
ratio = s_a_squared / s_b_squared
return (tf.square(n_a.loc - n_b.loc) / (two * s_b_squared) + half *
(ratio - one - tf.math.log(ratio)))
|
Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
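A short sketch assuming TensorFlow Probability's Normal distribution; any objects exposing loc, scale and dtype would work the same way:
.. code-block:: python
import tensorflow_probability as tfp
n_a = tfp.distributions.Normal(loc=0.0, scale=1.0)
n_b = tfp.distributions.Normal(loc=1.0, scale=2.0)
_kl_normal_normal(n_a, n_b)  # scalar Tensor, matching tfp.distributions.kl_divergence(n_a, n_b)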
|
juraj-google-style
|
def chi_squared(source_frequency, target_frequency):
target_prob = frequency_to_probability(target_frequency)
source_len = sum(v for k, v in source_frequency.items() if k in target_frequency)
result = 0
for symbol, prob in target_prob.items():
symbol_frequency = source_frequency.get(symbol, 0)
result += _calculate_chi_squared(symbol_frequency, prob, source_len)
return result
|
Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.
Example:
>>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})
0.1
Args:
source_frequency (dict): Frequency map of the text you are analyzing
target_frequency (dict): Frequency map of the target language to compare with
Returns:
Decimal value of the chi-squared statistic
|
juraj-google-style
|
def get_selection(cls, strings, title='Select an option', subtitle=None, exit_option=True, _menu=None):
menu = cls(strings, title, subtitle, exit_option)
if (_menu is not None):
_menu.append(menu)
menu.show()
menu.join()
return menu.selected_option
|
Single-method way of getting a selection out of a list of strings.
Args:
strings (:obj:`list` of :obj:`str`): The list of strings this menu should be built from.
title (str): The title of the menu.
subtitle (str): The subtitle of the menu.
exit_option (bool): Specifies whether this menu should show an exit item by default. Defaults to True.
_menu: Should probably only be used for testing, pass in a list and the created menu used internally by
the method will be appended to it
Returns:
int: The index of the selected option.
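A hedged usage sketch; the class name Menu below is hypothetical, so call get_selection on whichever class defines it:
.. code-block:: python
options = ['Red', 'Green', 'Blue']
index = Menu.get_selection(options, title='Pick a colour')
print(options[index])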
|
codesearchnet
|
def _init_ready_op(self, ready_op=USE_DEFAULT, ready_for_local_init_op=USE_DEFAULT):
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
if ready_for_local_init_op is Supervisor.USE_DEFAULT:
ready_for_local_init_op = self._get_first_op_from_collection(ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
self._ready_for_local_init_op = ready_for_local_init_op
|
Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized. If it's set to
USE_DEFAULT, creates an op that checks all the variables are
initialized.
ready_for_local_init_op: `Tensor` to check if the model is ready to run
local_init_op. If it's set to USE_DEFAULT, creates an op that checks all
the global variables are initialized.
|
github-repos
|
def download(self, updates):
if (updates.count() == 0):
ret = {'Success': False, 'Updates': 'Nothing to download'}
return ret
downloader = self._session.CreateUpdateDownloader()
self._session.ClientApplicationID = 'Salt: Download Update'
with salt.utils.winapi.Com():
download_list = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
ret = {'Updates': {}}
for update in updates.updates:
uid = update.Identity.UpdateID
ret['Updates'][uid] = {}
ret['Updates'][uid]['Title'] = update.Title
ret['Updates'][uid]['AlreadyDownloaded'] = bool(update.IsDownloaded)
if (not salt.utils.data.is_true(update.EulaAccepted)):
log.debug('Accepting EULA: %s', update.Title)
update.AcceptEula()
if (not salt.utils.data.is_true(update.IsDownloaded)):
log.debug('To Be Downloaded: %s', uid)
log.debug('\tTitle: %s', update.Title)
download_list.Add(update)
if (download_list.Count == 0):
ret = {'Success': True, 'Updates': 'Nothing to download'}
return ret
downloader.Updates = download_list
try:
log.debug('Downloading Updates')
result = downloader.Download()
except pywintypes.com_error as error:
(hr, msg, exc, arg) = error.args
try:
failure_code = self.fail_codes[exc[5]]
except KeyError:
failure_code = 'Unknown Failure: {0}'.format(error)
log.error('Download Failed: %s', failure_code)
raise CommandExecutionError(failure_code)
result_code = {0: 'Download Not Started', 1: 'Download In Progress', 2: 'Download Succeeded', 3: 'Download Succeeded With Errors', 4: 'Download Failed', 5: 'Download Aborted'}
log.debug('Download Complete')
log.debug(result_code[result.ResultCode])
ret['Message'] = result_code[result.ResultCode]
if (result.ResultCode in [2, 3]):
log.debug('Downloaded Successfully')
ret['Success'] = True
else:
log.debug('Download Failed')
ret['Success'] = False
for i in range(download_list.Count):
uid = download_list.Item(i).Identity.UpdateID
ret['Updates'][uid]['Result'] = result_code[result.GetUpdateResult(i).ResultCode]
return ret
|
Download the updates passed in the updates collection. Load the updates
collection using ``search`` or ``available``
Args:
updates (Updates): An instance of the Updates class containing
the updates to be downloaded.
Returns:
dict: A dictionary containing the results of the download
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# Download KB3195454
updates = wua.search('KB3195454')
results = wua.download(updates)
|
codesearchnet
|
def randwindow(self, window_shape):
row = random.randrange(window_shape[0], self.shape[1])
col = random.randrange(window_shape[1], self.shape[2])
return self[:, (row - window_shape[0]):row, (col - window_shape[1]):col]
|
Get a random window of a given shape from within an image
Args:
window_shape (tuple): The desired shape of the returned image as (height, width) in pixels.
Returns:
image: a new image object of the specified shape and same type
|
codesearchnet
|
def _safe_scalar_div(numerator, denominator, name):
numerator.get_shape().with_rank_at_most(1)
denominator.get_shape().with_rank_at_most(1)
return math_ops.div_no_nan(numerator, denominator, name=name)
|
Divides two values, returning 0 if the denominator is 0.
Args:
numerator: A scalar `float64` `Tensor`.
denominator: A scalar `float64` `Tensor`.
name: Name for the returned op.
Returns:
0 if `denominator` == 0, else `numerator` / `denominator`
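A small sketch of the zero-denominator behaviour, assuming TensorFlow is imported as tf:
.. code-block:: python
import tensorflow as tf
_safe_scalar_div(tf.constant(3.0, tf.float64), tf.constant(0.0, tf.float64), name='safe')  # -> 0.0
_safe_scalar_div(tf.constant(3.0, tf.float64), tf.constant(2.0, tf.float64), name='safe')  # -> 1.5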
|
github-repos
|
def set_step(step):
_summary_state.step = step
|
Sets the default summary step for the current thread.
For convenience, this function sets a default value for the `step` parameter
used in summary-writing functions elsewhere in the API so that it need not
be explicitly passed in every such invocation. The value can be a constant
or a variable, and can be retrieved via `tf.summary.experimental.get_step()`.
Note: when using this with @tf.functions, the step value will be captured at
the time the function is traced, so changes to the step outside the function
will not be reflected inside the function unless using a `tf.Variable` step.
Args:
step: An `int64`-castable default step value, or None to unset.
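A minimal sketch of the public API this backs (tf.summary.experimental.set_step, paired with get_step as mentioned above); the log directory is hypothetical:
.. code-block:: python
import tensorflow as tf
step = tf.Variable(0, dtype=tf.int64)
tf.summary.experimental.set_step(step)
writer = tf.summary.create_file_writer('/tmp/logs')
with writer.as_default():
    tf.summary.scalar('loss', 0.5)  # no explicit step argument needed
step.assign_add(1)  # a tf.Variable step is also picked up inside tf.functions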
|
github-repos
|
def _save_cached_when_graph_building(self, file_prefix, object_graph_tensor, options):
serialized_tensors, feed_additions, registered_savers, graph_proto = self._gather_serialized_tensors(object_graph_tensor)
if self._last_save_object_graph != graph_proto or context.executing_eagerly() or ops.inside_function():
saver = functional_saver.MultiDeviceSaver(serialized_tensors, registered_savers)
save_op = saver.save(file_prefix, options=options)
with ops.device('/cpu:0'):
with ops.control_dependencies([save_op]):
self._cached_save_operation = array_ops.identity(file_prefix)
self._last_save_object_graph = graph_proto
return (self._cached_save_operation, feed_additions)
|
Create or retrieve save ops.
Args:
file_prefix: The prefix for saved checkpoint files.
object_graph_tensor: A `Tensor` to which the current object graph will be
fed.
options: `CheckpointOptions` object.
Returns:
A two-element tuple with a filename tensor and a feed_dict of tensors to
feed when running it (if graph building). The feed dict contains the
current object graph and any Python state to be saved in the
checkpoint. When executing eagerly only the first argument is meaningful.
|
github-repos
|
def bootstrap_results(self, init_state):
del init_state
return []
|
Returns an object with the same type as returned by `one_step(...)[1]`.
Args:
init_state: 1D `tf.Tensor` which is the initial chain state.
Returns:
kernel_results: Empty list.
|
github-repos
|
def chip_as_adjacency_list(device: 'cirq.google.XmonDevice',
) -> Dict[GridQubit, List[GridQubit]]:
c_set = set(device.qubits)
c_adj = {}
for n in device.qubits:
c_adj[n] = []
for m in [above(n), left_of(n), below(n), right_of(n)]:
if m in c_set:
c_adj[n].append(m)
return c_adj
|
Gives adjacency list representation of a chip.
The adjacency list is constructed in order of above, left_of, below and
right_of consecutively.
Args:
device: Chip to be converted.
Returns:
Map from nodes to list of qubits which represent all the neighbours of
given qubit.
|
juraj-google-style
|
def __call__(self, name):
priv = "_" + name
def getter(this):
if ((not hasattr(this, priv) or getattr(this, priv) is None) and
hasattr(this, "_setters") and isinstance(this._setters, (list, tuple))):
for prefix in this._setters:
cmd = "{}{}".format(prefix, priv)
if hasattr(this, cmd):
getattr(this, cmd)()
if hasattr(this, priv):
break
if isinstance(self.pre_get, str):
getattr(this, self.pre_get)()
elif callable(self.pre_get):
self.pre_get(this)
return getattr(this, priv, None)
def setter(this, value):
if self.autoconv and not isinstance(value, self.types) and value is not None:
for t in self.types:
try:
value = t(value)
break
except Exception as e:
if self.verbose:
warnings.warn("Conversion of {} (with type {}) failed to type {}\n{}".format(name, type(value), t, str(e)))
else:
raise TypeError("Cannot convert object of type {} to any of {}.".format(type(value), self.types))
elif ((value is None and self.allow_none == False) or
(not isinstance(value, self.types) and value is not None)):
raise TypeError("Object '{}' cannot have type {}, must be of type(s) {}.".format(name, type(value), self.types))
if isinstance(self.pre_set, str):
getattr(this, self.pre_set)()
elif callable(self.pre_set):
self.pre_set(this)
if isinstance(this, (pd.DataFrame, pd.SparseDataFrame)):
this[priv] = value
else:
setattr(this, priv, value)
if isinstance(self.post_set, str):
getattr(this, self.post_set)()
elif callable(self.post_set):
self.post_set(this)
def deleter(this):
if isinstance(self.pre_del, str):
getattr(this, self.pre_del)()
elif callable(self.pre_del):
self.pre_del(this)
delattr(this, priv)
if isinstance(self.post_del, str):
getattr(this, self.post_del)()
elif callable(self.post_del):
self.post_del(this)
return property(getter, setter, deleter, doc=self.doc)
|
Construct the property.
Args:
name (str): Attribute (property) name
Returns:
prop (property): Custom property definition with support for typing
|
juraj-google-style
|
def file(cls, path, encoding=None, parser=None):
cls.__hierarchy.append(file.File(path, encoding, parser))
|
Set a file as a source.
Files are parsed as literal Python dicts by default; this behaviour
can be configured.
Args:
path: The path to the file to be parsed
encoding: The encoding of the file.
Defaults to 'raw'. Available built-in values: 'ini', 'json', 'yaml'.
Custom value can be used in conjunction with parser.
parser: A parser function for a custom encoder.
It is expected to return a dict containing the parsed values
when called with the contents of the file as an argument.
|
codesearchnet
|
def get_string(self, sort_keys=False, pretty=False):
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if isinstance(self[k], dict):
if k in ["ELNES", "EXELFS"]:
lines.append([k, self._stringify_val(self[k]["ENERGY"])])
beam_energy = self._stringify_val(self[k]["BEAM_ENERGY"])
beam_energy_list = beam_energy.split()
if int(beam_energy_list[1]) == 0:
lines.append([beam_energy])
lines.append([self._stringify_val(self[k]["BEAM_DIRECTION"])])
else:
beam_energy_list[2] = str(0)
lines.append([self._stringify_val(beam_energy_list)])
lines.append([self._stringify_val(self[k]["ANGLES"])])
lines.append([self._stringify_val(self[k]["MESH"])])
lines.append([self._stringify_val(self[k]["POSITION"])])
else:
lines.append([k, self._stringify_val(self[k])])
if pretty:
return tabulate(lines)
else:
return str_delimited(lines, None, " ")
|
Returns a string representation of the Tags. Unlike the __str__ method,
this method provides options for sorting and pretty printing.
Args:
sort_keys: Set to True to sort the Feff parameters alphabetically.
Defaults to False.
pretty: Set to True for pretty aligned output. Defaults to False.
Returns:
String representation of Tags.
|
juraj-google-style
|
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
axis = self._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return self.__constructor__(
query_compiler=self._query_compiler.cumprod(
axis=axis, skipna=skipna, **kwargs
)
)
|
Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
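A short sketch of the expected semantics, which mirror pandas; DataFrame here stands for whichever class defines this method:
.. code-block:: python
df = DataFrame({'a': [1, 2, 3], 'b': [2, 2, 2]})
df.cumprod()        # column-wise: a -> 1, 2, 6 and b -> 2, 4, 8
df.cumprod(axis=1)  # row-wise product across columns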
|
juraj-google-style
|
def read_execution_stack_trace(self, execution):
host_name = self._stack_frame_by_id[execution.stack_frame_ids[0]][0]
return (host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in execution.stack_frame_ids])
|
Read the stack trace of a given Execution object.
Args:
execution: The Execution object of interest.
Returns:
1. The host name.
2. The stack trace, as a list of (file_path, lineno, func) tuples.
|
github-repos
|
def __init__(self, cluster_spec, initializer=None, share_gpu=True):
_active_pool_runners.add(self)
self._cluster_spec = cluster_spec
self._initializer = initializer
self._share_gpu = share_gpu
self._conn = {}
self._runner = None
|
Creates a multi-process pool runner.
Args:
cluster_spec: Dict for cluster spec. The following is an example of
cluster with three workers.
{"worker": ["worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"]}
initializer: a callable to called at the startup of worker processes.
share_gpu: Whether to share GPUs among workers. If False, each worker is
assigned different GPUs in a roundrobin fashion.
Raises:
RuntimeError: if `multi_process_runner.test_main()` is not called.
ValueError: if there are more than one chief in the `cluster_spec`.
|
github-repos
|
def cctop_save_xml(jobid, outpath):
status = cctop_check_status(jobid=jobid)
if status == 'Finished':
result = 'http:
result_text = requests.post(result)
with open(outpath, 'w') as f:
f.write(result_text.text)
return outpath
else:
raise ConnectionRefusedError('CCTOP job incomplete, status is "{}"'.format(status))
|
Save the CCTOP results file in XML format.
Args:
jobid (str): Job ID obtained when job was submitted
outpath (str): Path to output filename
Returns:
str: Path to output filename
|
juraj-google-style
|
def get_key_by_job_id(cls, mapreduce_id):
return db.Key.from_path(cls.kind(), str(mapreduce_id))
|
Retrieves the Key for a Job.
Args:
mapreduce_id: The job to retrieve.
Returns:
Datastore Key that can be used to fetch the MapreduceState.
|
juraj-google-style
|
def getEstTraitCorrCoef(self, term_i=None):
cov = self.getEstTraitCovar(term_i)
stds = SP.sqrt(cov.diagonal())[:, SP.newaxis]
RV = ((cov / stds) / stds.T)
return RV
|
Returns the estimated trait correlation matrix
Args:
term_i: index of the term we are interested in
|
codesearchnet
|
def upload(self, title, description="", keywords="", developer_tags=None, access_control=AccessControl.Public):
if not self.authenticated:
raise ApiError(_("Authentication is required"))
my_media_group = gdata.media.Group(
title=gdata.media.Title(text=title),
description=gdata.media.Description(description_type='plain',
text=description),
keywords=gdata.media.Keywords(text=keywords),
category=[gdata.media.Category(
text='Autos',
scheme='http:
label='Autos')],
)
extension = self._access_control(access_control, my_media_group)
video_entry = gdata.youtube.YouTubeVideoEntry(
media=my_media_group, extension_elements=extension)
if developer_tags:
video_entry.AddDeveloperTags(developer_tags)
response = Api.yt_service.GetFormUploadToken(video_entry)
post_url = response[0]
youtube_token = response[1]
return {'post_url': post_url, 'youtube_token': youtube_token}
|
Browser-based upload.
Creates the video entry and metadata needed to initiate a browser upload.
Authentication is required.
Params:
title: string
description: string
keywords: comma-separated string
developer_tags: tuple
Return:
dict containing post_url and youtube_token, i.e. {'post_url': post_url, 'youtube_token': youtube_token}
Raises:
ApiError: on no authentication
|
juraj-google-style
|
def Cancel(self, request, global_params=None):
config = self.GetMethodConfig('Cancel')
return self._RunMethod(config, request, global_params=global_params)
|
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
Args:
request: (CloudbuildProjectsLocationsOperationsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
|
github-repos
|
def match_urls_to_resources(self, url_values):
valid_values = {}
for resource in self.Meta.related_resources:
for (k, v) in url_values.items():
resource_url = resource.get_resource_url(resource, resource.Meta.base_url)
if isinstance(v, list):
if all([(resource_url in i) for i in v]):
self.set_related_method(resource, v)
valid_values[k] = v
elif (resource_url in v):
self.set_related_method(resource, v)
valid_values[k] = v
return valid_values
|
For the list of valid URLs, try and match them up
to resources in the related_resources attribute.
Args:
url_values: A dictionary of keys and URL strings that
could be related resources.
Returns:
valid_values: The values that are valid
|
codesearchnet
|
def get_rdf_es_idx_map(cls, idx_obj):
idx_name = list(idx_obj)[0]
es_map = {
"index": idx_name,
"body" : {
"mappings": {},
"settings": {
"index": {
"analysis": {
"analyzer": {
"keylower": {
"tokenizer": "keyword",
"type": "custom",
"filter": "lowercase",
"ignore_above" : 256
}
}
}
}
}
}
}
for idx_cls in idx_obj[idx_name]:
es_map['body']['mappings'][idx_cls.es_defs['kds_esDocType'][0]] = \
{'properties': idx_cls.es_mapping(idx_cls)}
return es_map
|
Returns an Elasticsearch mapping for the specified index based on
the mapping defined by the RDF class definitions.
Args:
idx_obj: Dictionary of the index and a list of rdfclasses
included in the mapping
|
juraj-google-style
|
def opt_separator(self) -> bool:
start = self.offset
self.dfa([{'': (lambda : (- 1)), ' ': (lambda : 0), '\t': (lambda : 0), '\n': (lambda : 0), '\r': (lambda : 1), '/': (lambda : 2)}, {'': self._back_break, '\n': (lambda : 0)}, {'': self._back_break, '/': (lambda : 3), '*': (lambda : 4)}, {'': (lambda : 3), '\n': (lambda : 0)}, {'': (lambda : 4), '*': (lambda : 5)}, {'': (lambda : 4), '/': (lambda : 0), '*': (lambda : 5)}])
return (start < self.offset)
|
Parse an optional separator and return ``True`` if found.
Raises:
EndOfInput: If past the end of input.
|
codesearchnet
|
def verify(self, verify_key):
if not self.mardata.signatures or not self.mardata.signatures.sigs:
return False
hashers = []
for sig in self.mardata.signatures.sigs:
hashers.append((sig.algorithm_id, sig.signature, make_hasher(sig.algorithm_id)))
assert len(hashers) == len(self.mardata.signatures.sigs)
for block in get_signature_data(self.fileobj,
self.mardata.signatures.filesize):
[h.update(block) for (_, _, h) in hashers]
for algo_id, sig, h in hashers:
if not verify_signature(verify_key, sig, h.finalize(), h.algorithm.name):
return False
else:
return True
|
Verify that this MAR file has a valid signature.
Args:
verify_key (str): PEM formatted public key
Returns:
True if the MAR file's signature matches its contents
False otherwise; this includes cases where there is no signature.
|
juraj-google-style
|
def heightmap_multiply_hm(
hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray
) -> None:
hm3[:] = hm1[:] * hm2[:]
|
Multiplies two heightmaps together and stores the result in ``hm3``.
Args:
hm1 (numpy.ndarray): The first heightmap.
hm2 (numpy.ndarray): The second heightmap to multiply with the first.
hm3 (numpy.ndarray): A destination heightmap to store the result.
.. deprecated:: 2.0
Do ``hm3[:] = hm1[:] * hm2[:]`` instead.
Alternatively you can do ``HeightMap(hm1.array[:] * hm2.array[:])``.
|
juraj-google-style
|
def move(self, delta):
self.pos = (self.pos[0]+delta[0], self.pos[1]+delta[1])
|
Move the node.
Args:
delta (tuple): A tuple holding the adjustment of the position.
|
juraj-google-style
|
def GetName(obj):
precondition.AssertType(obj, (type, types.FunctionType))
if PY2:
return obj.__name__.decode('ascii')
else:
return obj.__name__
|
A compatibility wrapper for getting an object's name.
In Python 2 class names are returned as `bytes` (since class names can contain
only ASCII characters) whereas in Python 3 they are `unicode` (since class
names can contain arbitrary unicode characters).
This function makes this behaviour consistent and always returns the name as
a unicode string.
Once support for Python 2 is dropped all invocations of this call can be
replaced with ordinary `__name__` access.
Args:
obj: A type or function object to get the name for.
Returns:
Name of the specified class or function as a unicode string.
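A tiny sketch; both calls return a unicode string on either Python version:
.. code-block:: python
class Foo(object):
    pass
def bar():
    pass
GetName(Foo)  # u'Foo'
GetName(bar)  # u'bar'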
|
codesearchnet
|
def _right_pad(x, final_rank):
padded_shape = tf.concat(
[tf.shape(input=x),
tf.ones(final_rank - tf.rank(x), dtype=tf.int32)],
axis=0)
static_padded_shape = None
if x.shape.is_fully_defined() and isinstance(final_rank, int):
static_padded_shape = x.shape.as_list()
extra_dims = final_rank - len(static_padded_shape)
static_padded_shape.extend([1] * extra_dims)
padded_x = tf.reshape(x, static_padded_shape or padded_shape)
return padded_x
|
Pads the shape of x to the right to be of rank final_rank.
Expands the dims of `x` to the right such that its rank is equal to
final_rank. For example, if `x` is of shape [1, 5, 7, 2] and `final_rank` is
7, we return padded_x, which is of shape [1, 5, 7, 2, 1, 1, 1].
Args:
x: The tensor whose shape is to be padded.
final_rank: Scalar int32 `Tensor` or Python `int`. The desired rank of x.
Returns:
padded_x: A tensor of rank final_rank.
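A brief sketch of the shape expansion described above, assuming TensorFlow is imported as tf:
.. code-block:: python
import tensorflow as tf
x = tf.zeros([1, 5, 7, 2])
_right_pad(x, 7).shape  # TensorShape([1, 5, 7, 2, 1, 1, 1])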
|
juraj-google-style
|
def get_attributes(path):
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
attributes = {}
intAttributes = win32file.GetFileAttributes(path)
attributes['archive'] = (intAttributes & 32) == 32
attributes['reparsePoint'] = (intAttributes & 1024) == 1024
attributes['compressed'] = (intAttributes & 2048) == 2048
attributes['directory'] = (intAttributes & 16) == 16
attributes['encrypted'] = (intAttributes & 16384) == 16384
attributes['hidden'] = (intAttributes & 2) == 2
attributes['normal'] = (intAttributes & 128) == 128
attributes['notIndexed'] = (intAttributes & 8192) == 8192
attributes['offline'] = (intAttributes & 4096) == 4096
attributes['readonly'] = (intAttributes & 1) == 1
attributes['system'] = (intAttributes & 4) == 4
attributes['temporary'] = (intAttributes & 256) == 256
attributes['mountedVolume'] = False
if attributes['reparsePoint'] is True and attributes['directory'] is True:
fileIterator = win32file.FindFilesIterator(path)
findDataTuple = next(fileIterator)
if findDataTuple[6] == 0xA0000003:
attributes['mountedVolume'] = True
attributes['symbolicLink'] = False
if attributes['reparsePoint'] is True:
fileIterator = win32file.FindFilesIterator(path)
findDataTuple = next(fileIterator)
if findDataTuple[6] == 0xA000000C:
attributes['symbolicLink'] = True
return attributes
|
Return a dictionary object with the Windows
file attributes for a file.
Args:
path (str): The path to the file or directory
Returns:
dict: A dictionary of file attributes
CLI Example:
.. code-block:: bash
salt '*' file.get_attributes c:\\temp\\a.txt
|
juraj-google-style
|
def _ProcessAMCacheProgramKey(self, am_entry, parser_mediator):
amcache_datetime = am_entry.get_value_by_name(
self._AMCACHE_P_INSTALLDATE).get_data_as_integer()
event_data = AmcacheProgramEventData()
name = am_entry.get_value_by_name(self._AMCACHE_P_NAME)
if name:
event_data.name = name.get_data_as_string()
version = am_entry.get_value_by_name(self._AMCACHE_P_VERSION)
if version:
event_data.version = version.get_data_as_string()
publisher = am_entry.get_value_by_name(self._AMCACHE_P_PUBLISHER)
if publisher:
event_data.publisher = publisher.get_data_as_string()
languagecode = am_entry.get_value_by_name(self._AMCACHE_P_LANGUAGECODE)
if languagecode:
event_data.languagecode = languagecode.get_data_as_string()
entrytype = am_entry.get_value_by_name(self._AMCACHE_P_ENTRYTYPE)
if entrytype:
event_data.entrytype = entrytype.get_data_as_string()
uninstallkey = am_entry.get_value_by_name(self._AMCACHE_P_UNINSTALLKEY)
if uninstallkey:
uninstallkey = uninstallkey.get_data()
uninstallkey = uninstallkey.decode('utf-16-LE')
event_data.uninstallkey = uninstallkey
filepaths = am_entry.get_value_by_name(self._AMCACHE_P_FILEPATHS)
if filepaths:
filepaths = filepaths.get_data()
filepaths = filepaths.decode('utf-16-LE')
event_data.filepaths = filepaths
productcode = am_entry.get_value_by_name(self._AMCACHE_P_PRODUCTCODE)
if productcode:
event_data.productcode = productcode.get_data_as_string()
packagecode = am_entry.get_value_by_name(self._AMCACHE_P_PACKAGECODE)
if packagecode:
event_data.packagecode = packagecode.get_data_as_string()
msiproductcode = am_entry.get_value_by_name(self._AMCACHE_P_MSIPRODUCTCODE)
if msiproductcode:
msiproductcode = msiproductcode.get_data()
msiproductcode = msiproductcode.decode('utf-16-LE')
event_data.msiproductcode = msiproductcode
msipackagecode = am_entry.get_value_by_name(self._AMCACHE_P_MSIPACKAGECODE)
if msipackagecode:
msipackagecode = msipackagecode.get_data()
msipackagecode = msipackagecode.decode('utf-16-LE')
event_data.msipackagecode = msipackagecode
files = am_entry.get_value_by_name(self._AMCACHE_P_FILES)
if files:
files = files.get_data()
files = files.decode('utf-16-LE')
event_data.files = files
event = time_events.DateTimeValuesEvent(
posix_time.PosixTime(amcache_datetime),
definitions.TIME_DESCRIPTION_INSTALLATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses an Amcache Root/Programs key for events.
Args:
am_entry (pyregf.key): amcache Programs key.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
|
juraj-google-style
|
def _check_warnings(self, json_response):
self.warnings = None
if json_response:
self.warnings = json_response.get('warnings')
if (self.debug and self.warnings):
for w in self.warnings:
print(('WARNING: %s - %s' % (w['warning_name'], w['warning_msg'])))
|
Extract warnings from the response to make them accessible
Args:
json_response (dict): JSON response
|
codesearchnet
|
def new_module(self):
self.reset_run_errors()
if (self._code is None):
return None
module_name = ('bk_script_' + make_id().replace('-', ''))
module = ModuleType(str(module_name))
module.__dict__['__file__'] = os.path.abspath(self._path)
return module
|
Make a fresh module to run in.
Returns:
Module
|
codesearchnet
|
def eval_features(json):
return {'close' : json[-1]['close'],
'sma' : SMA.eval_from_json(json),
'rsi' : RSI.eval_from_json(json),
'so' : SO.eval_from_json(json),
'obv' : OBV.eval_from_json(json)}
|
Gets technical analysis features from market data JSONs
Args:
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
Returns:
Dict of market features and their values
|
juraj-google-style
|
def get_linear_interpolated_value(x_values, y_values, x):
a = np.array(sorted(zip(x_values, y_values), key=lambda d: d[0]))
ind = np.where(a[:, 0] >= x)[0]
if len(ind) == 0 or ind[0] == 0:
raise ValueError("x is out of range of provided x_values")
i = ind[0]
x1, x2 = a[i - 1][0], a[i][0]
y1, y2 = a[i - 1][1], a[i][1]
return y1 + (y2 - y1) / (x2 - x1) * (x - x1)
|
Returns an interpolated value by linear interpolation between two values.
This method is written to avoid dependency on scipy, which causes issues on
threading servers.
Args:
x_values: Sequence of x values.
y_values: Corresponding sequence of y values
x: Get value at particular x
Returns:
Value at x.
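A quick worked example, interpolating halfway between two known points:
.. code-block:: python
get_linear_interpolated_value([0.0, 1.0, 2.0, 3.0], [0.0, 10.0, 20.0, 30.0], 1.5)  # -> 15.0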
|
juraj-google-style
|
class FixedPointMul(Function):
@staticmethod
def forward(ctx, pre_act, pre_act_scaling_factor, bit_num, z_scaling_factor, identity=None, identity_scaling_factor=None):
if len(pre_act_scaling_factor.shape) == 3:
reshape = lambda x: x
else:
reshape = lambda x: x.view(1, 1, -1)
ctx.identity = identity
n = 2 ** (bit_num - 1) - 1
with torch.no_grad():
pre_act_scaling_factor = reshape(pre_act_scaling_factor)
if identity is not None:
identity_scaling_factor = reshape(identity_scaling_factor)
ctx.z_scaling_factor = z_scaling_factor
z_int = torch.round(pre_act / pre_act_scaling_factor)
_A = pre_act_scaling_factor.type(torch.double)
_B = z_scaling_factor.type(torch.float).type(torch.double)
new_scale = _A / _B
new_scale = reshape(new_scale)
m, e = batch_frexp(new_scale)
output = z_int.type(torch.double) * m.type(torch.double)
output = torch.round(output / 2.0 ** e)
if identity is not None:
wx_int = torch.round(identity / identity_scaling_factor)
_A = identity_scaling_factor.type(torch.double)
_B = z_scaling_factor.type(torch.float).type(torch.double)
new_scale = _A / _B
new_scale = reshape(new_scale)
m1, e1 = batch_frexp(new_scale)
output1 = wx_int.type(torch.double) * m1.type(torch.double)
output1 = torch.round(output1 / 2.0 ** e1)
output = output1 + output
return torch.clamp(output.type(torch.float), -n - 1, n)
@staticmethod
def backward(ctx, grad_output):
identity_grad = None
if ctx.identity is not None:
identity_grad = grad_output.clone() / ctx.z_scaling_factor
return (grad_output.clone() / ctx.z_scaling_factor, None, None, None, None, identity_grad, None)
|
Function to perform fixed-point arithmetic that can match integer arithmetic on hardware.
Args:
pre_act (`torch.Tensor`):
Input tensor.
pre_act_scaling_factor (`torch.Tensor`):
Scaling factor of the input tensor *pre_act*.
bit_num (`int`):
Quantization bitwidth.
z_scaling_factor (`torch.Tensor`):
Scaling factor of the output tensor.
identity (`torch.Tensor`, *optional*):
Identity tensor, if exists.
identity_scaling_factor (`torch.Tensor`, *optional*):
Scaling factor of the identity tensor *identity*, if exists.
Returns:
`torch.Tensor`: Output tensor(*pre_act* if *identity* is not given, otherwise the addition of *pre_act* and
*identity*), whose scale is rescaled to *z_scaling_factor*.
|
github-repos
|
def FromString(cls, desc):
parse_exp = Literal(u'run_time').suppress() + time_interval(u'interval')
try:
data = parse_exp.parseString(desc)
return TimeBasedStopCondition(data[u'interval'][0])
except ParseException:
raise ArgumentError(u"Could not parse time based stop condition")
|
Parse this stop condition from a string representation.
The string needs to match:
run_time number [seconds|minutes|hours|days|months|years]
Args:
desc (str): The description
Returns:
TimeBasedStopCondition
|
juraj-google-style
|
def load_from_checkpoint(self, sess, latest_filename=None):
self._create_initializers()
if self._save_path:
ckpt = tf.train.get_checkpoint_state(
os.path.dirname(self._save_path), latest_filename)
if ckpt and ckpt.all_model_checkpoint_paths:
self._saver = tf.train.Saver(saver_def=self._saver.as_saver_def())
self._saver.set_last_checkpoints(list(ckpt.all_model_checkpoint_paths))
if self._saver.last_checkpoints:
self._saver.restore(sess, self._saver.last_checkpoints[-1])
return self._saver.last_checkpoints[-1]
else:
return None
|
Loads the model from the most recent checkpoint.
This gets the most current list of checkpoints each time it is called.
Args:
sess: The current session.
latest_filename: The filename for the latest set of checkpoints, defaults
to 'checkpoints'.
Returns:
The loaded checkpoint or None if it failed to load.
|
juraj-google-style
|
def console_fill_foreground(
con: tcod.console.Console,
r: Sequence[int],
g: Sequence[int],
b: Sequence[int],
) -> None:
if len(r) != len(g) or len(r) != len(b):
raise TypeError("R, G and B must all have the same size.")
if (
isinstance(r, np.ndarray)
and isinstance(g, np.ndarray)
and isinstance(b, np.ndarray)
):
r_ = np.ascontiguousarray(r, dtype=np.intc)
g_ = np.ascontiguousarray(g, dtype=np.intc)
b_ = np.ascontiguousarray(b, dtype=np.intc)
cr = ffi.cast("int *", r_.ctypes.data)
cg = ffi.cast("int *", g_.ctypes.data)
cb = ffi.cast("int *", b_.ctypes.data)
else:
cr = ffi.new("int[]", r)
cg = ffi.new("int[]", g)
cb = ffi.new("int[]", b)
lib.TCOD_console_fill_foreground(_console(con), cr, cg, cb)
|
Fill the foreground of a console with r,g,b.
Args:
con (Console): Any Console instance.
r (Sequence[int]): An array of integers with a length of width*height.
g (Sequence[int]): An array of integers with a length of width*height.
b (Sequence[int]): An array of integers with a length of width*height.
.. deprecated:: 8.4
You should assign to :any:`tcod.console.Console.fg` instead.
|
juraj-google-style
|
def make_supercells_with_defects(self, scaling_matrix):
scs = []
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
scs.append(sc)
for (ids, defect_site) in enumerate(self._defect_sites):
sc_with_inter = sc.copy()
sc_with_inter.append(defect_site.species_string, defect_site.frac_coords, coords_are_cartesian=False, validate_proximity=False, properties=None)
if (not sc_with_inter):
raise RuntimeError('could not generate supercell with interstitial {}'.format((ids + 1)))
scs.append(sc_with_inter.copy())
return scs
|
Generate a sequence of supercells
in which each supercell contains a single interstitial,
except for the first supercell in the sequence
which is a copy of the defect-free input structure.
Args:
scaling_matrix (3x3 integer array): scaling matrix
to transform the lattice vectors.
Returns:
scs ([Structure]): sequence of supercells.
|
codesearchnet
|
async def rename(self, name):
(await self._client.rename_conversation(hangouts_pb2.RenameConversationRequest(request_header=self._client.get_request_header(), new_name=name, event_request_header=self._get_event_request_header())))
|
Rename this conversation.
Hangouts only officially supports renaming group conversations, so
custom names for one-to-one conversations may or may not appear in all
first party clients.
Args:
name (str): New name.
Raises:
.NetworkError: If conversation cannot be renamed.
|
codesearchnet
|
def StatResultFromStatEntry(
stat_entry):
values = []
for attr in _STAT_ATTRS[:10]:
values.append(stat_entry.Get(attr))
return os.stat_result(values)
|
Returns a `os.stat_result` with most information from `StatEntry`.
This is a lossy conversion: only the first 10 stat_result fields are
populated, because the os.stat_result constructor is inflexible.
Args:
stat_entry: An instance of rdf_client_fs.StatEntry.
Returns:
An instance of `os.stat_result` with basic fields populated.
|
juraj-google-style
|
def instantiate(self, substitutions):
param_dict = self.substitute_params(substitutions)
(pkg, ident) = self.name.rsplit('.', 1)
pkg = ('malcolm.modules.%s' % pkg)
try:
ob = importlib.import_module(pkg)
except ImportError as e:
raise_with_traceback(ImportError(('\n%s:%d:\n%s' % (self.filename, self.lineno, e))))
try:
ob = getattr(ob, ident)
except AttributeError:
raise_with_traceback(ImportError(('\n%s:%d:\nPackage %r has no ident %r' % (self.filename, self.lineno, pkg, ident))))
try:
model = MethodModel.from_callable(ob, returns=False)
args = model.validate(param_dict)
ret = ob(**args)
except Exception as e:
sourcefile = inspect.getsourcefile(ob)
lineno = inspect.getsourcelines(ob)[1]
raise_with_traceback(YamlError(('\n%s:%d:\n%s:%d:\n%s' % (self.filename, self.lineno, sourcefile, lineno, e))))
else:
return ret
|
Recursively resolve the dotted name from the base package, then call the
resulting object with the substituted parameters and args.
Args:
substitutions (dict): Substitutions to make to self.param_dict
Returns:
The found object called with (*args, map_from_d)
E.g. if ob is malcolm.parts, and name is "ca.CADoublePart", then the
object will be malcolm.parts.ca.CADoublePart
|
codesearchnet
|
def _ExtractRequestSummaryFields(document):
headers = document.childAtPath('Header/RequestHeader')
body = document.childAtPath('Body')
summary_fields = {
'methodName': body.getChildren()[0].name
}
client_customer_id = headers.getChild('clientCustomerId')
if client_customer_id is not None:
summary_fields['clientCustomerId'] = client_customer_id.text
network_code = headers.getChild('networkCode')
if network_code is not None:
summary_fields['networkCode'] = network_code.text
return summary_fields
|
Extract logging fields from the request's suds.sax.element.Element.
Args:
document: A suds.sax.element.Element instance containing the API request.
Returns:
A dict mapping logging field names to their corresponding value.
|
juraj-google-style
|
def result(self):
if self._read_only:
return self._result
with self._condition:
if (self._wait_for_tree and (not self._result_set_in_context)):
self._condition.wait_for((lambda : (self._tree_has_set or self._result_set_in_context)))
return self._result
|
Return the value at an address, optionally waiting until it is
set from the context_manager, or set based on the pre-fetch mechanism.
Returns:
(bytes): The opaque value for an address.
|
codesearchnet
|
def DecryptPrivateKey(self, encrypted_private_key):
aes = AES.new(self._master_key, AES.MODE_CBC, self._iv)
return aes.decrypt(encrypted_private_key)
|
Decrypt the provided encrypted private key using the initialized master key.
Args:
encrypted_private_key (byte string): the ciphertext to be decrypted.
Returns:
bytes: the decrypted private key.
|
codesearchnet
|
def validate(data):
try:
return Schema(Validator.SCHEMA).validate(data)
except SchemaError as exception:
logging.getLogger(__name__).error(exception)
return None
|
Validate data against the schema.
Args:
data(dict): data structure to validate.
Returns:
dict: data as provided and defaults where defined in schema.
|
juraj-google-style
|
def GetRange(self, start, end=None, additional_headers=None, use_chunks=True):
self.EnsureInitialized()
progress_end_normalized = False
if (self.total_size is not None):
(progress, end_byte) = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
else:
progress = start
end_byte = end
while ((not progress_end_normalized) or (end_byte is None) or (progress <= end_byte)):
end_byte = self.__ComputeEndByte(progress, end=end_byte, use_chunks=use_chunks)
response = self.__GetChunk(progress, end_byte, additional_headers=additional_headers)
if (not progress_end_normalized):
self.__SetTotal(response.info)
(progress, end_byte) = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
response = self.__ProcessResponse(response)
progress += response.length
if (response.length == 0):
if (response.status_code == http_client.OK):
return
raise exceptions.TransferRetryError('Zero bytes unexpectedly returned in download response')
|
Retrieve a given byte range from this download, inclusive.
Range must be of one of these three forms:
* 0 <= start, end = None: Fetch from start to the end of the file.
* 0 <= start <= end: Fetch the bytes from start to end.
* start < 0, end = None: Fetch the last -start bytes of the file.
(These variations correspond to those described in the HTTP 1.1
protocol for range headers in RFC 2616, sec. 14.35.1.)
Args:
start: (int) Where to start fetching bytes. (See above.)
end: (int, optional) Where to stop fetching bytes. (See above.)
additional_headers: (bool, optional) Any additional headers to
pass with the request.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and fetch this range in a single request.
Returns:
None. Streams bytes into self.stream.
|
codesearchnet
|
def process(self):
threads = []
for client in self.find_clients(self.hostnames):
print(client)
thread = threading.Thread(target=self._process_thread, args=(client,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
|
Collect the artifacts.
Raises:
DFTimewolfError: if no artifacts specified nor resolved by platform.
|
codesearchnet
|
def parse_plugin_metadata(content):
if not isinstance(content, bytes):
raise TypeError('Content type must be bytes')
result = plugin_data_pb2.PrCurvePluginData.FromString(content)
if result.version == 0:
return result
else:
logger.warn(
'Unknown metadata version: %s. The latest version known to '
'this build of TensorBoard is %s; perhaps a newer build is '
'available?', result.version, PROTO_VERSION)
return result
|
Parse summary metadata to a Python object.
Arguments:
content: The `content` field of a `SummaryMetadata` proto
corresponding to the pr_curves plugin.
Returns:
A `PrCurvesPlugin` protobuf object.
|
juraj-google-style
|
def IsPayable(self):
from neo.Core.State.ContractState import ContractPropertyState
return ((self.ContractProperties & ContractPropertyState.Payable) > 0)
|
Flag indicating if the contract accepts payments.
Returns:
bool: True if supported. False otherwise.
|
codesearchnet
|
def destroy_cloudwatch_log_event(app='', env='dev', region=''):
session = boto3.Session(profile_name=env, region_name=region)
cloudwatch_client = session.client('logs')
cloudwatch_client.delete_subscription_filter(logGroupName='/aws/lambda/awslimitchecker', filterName=app)
return True
|
Destroy Cloudwatch log event.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
region (str): AWS region.
Returns:
bool: True upon successful completion.
|
juraj-google-style
|
def create_model_package_from_algorithm(self, name, description, algorithm_arn, model_data):
request = {
'ModelPackageName': name,
'ModelPackageDescription': description,
'SourceAlgorithmSpecification': {
'SourceAlgorithms': [
{
'AlgorithmName': algorithm_arn,
'ModelDataUrl': model_data
}
]
}
}
try:
LOGGER.info('Creating model package with name: {}'.format(name))
self.sagemaker_client.create_model_package(**request)
except ClientError as e:
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if (
error_code == 'ValidationException'
and 'ModelPackage already exists' in message
):
LOGGER.warning('Using already existing model package: {}'.format(name))
else:
raise
|
Create a SageMaker Model Package from the results of training with an Algorithm Package
Args:
name (str): ModelPackage name
description (str): Model Package description
algorithm_arn (str): arn or name of the algorithm used for training.
model_data (str): s3 URI to the model artifacts produced by training
|
juraj-google-style
|
def do_ams_get_url(endpoint, access_token, flag=True):
headers = {"Content-Type": json_acceptformat,
"DataServiceVersion": dsversion_min,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
body = ''
response = requests.get(endpoint, headers=headers, allow_redirects=flag)
if flag:
if response.status_code == 301:
response = requests.get(response.headers['location'], data=body, headers=headers)
return response
|
Do an AMS GET request to retrieve the Final AMS Endpoint and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
access_token (str): A valid Azure authentication token.
flag (str): A Flag to follow the redirect or not.
Returns:
HTTP response. JSON body.
|
juraj-google-style
|
def __init__(self, coords):
self._coords = np.array(coords)
self.space_dim, self.simplex_dim = self._coords.shape
self.origin = self._coords[-1]
if self.space_dim == self.simplex_dim + 1:
self._aug = np.concatenate([coords, np.ones((self.space_dim, 1))],
axis=-1)
self._aug_inv = np.linalg.inv(self._aug)
|
Initializes a Simplex from vertex coordinates.
Args:
coords ([[float]]): Coords of the vertices of the simplex. E.g.,
[[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10]].
|
juraj-google-style
|
def select_tasks(self, nids=None, wslice=None, task_class=None):
if nids is not None:
assert wslice is None
tasks = self.tasks_from_nids(nids)
elif wslice is not None:
tasks = []
for work in self[wslice]:
tasks.extend([t for t in work])
else:
tasks = list(self.iflat_tasks())
if task_class is not None:
tasks = [t for t in tasks if t.isinstance(task_class)]
return tasks
|
Return a list with a subset of tasks.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
.. note::
nids and wslice are mutually exclusive.
If no argument is provided, the full list of tasks is returned.
|
juraj-google-style
|
def unpack(self, parallel_tensor):
self._assert_eager()
unpacked_components = [[] for _ in range(len(self.components))]
with ops.device(self._name):
parallel_tensor = variable_utils.convert_variables_to_tensors(parallel_tensor)
for tensor in nest.flatten(parallel_tensor, expand_composites=True):
for accumulator, unpacked_tensor in zip(unpacked_components, self._unpack_tensor(tensor)):
accumulator.append(unpacked_tensor)
return [nest.pack_sequence_as(parallel_tensor, unpacked, expand_composites=True) for unpacked in unpacked_components]
|
Unpack a parallel tensor into its components.
Args:
parallel_tensor: A tensor, composite tensor, or `tf.nest` of such placed
on the ParallelDevice. Passing `tf.Variable` objects reads their value,
it does not share a mutable reference between the packed and unpacked
forms.
Returns:
A list with the same length as `self.components` each with the same
structure as `parallel_tensor`, containing component tensors.
|
github-repos
|
def parse_args(arglist=None):
climan = CLIManager(conf, **SUB_CMDS)
create_complete_files(climan, CONFIG_DIR, 'stagpy', 'stagpy-git', zsh_sourceable=True)
(cmd_args, all_subs) = climan.parse_args(arglist)
sub_cmd = cmd_args.loam_sub_name
if (sub_cmd is None):
return cmd_args.func
if (sub_cmd != 'config'):
commands.report_parsing_problems(PARSING_OUT)
if conf.common.set:
set_conf_str(conf, conf.common.set)
if conf.common.config:
commands.config_pp(all_subs)
load_mplstyle()
try:
_steps_to_slices()
except AttributeError:
pass
return cmd_args.func
|
Parse cmd line arguments.
Update :attr:`stagpy.conf` accordingly.
Args:
arglist (list of str): the list of cmd line arguments. If set to
None, the arguments are taken from :attr:`sys.argv`.
Returns:
function: the function implementing the sub command to be executed.
|
codesearchnet
|
def qc_data(self, tests, alias=None):
r = {m: c.quality(tests, alias) for m, c in self.data.items()}
s = self.qc_curve_group(tests, alias=alias)
for m, results in r.items():
if m in s:
results.update(s[m])
return r
|
Run a series of tests against the data and return the corresponding
results.
Args:
tests (list): a list of test functions.
alias (dict, optional): an alias dictionary mapping mnemonics to lists of mnemonics.
Returns:
dict. The results keyed by curve mnemonic. Stick to booleans (True = pass) or ints.
|
juraj-google-style
|
def phenotypes_to_scored(self,phenotypes=None,overwrite=False):
if not self.is_uniform(): raise ValueError("inconsistent phenotypes")
if phenotypes is None:
phenotypes = self.phenotypes
elif isinstance(phenotypes,str):
phenotypes = [phenotypes]
def _post(binary,phenotype_label,phenotypes,overwrite):
d = binary.copy()
if len(set(phenotypes)&set(list(binary.keys()))) > 0 and overwrite==False:
raise ValueError("Error, phenotype already exists as a scored type")
for label in phenotypes: d[label] = 0
            # "phenotype_label == phenotype_label" is a NaN guard: it is False only when the label is NaN
            if phenotype_label == phenotype_label and phenotype_label in phenotypes:
d[phenotype_label] = 1
return d
output = self.copy()
output['scored_calls'] = output.apply(lambda x:
_post(x['scored_calls'],x['phenotype_label'],phenotypes,overwrite)
,1)
return output
|
Add mutually exclusive phenotypes to the scored calls
Args:
phenotypes (list): a list of phenotypes to add to scored calls. if none or not set, add them all
overwrite (bool): if True allow the overwrite of a phenotype, if False, the phenotype must not exist in the scored calls
Returns:
CellDataFrame
|
juraj-google-style
|
def __init__(self, timeout, proxy_config, cache):
if not cache:
cache = zeep.cache.SqliteCache()
elif cache == ZeepServiceProxy.NO_CACHE:
cache = None
super(_ZeepProxyTransport, self).__init__(
timeout=timeout, operation_timeout=timeout, cache=cache)
self.session.proxies = proxy_config.proxies
|
Initializes _ZeepProxyTransport.
Args:
timeout: An integer timeout in MS for connections.
proxy_config: A ProxyConfig instance representing proxy settings.
cache: A zeep.cache.Base instance representing a cache strategy to employ.
|
juraj-google-style
|
def broadcast_dynamic_shape_extended(a: DynamicRaggedShape, b: DynamicRaggedShape):
if a.row_partitions and b.row_partitions:
if a.dtype != b.dtype:
raise ValueError("Dtypes don't match")
elif a.dtype != b.dtype:
if a.row_partitions:
b = b.with_dtype(a.dtype)
elif b.row_partitions:
a = a.with_dtype(b.dtype)
else:
a = a.with_dtype(dtypes.int64)
b = b.with_dtype(dtypes.int64)
if a.rank is None or b.rank is None:
raise ValueError('Unable to broadcast: unknown rank')
elif a.rank == 0:
return (b, _Broadcaster(a, b, []), _get_identity_broadcaster(b))
elif b.rank == 0:
return (a, _get_identity_broadcaster(a), _Broadcaster(b, a, []))
elif a.rank == 1 and b.rank == 1:
[a_layer, b_layer, target] = _broadcast_dynamic_shape_one_layer(a.inner_shape, b.inner_shape)
target_shape = DynamicRaggedShape._from_inner_shape(target)
return (target_shape, _Broadcaster(a, target_shape, [a_layer]), _Broadcaster(b, target_shape, [b_layer]))
if a.rank > b.rank:
c, bc, ac = _broadcast_dynamic_shape_extended_helper(b, a)
return (c, ac, bc)
return _broadcast_dynamic_shape_extended_helper(a, b)
|
Gets the smallest shape to which a and b can broadcast.
In order to create the smallest shape, one must also do most of the
work to figure out how to transform from the shapes given. Thus, in addition
to returning the shape, it also creates transformations from the
original shapes to the result.
This is the equivalent of:
c = broadcast_dynamic_shape(a, b)
ac = get_broadcaster(a, c)
bc = get_broadcaster(b, c)
return (c, ac, bc)
Args:
a: a DynamicRaggedShape
b: a DynamicRaggedShape
Returns:
A triple of a shape and two broadcasters.
|
github-repos
|
def _add_common_constrain(token_lst: List[Dict], d: Dict) -> List[Dict]:
result = []
for a_token in token_lst:
if not tf_transfer(d["is_required"]):
a_token["OP"] = "?"
result.append(a_token)
return result
|
Add a common constraint for every token type, such as "is_required".
Args:
token_lst: List[Dict]
d: Dict
Returns: List[Dict]
|
juraj-google-style
|
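A small, made-up example of the function above. It assumes the tf_transfer helper (not shown here) maps the string "false" to False, in which case the optional "OP" operator is added to each spaCy-style token pattern.

# Hypothetical usage sketch with placeholder inputs.
tokens = [{"LOWER": "apple"}]
config = {"is_required": "false"}           # assumed to be treated as False by tf_transfer
print(_add_common_constrain(tokens, config))  # -> [{'LOWER': 'apple', 'OP': '?'}]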
def parse_doctype(cls, file, encoding=None):
if encoding:
lxml_encoding = (to_lxml_encoding(encoding) or 'latin1')
else:
lxml_encoding = encoding
try:
parser = lxml.etree.XMLParser(encoding=lxml_encoding, recover=True)
tree = lxml.etree.parse(io.BytesIO(wpull.util.peek_file(file)), parser=parser)
if (tree.getroot() is not None):
return tree.docinfo.doctype
except lxml.etree.LxmlError:
pass
|
Get the doctype from the document.
Returns:
str, None
|
codesearchnet
|
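A hedged usage sketch for the classmethod above; HTMLReader is a stand-in for whatever class defines parse_doctype. The file must be opened in binary mode because lxml is handed bytes.

# Hypothetical usage sketch.
with open('page.html', 'rb') as in_file:
    doctype = HTMLReader.parse_doctype(in_file, encoding='utf-8')
print(doctype)   # e.g. '<!DOCTYPE html>' for an HTML5 page, or None if parsing failed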
def power(self, n):
if not isinstance(n, int):
raise QiskitError("Can only take integer powers of Operator.")
if self.input_dims() != self.output_dims():
raise QiskitError("Can only power with input_dims = output_dims.")
return Operator(
np.linalg.matrix_power(self.data, n), self.input_dims(),
self.output_dims())
|
Return the matrix power of the operator.
Args:
n (int): the power to raise the matrix to.
Returns:
BaseOperator: the n-times composed operator.
Raises:
QiskitError: if the input and output dimensions of the operator
are not equal, or the power is not a positive integer.
|
juraj-google-style
|
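A short usage sketch, assuming Qiskit's quantum_info module provides this method on Operator: squaring the Pauli-X matrix yields the identity.

import numpy as np
from qiskit.quantum_info import Operator

# Minimal sketch: X squared is the identity.
x = Operator(np.array([[0, 1], [1, 0]], dtype=complex))
print(x.power(2).data)   # [[1.+0.j 0.+0.j] [0.+0.j 1.+0.j]]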
def plot_soma(ax, soma, plane='xy', soma_outline=True, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA):
(plane0, plane1) = _plane2col(plane)
color = _get_color(color, tree_type=NeuriteType.soma)
if isinstance(soma, SomaCylinders):
(plane0, plane1) = _plane2col(plane)
for (start, end) in zip(soma.points, soma.points[1:]):
common.project_cylinder_onto_2d(ax, (plane0, plane1), start=start[COLS.XYZ], end=end[COLS.XYZ], start_radius=start[COLS.R], end_radius=end[COLS.R], color=color, alpha=alpha)
elif soma_outline:
ax.add_artist(Circle(soma.center[[plane0, plane1]], soma.radius, color=color, alpha=alpha))
else:
(plane0, plane1) = _plane2col(plane)
points = [(p[plane0], p[plane1]) for p in soma.iter()]
if points:
points.append(points[0])
            ax.plot(*zip(*points), color=color, alpha=alpha, linewidth=linewidth)  # unpack (x, y) pairs to draw the closed outline
ax.set_xlabel(plane[0])
ax.set_ylabel(plane[1])
bounding_box = geom.bounding_box(soma)
ax.dataLim.update_from_data_xy(np.vstack(([bounding_box[0][plane0], bounding_box[0][plane1]], [bounding_box[1][plane0], bounding_box[1][plane1]])), ignore=False)
|
Generates a 2d figure of the soma.
Args:
ax(matplotlib axes): on what to plot
soma(neurom.core.Soma): plotted soma
plane(str): Any pair of 'xyz'
soma_outline(bool): whether to draw the outline of the soma
linewidth(float): line width used when the soma is drawn as a polyline
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
|
codesearchnet
|
def parse(self, s, term_join=None):
if not term_join:
term_join = lambda x: '(' + ' OR '.join(x) + ')'
toks = self.scan(s)
if toks and toks[0] and (toks[0][0] == self.TERM or toks[0][0] == self.QUOTEDTERM):
toks = [(self.MARKER, 'about')] + toks
bymarker = []
for t in toks:
if t[0] == self.MARKER:
bymarker.append((t[1], []))
else:
bymarker[-1][1].append(t)
comps = []
for t in bymarker:
t = list(t)
if t[0] == 'in' and len(t[1]) == 1 and isinstance(t[1][0][1], string_types) and self.stem(
t[1][0][1]) in self.geograins.keys():
t[0] = 'by'
if t[0] == 'from' and len(t[1]) == 1 and t[1][0][0] != self.YEAR:
t[0] = 'source'
comps.append(t)
groups = {marker: [] for marker, _ in comps}
for marker, terms in comps:
groups[marker] += [term for marker, term in terms]
for marker, group in groups.items():
if marker == 'about':
continue
if len(group) > 1 and marker not in self.multiterms:
groups[marker], extras = [group[0]], group[1:]
if not 'about' in groups:
groups['about'] = extras
else:
groups['about'] += extras
if marker == 'by':
groups['by'] = [ self.geograins.get(self.stem(e)) for e in group]
for marker, terms in iteritems(groups):
if len(terms) > 1:
                if marker == 'in':
groups[marker] = ' '.join(terms)
else:
groups[marker] = term_join(terms)
elif len(terms) == 1:
groups[marker] = terms[0]
else:
pass
return groups
|
Parses a search term into groups of terms keyed by marker.
Args:
s (str): string with search term.
term_join (callable): function used to join multiple 'OR' terms.
Returns:
dict: all of the terms grouped by marker. Key is a marker, value is a term.
Example:
>>> SearchTermParser().parse('table2 from 1978 to 1979 in california')
{'to': 1979, 'about': 'table2', 'from': 1978, 'in': 'california'}
|
juraj-google-style
|
def transform_rest_response(self, response_body):
body_json = json.loads(response_body)
return json.dumps(body_json, indent=1, sort_keys=True)
|
Translates an apiserving REST response so it's ready to return.
Currently, the only thing that needs to be fixed here is indentation,
so it's consistent with what the live app will return.
Args:
response_body: A string containing the backend response.
Returns:
A reformatted version of the response JSON.
|
juraj-google-style
|
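A tiny sketch of what the method above does; 'dispatcher' is a stand-in for whatever object defines transform_rest_response. The method only re-indents and key-sorts the backend JSON.

# Hypothetical usage sketch.
raw = '{"b": 2, "a": 1}'
print(dispatcher.transform_rest_response(raw))
# {
#  "a": 1,
#  "b": 2
# }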
def listen_forever(self, timeout_ms=30000, exception_handler=None, bad_sync_timeout=5):
_bad_sync_timeout = bad_sync_timeout
self.should_listen = True
while self.should_listen:
try:
self._sync(timeout_ms)
_bad_sync_timeout = bad_sync_timeout
except MatrixRequestError as e:
                logger.warning('A MatrixRequestError occurred during sync.')
                if (e.code >= 500):
                    logger.warning('Problem occurred server-side. Waiting %i seconds', bad_sync_timeout)
sleep(bad_sync_timeout)
_bad_sync_timeout = min((_bad_sync_timeout * 2), self.bad_sync_timeout_limit)
elif (exception_handler is not None):
exception_handler(e)
else:
raise
except Exception as e:
logger.exception('Exception thrown during sync')
if (exception_handler is not None):
exception_handler(e)
else:
raise
|
Keep listening for events forever.
Args:
timeout_ms (int): How long to poll the Home Server for before
retrying.
exception_handler (func(exception)): Optional exception handler
function which can be used to handle exceptions in the caller
thread.
bad_sync_timeout (int): Base time to wait after an error before
retrying. Will be increased according to exponential backoff.
|
codesearchnet
|
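A hedged usage sketch for the method above; `client` stands for the Matrix client object that defines listen_forever, and the handler logs instead of re-raising so the sync loop keeps running.

import logging

def log_and_continue(exc):
    # Hypothetical handler: record the exception and let the loop retry.
    logging.getLogger(__name__).warning('sync failed: %s', exc)

client.listen_forever(timeout_ms=30000, exception_handler=log_and_continue)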
def get_snapshots(self, volume_id_or_uri, start=0, count=(- 1), filter='', sort=''):
uri = self.__build_volume_snapshot_uri(volume_id_or_uri)
return self._client.get_all(start, count, filter=filter, sort=sort, uri=uri)
|
Gets all snapshots of a volume. Returns a list of snapshots based on optional sorting and filtering, and
constrained by start and count parameters.
Args:
volume_id_or_uri:
Can be either the volume id or the volume uri.
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: A list of snapshots.
|
codesearchnet
|
def add_point_feature(self, resnum, feat_type=None, feat_id=None, qualifiers=None):
if self.feature_file:
raise ValueError('Feature file associated with sequence, please remove file association to append '
'additional features.')
if not feat_type:
feat_type = 'Manually added protein sequence single residue feature'
newfeat = SeqFeature(location=FeatureLocation(ExactPosition(resnum-1), ExactPosition(resnum)),
type=feat_type,
id=feat_id,
qualifiers=qualifiers)
self.features.append(newfeat)
|
Add a feature to the features list describing a single residue.
Args:
resnum (int): Protein sequence residue number
feat_type (str, optional): Optional description of the feature type (ie. 'catalytic residue')
feat_id (str, optional): Optional ID of the feature type (ie. 'TM1')
qualifiers (dict, optional): Optional dictionary of additional feature qualifiers
|
juraj-google-style
|
def run(self, resources):
if not resources['connection']._port.startswith('jlink'):
            raise ArgumentError("FlashBoardStep is currently only possible through jlink", invalid_port=resources['connection']._port)
hwman = resources['connection']
debug = hwman.hwman.debug(self._debug_string)
debug.flash(self._file)
|
Runs the flash step
Args:
resources (dict): A dictionary containing the required resources that
we needed access to in order to perform this step.
|
juraj-google-style
|
class EvalLoopContainer:
def __init__(self, do_nested_concat: bool=True, padding_index: int=-100):
self.do_nested_concat = do_nested_concat
self.padding_index = padding_index
self.tensors = None
self.arrays = None
def add(self, tensors) -> None:
if self.tensors is None:
self.tensors = tensors if self.do_nested_concat else [tensors]
elif self.do_nested_concat:
self.tensors = nested_concat(self.tensors, tensors, padding_index=self.padding_index)
else:
self.tensors.append(tensors)
def to_cpu_and_numpy(self) -> None:
if self.tensors is None:
return
new_arrays = nested_numpify(self.tensors)
if self.arrays is None:
self.arrays = new_arrays
elif self.do_nested_concat:
self.arrays = nested_concat(self.arrays, new_arrays, padding_index=self.padding_index)
else:
self.arrays.extend(new_arrays)
self.tensors = None
def get_arrays(self):
self.to_cpu_and_numpy()
return self.arrays
|
Container to store intermediate results of evaluation loop.
Args:
do_nested_concat (`bool`, *optional*, defaults to `True`):
If set to `True`, each iteration will recursively concatenate a new object containing tensors to
the existing stored tensors, provided that the structure of the existing object and the new one
are identical. If set to `False`, all newly added tensors will be stored in a list.
padding_index (`int`, *optional*, defaults to -100):
Value used to pad tensors of different shapes when `do_nested_concat=True`.
|
github-repos
|
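A minimal usage sketch for the class above. It assumes the nested_concat and nested_numpify helpers from the surrounding module are available, and uses fake PyTorch logits just for illustration.

import torch

# Accumulate per-batch tensors, then pull everything back as one numpy array.
container = EvalLoopContainer(do_nested_concat=True, padding_index=-100)
for _ in range(3):
    container.add(torch.randn(4, 10))   # one batch of fake logits
logits = container.get_arrays()         # concatenated numpy array
print(logits.shape)                     # (12, 10)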
def AddPerformanceOptions(self, argument_group):
argument_group.add_argument('--buffer_size', '--buffer-size', '--bs', dest='buffer_size', action='store', default=0, help='The buffer size for the output (defaults to 196MiB).')
argument_group.add_argument('--queue_size', '--queue-size', dest='queue_size', action='store', default=0, help='The maximum number of queued items per worker (defaults to {0:d})'.format(self._DEFAULT_QUEUE_SIZE))
|
Adds the performance options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
|
codesearchnet
|
def with_port_scanning(cls):
def init(wsgi_app, flags):
should_scan = (flags.port is None)
base_port = (core_plugin.DEFAULT_PORT if (flags.port is None) else flags.port)
max_attempts = (10 if should_scan else 1)
if (base_port > 65535):
raise TensorBoardServerException(('TensorBoard cannot bind to port %d > %d' % (base_port, 65535)))
        base_port = (min((base_port + max_attempts), 65536) - max_attempts)
for port in xrange(base_port, (base_port + max_attempts)):
subflags = argparse.Namespace(**vars(flags))
subflags.port = port
try:
return cls(wsgi_app=wsgi_app, flags=subflags)
except TensorBoardPortInUseError:
if (not should_scan):
raise
raise TensorBoardServerException(('TensorBoard could not bind to any port around %s (tried %d times)' % (base_port, max_attempts)))
return init
|
Create a server factory that performs port scanning.
This function returns a callable whose signature matches the
specification of `TensorBoardServer.__init__`, using `cls` as an
underlying implementation. It passes through `flags` unchanged except
in the case that `flags.port is None`, in which case it repeatedly
instantiates the underlying server with new port suggestions.
Args:
cls: A valid implementation of `TensorBoardServer`. This class's
initializer should raise a `TensorBoardPortInUseError` upon
failing to bind to a port when it is expected that binding to
another nearby port might succeed.
The initializer for `cls` will only ever be invoked with `flags`
such that `flags.port is not None`.
Returns:
A function that implements the `__init__` contract of
`TensorBoardServer`.
|
codesearchnet
|
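A hypothetical sketch of how the factory above is meant to be used; WerkzeugServer is an assumed name for a concrete TensorBoardServer implementation that raises TensorBoardPortInUseError when a port is taken.

# Hypothetical usage sketch.
make_server = with_port_scanning(WerkzeugServer)
server = make_server(wsgi_app, flags)   # scans nearby ports when flags.port is None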
def transpose(vari):
if isinstance(vari, Poly):
core = vari.A.copy()
for key in vari.keys:
core[key] = transpose(core[key])
return Poly(core, vari.dim, vari.shape[::(- 1)], vari.dtype)
return numpy.transpose(vari)
|
Transpose a shapeable quantety.
Args:
vari (chaospy.poly.base.Poly, numpy.ndarray):
Quantety of interest.
Returns:
(chaospy.poly.base.Poly, numpy.ndarray):
Same type as ``vari``.
Examples:
>>> P = chaospy.reshape(chaospy.prange(4), (2,2))
>>> print(P)
[[1, q0], [q0^2, q0^3]]
>>> print(chaospy.transpose(P))
[[1, q0^2], [q0, q0^3]]
|
codesearchnet
|
def _activation_summary(x):
tf.histogram_summary(x.name + '/activations', x)
tf.scalar_summary(x.name + '/sparsity', tf.nn.zero_fraction(x))
|
Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measure the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
|
juraj-google-style
|
def parse_table_schema_from_json(schema_string):
try:
json_schema = json.loads(schema_string)
except JSONDecodeError as e:
raise ValueError('Unable to parse JSON schema: %s - %r' % (schema_string, e))
def _parse_schema_field(field):
schema = bigquery.TableFieldSchema()
schema.name = field['name']
schema.type = field['type']
if 'mode' in field:
schema.mode = field['mode']
else:
schema.mode = 'NULLABLE'
if 'description' in field:
schema.description = field['description']
if 'fields' in field:
schema.fields = [_parse_schema_field(x) for x in field['fields']]
return schema
fields = [_parse_schema_field(f) for f in json_schema['fields']]
return bigquery.TableSchema(fields=fields)
|
Parse the Table Schema provided as string.
Args:
schema_string: String serialized table schema, should be a valid JSON.
Returns:
A bigquery.TableSchema parsed from the given JSON string.
|
github-repos
|
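A minimal sketch of the function above: build a two-field schema from a JSON string. It assumes the Apache Beam GCP extras are installed so the bigquery schema classes are importable.

# Hypothetical usage sketch.
schema_json = '''
{"fields": [
  {"name": "user_id", "type": "INTEGER", "mode": "REQUIRED"},
  {"name": "tags", "type": "STRING", "mode": "REPEATED", "description": "free-form labels"}
]}
'''
schema = parse_table_schema_from_json(schema_json)
print([f.name for f in schema.fields])   # ['user_id', 'tags']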
def _remove_double_brackets(text):
def replacement_fn(s):
if (':' in s):
return ''
bar_pos = s.find('|')
if (bar_pos == (- 1)):
return s
return s[(bar_pos + 1):]
return _find_and_replace(text, '[[', ']]', replacement_fn)
|
Remove double brackets, but leave the viewable text.
Args:
text: a string
Returns:
a string
|
codesearchnet
|
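A small example of the intended behaviour of the function above, assuming the _find_and_replace helper from the same module: the visible text of a wiki link is kept, while namespaced links (those containing ':') are dropped.

# Hypothetical usage sketch.
text = 'See [[Python (programming language)|Python]] and [[File:logo.png]].'
print(_remove_double_brackets(text))   # 'See Python and .'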
def trace_max_buffer_capacity(self):
cmd = enums.JLinkTraceCommand.GET_MAX_CAPACITY
data = ctypes.c_uint32(0)
res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
if (res == 1):
raise errors.JLinkException('Failed to get max trace buffer size.')
return data.value
|
Retrieves the maximum size the trace buffer can be configured with.
Args:
self (JLink): the ``JLink`` instance.
Returns:
The maximum configurable capacity for the trace buffer.
|
juraj-google-style
|
def __init__(self, config_builder, height=1000):
tf.logging.set_verbosity(tf.logging.WARN)
config = config_builder.build()
copied_config = dict(config)
self.estimator_and_spec = (
dict(config.get('estimator_and_spec'))
if 'estimator_and_spec' in config else {})
self.compare_estimator_and_spec = (
dict(config.get('compare_estimator_and_spec'))
if 'compare_estimator_and_spec' in config else {})
if 'estimator_and_spec' in copied_config:
del copied_config['estimator_and_spec']
if 'compare_estimator_and_spec' in copied_config:
del copied_config['compare_estimator_and_spec']
self.custom_predict_fn = (
config.get('custom_predict_fn')
if 'custom_predict_fn' in config else None)
self.compare_custom_predict_fn = (
config.get('compare_custom_predict_fn')
if 'compare_custom_predict_fn' in config else None)
if 'custom_predict_fn' in copied_config:
del copied_config['custom_predict_fn']
if 'compare_custom_predict_fn' in copied_config:
del copied_config['compare_custom_predict_fn']
self._set_examples(config['examples'])
del copied_config['examples']
self.config = copied_config
WitWidget.widgets.append(self)
display.display(display.HTML(self._get_element_html()))
display.display(display.HTML(
WIT_HTML.format(
examples=json.dumps(self.examples), height=height, id=WitWidget.index)))
WitWidget.index += 1
        # NOTE: the JS template string passed to eval_js was lost in extraction;
        # 'configCallback' is an assumed name for the registered config callback.
        output.eval_js('configCallback({config})'.format(
            config=json.dumps(self.config)))
output.eval_js('updateExamplesCallback()')
self._generate_sprite()
|
Constructor for colab notebook WitWidget.
Args:
config_builder: WitConfigBuilder object containing settings for WIT.
height: Optional height in pixels for WIT to occupy. Defaults to 1000.
|
juraj-google-style
|
def is_auth(self):
if (self.user_id is None):
self.user_id = self.session.get('user_id')
return bool(self.user_id)
|
A property that indicates if current user is logged in or not.
Returns:
Boolean.
|
codesearchnet
|