code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def modify_model_backprop(model, backprop_modifier):
    """Creates a copy of model by modifying all activations to use a custom op
    to modify the backprop behavior.

    Args:
        model: The `keras.models.Model` instance.
        backprop_modifier: One of `{'guided', 'rectified'}`

    Returns:
        A copy of model with modified activations for backwards pass.
    """
    # Reuse a previously built copy for this (model, modifier) combination.
    modified_model = _MODIFIED_MODEL_CACHE.get((model, backprop_modifier))
    if modified_model is not None:
        return modified_model
    # Round-trip through a temporary HDF5 file so the graph can be rebuilt
    # under a gradient override.
    # NOTE(review): tempfile._get_candidate_names() is a private API;
    # consider tempfile.mkstemp instead.
    model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')
    try:
        model.save(model_path)
        modifier_fn = _BACKPROP_MODIFIERS.get(backprop_modifier)
        if modifier_fn is None:
            raise ValueError("'{}' modifier is not supported".format(backprop_modifier))
        # Registers the custom gradient op under the modifier's name.
        modifier_fn(backprop_modifier)
        # Reload the model while 'Relu' gradients are redirected to the
        # registered custom op.
        with tf.get_default_graph().gradient_override_map({'Relu': backprop_modifier}):
            modified_model = load_model(model_path)
            _MODIFIED_MODEL_CACHE[(model, backprop_modifier)] = modified_model
            return modified_model
    finally:
        # Always clean up the temp file, even on failure.
        os.remove(model_path)
def _producer_wrapper(f, port, addr='tcp://127.0.0.1'):
    """A shim that sets up a socket and starts the producer callable.

    Parameters
    ----------
    f : callable
        Callable that takes a single argument, a handle
        for a ZeroMQ PUSH socket. Must be picklable.
    port : int
        The port on which the socket should connect.
    addr : str, optional
        Address to which the socket should connect. Defaults
        to localhost ('tcp://127.0.0.1').
    """
    # Create the context BEFORE entering the try block: in the original,
    # if zmq.Context() itself raised, the `finally` clause hit a NameError
    # on the unbound `context`, masking the real error.
    context = zmq.Context()
    try:
        socket = context.socket(zmq.PUSH)
        socket.connect(':'.join([addr, str(port)]))
        f(socket)
    finally:
        context.destroy()
def backward(heads, head_grads=None, retain_graph=False, train_mode=True):
    """Compute the gradients of heads w.r.t previously marked variables.

    Parameters
    ----------
    heads: NDArray or list of NDArray
        Output NDArray(s)
    head_grads: NDArray or list of NDArray or None
        Gradients with respect to heads.
    retain_graph: bool, optional
        Whether to retain the computation graph after this backward pass.
    train_mode: bool, optional
        Whether to do backward for training or predicting.
    """
    # Normalize heads / head grads into parallel ctypes handle arrays.
    head_handles, hgrad_handles = _parse_head(heads, head_grads)
    # NOTE(review): the zero/NULL slots request gradients for all
    # previously-marked variables rather than an explicit list — confirm
    # against the MXAutogradBackwardEx C-API signature.
    check_call(_LIB.MXAutogradBackwardEx(
        len(head_handles),
        head_handles,
        hgrad_handles,
        0,
        ctypes.c_void_p(0),
        ctypes.c_int(retain_graph),
        ctypes.c_int(0),
        ctypes.c_int(train_mode),
        ctypes.c_void_p(0),
        ctypes.c_void_p(0)))
def register_single(key, value, param=None):
get_current_scope().container.register(key, lambda: value, param) | Generates resolver to return singleton value and adds it to global container |
def format_warning_oneline(message, category, filename, lineno,
                           file=None, line=None):
    """Format a warning for logging.

    The returned value should be a single-line string, for better
    logging style (although this is not enforced by the code).

    This method's arguments have the same meaning as the like-named
    arguments from `warnings.formatwarning`; only the category name
    and message are actually used.
    """
    return '{category}: {message}'.format(
        category=category.__name__, message=message)
def create_attr_obj(self, protocol_interface, phy_interface):
self.intf_attr[protocol_interface] = TopoIntfAttr(
protocol_interface, phy_interface)
self.store_obj(protocol_interface, self.intf_attr[protocol_interface]) | Creates the local interface attribute object and stores it. |
def bind(self, environment):
    """Create a copy of this extension bound to another environment."""
    # Shallow-copy the instance without invoking __init__, then rebind.
    clone = object.__new__(type(self))
    clone.__dict__.update(self.__dict__)
    clone.environment = environment
    return clone
def complete(self, nextq=None, delay=None, depends=None):
if nextq:
logger.info('Advancing %s to %s from %s',
self.jid, nextq, self.queue_name)
return self.client('complete', self.jid, self.client.worker_name,
self.queue_name, json.dumps(self.data), 'next', nextq,
'delay', delay or 0, 'depends',
json.dumps(depends or [])) or False
else:
logger.info('Completing %s', self.jid)
return self.client('complete', self.jid, self.client.worker_name,
self.queue_name, json.dumps(self.data)) or False | Turn this job in as complete, optionally advancing it to another
queue. Like ``Queue.put`` and ``move``, it accepts a delay, and
dependencies |
def create_config(config_path="scriptworker.yaml"):
if not os.path.exists(config_path):
print("{} doesn't exist! Exiting...".format(config_path), file=sys.stderr)
sys.exit(1)
with open(config_path, "r", encoding="utf-8") as fh:
secrets = safe_load(fh)
config = dict(deepcopy(DEFAULT_CONFIG))
if not secrets.get("credentials"):
secrets['credentials'] = read_worker_creds()
config.update(secrets)
apply_product_config(config)
messages = check_config(config, config_path)
if messages:
print('\n'.join(messages), file=sys.stderr)
print("Exiting...", file=sys.stderr)
sys.exit(1)
credentials = get_frozen_copy(secrets['credentials'])
del(config['credentials'])
config = get_frozen_copy(config)
return config, credentials | Create a config from DEFAULT_CONFIG, arguments, and config file.
Then validate it and freeze it.
Args:
config_path (str, optional): the path to the config file. Defaults to
"scriptworker.yaml"
Returns:
tuple: (config frozendict, credentials dict)
Raises:
SystemExit: on failure |
def diff(ctx, branch):
diff = GitDiffReporter(branch)
regions = diff.changed_intervals()
_report_from_regions(regions, ctx.obj, file_factory=diff.old_file) | Determine which tests intersect a git diff. |
def _flip_feature(self, feature, parent_len):
copy = feature.copy()
if copy.strand == 0:
copy.strand = 1
else:
copy.strand = 0
copy.start = parent_len - copy.start
copy.stop = parent_len - copy.stop
copy.start, copy.stop = copy.stop, copy.start
return copy | Adjust a feature's location when flipping DNA.
:param feature: The feature to flip.
:type feature: coral.Feature
:param parent_len: The length of the sequence to which the feature belongs.
:type parent_len: int |
def clear_diagram(self, diagram):
plot = self.subplots[diagram[1]][diagram[0]]
plot.cla() | Clear diagram.
Parameters
----------
diagram : [column, row]
Diagram to clear. |
def get_analytic(user, job_id, anc_id):
v1_utils.verify_existence_and_get(job_id, models.JOBS)
analytic = v1_utils.verify_existence_and_get(anc_id, _TABLE)
analytic = dict(analytic)
if not user.is_in_team(analytic['team_id']):
raise dci_exc.Unauthorized()
return flask.jsonify({'analytic': analytic}) | Get an analytic. |
def check_token_file(self):
    """Checks if the token file exists at the given position.

    Deprecated; delegates to the token backend when supported.

    :return: whether the token exists, or None when the backend does not
        implement ``check_token`` (despite the advertised bool return)
    :rtype: bool or None
    """
    warnings.warn('This method will be removed in future versions',
                  DeprecationWarning)
    return self.token_backend.check_token() if hasattr(self.token_backend, 'check_token') else None
def process_from_splash(self):
    """Add softwares found in the DOM."""
    for software in self._softwares_from_splash:
        # NOTE(review): .get() returns None for an unknown plugin name,
        # which would raise AttributeError on plugin.name below — confirm
        # upstream guarantees every detected software has a plugin.
        plugin = self._plugins.get(software['name'])
        try:
            # Prefer the detected version when the splash data provides one.
            additional_data = {'version': software['version']}
        except KeyError:
            # No version available: tag the result with the indicator type.
            additional_data = {'type': INDICATOR_TYPE}
        self._results.add_result(
            Result(
                name=plugin.name,
                homepage=plugin.homepage,
                from_url=self.requested_url,
                plugin=plugin.name,
                **additional_data,
            )
        )
        # Hints derived from the plugin are reported alongside the result.
        for hint in self.get_hints(plugin):
            self._results.add_result(hint)
def send(sender_instance):
m = Mailin(
"https://api.sendinblue.com/v2.0",
sender_instance._kwargs.get("api_key")
)
data = {
"to": email_list_to_email_dict(sender_instance._recipient_list),
"cc": email_list_to_email_dict(sender_instance._cc),
"bcc": email_list_to_email_dict(sender_instance._bcc),
"from": email_address_to_list(sender_instance._from_email),
"subject": sender_instance._subject,
}
if sender_instance._template.is_html:
data.update({
"html": sender_instance._message,
"headers": {"Content-Type": "text/html; charset=utf-8"}
})
else:
data.update({"text": sender_instance._message})
if "attachments" in sender_instance._kwargs:
data["attachment"] = {}
for attachment in sender_instance._kwargs["attachments"]:
data["attachment"][attachment[0]] = base64.b64encode(attachment[1])
result = m.send_email(data)
if result["code"] != "success":
raise SendInBlueError(result["message"]) | Send a transactional email using SendInBlue API.
Site: https://www.sendinblue.com
API: https://apidocs.sendinblue.com/ |
def cmd(send, msg, _):
    """Tells you what acronyms mean.

    Syntax: {command} <term>
    """
    try:
        answer = subprocess.check_output(['wtf', msg], stderr=subprocess.STDOUT)
        # Collapse multi-line definitions into one line; the replace keeps
        # the output family-friendly.
        send(answer.decode().strip().replace('\n', ' or ').replace('fuck', 'fsck'))
    except subprocess.CalledProcessError as ex:
        # On lookup failure, relay only the first line of wtf's error output.
        send(ex.output.decode().rstrip().splitlines()[0])
def finite_datetimes(self, finite_start, finite_stop):
    """Simply returns the points in time that correspond to turn of day.

    Yields every midnight in [finite_start, finite_stop) as a list,
    starting from the midnight of finite_start's day.
    """
    midnight = datetime(finite_start.year, finite_start.month, finite_start.day)
    result = []
    day = 0
    while True:
        candidate = midnight + timedelta(days=day)
        if candidate >= finite_stop:
            return result
        if candidate >= finite_start:
            result.append(candidate)
        day += 1
def parse_args(options=None, *args, **kwds):
    """Parser of arguments.

    dict options {
        int min_items: Min of required items to fold one tuple. (default: 1)
        int max_items: Count of items in one tuple. Last `max_items-min_items`
            items is by default set to None. (default: 1)
        bool allow_dict: Flag allowing dictionary as first (and only one)
            argument or dictinary as **kwds. (default: False)
        bool allow_list: Flag allowing list as first (and only one) argument.
            (default: False)
    }

    Examples:
        calling with min_items=1, max_items=2, allow_dict=False:
            arg1, arg2 => ((arg1, None), (arg2, None))
            (arg1a, arg1b), arg2 => ((arg1a, arg1b), arg2, None))
            arg1=val1 => FAIL
            {key1: val1} => FAIL

        calling with min_items=2, max_items=3, allow_dict=True:
            arg1, arg2 => ((arg1, arg2, None),)
            arg1, arg2, arg3 => ((arg1, arg2, arg3),)
            (arg1a, arg1b, arg1c) => ((arg1a, arg1b, arg1c),)
            arg1=val1, arg2=val2 => ((arg1, val1, None), (arg2, val2, None))
            {key1: val1, key2: val2} => ((key1, val1, None), (key2, val2, None))
            (arg1a, arg1b), arg2a, arg2b => FAIL
    """
    # Default to None instead of a mutable `{}` shared between all calls
    # (classic mutable-default-argument pitfall); behavior is unchanged
    # for every existing caller.
    parser_options = ParserOptions(options if options is not None else {})
    parser_input = ParserInput(args, kwds)
    parser = Parser(parser_options, parser_input)
    parser.parse()
    return parser.output_data
def get_ptr(data, offset=None, ptr_type=ctypes.c_void_p):
    """Returns a void pointer to the data.

    An optional byte ``offset`` is added to the address, and the result
    may be cast to ``ptr_type`` when one other than c_void_p is given.
    """
    address = ctypes.cast(ctypes.pointer(data), ctypes.c_void_p)
    if offset:
        address = ctypes.c_void_p(address.value + offset)
    if ptr_type == ctypes.c_void_p:
        return address
    return ctypes.cast(address, ptr_type)
def tex_eps_emitter(target, source, env):
(target, source) = tex_emitter_core(target, source, env, TexGraphics)
return (target, source) | An emitter for TeX and LaTeX sources when
executing tex or latex. It will accept .ps and .eps
graphics files |
def send(self, request_id, payload):
log.debug("About to send %d bytes to Kafka, request %d" % (len(payload), request_id))
if not self._sock:
self.reinit()
try:
self._sock.sendall(payload)
except socket.error:
log.exception('Unable to send payload to Kafka')
self._raise_connection_error() | Send a request to Kafka
Arguments::
request_id (int): can be any int (used only for debug logging...)
payload: an encoded kafka packet (see KafkaProtocol) |
def src_file(self):
try:
src_uri = (curl[Gentoo._LATEST_TXT] | tail["-n", "+3"]
| cut["-f1", "-d "])().strip()
except ProcessExecutionError as proc_ex:
src_uri = "NOT-FOUND"
LOG.error("Could not determine latest stage3 src uri: %s",
str(proc_ex))
return src_uri | Get the latest src_uri for a stage 3 tarball.
Returns (str):
Latest src_uri from gentoo's distfiles mirror. |
def to_json(self, indent=4):
    """Serialize metas and reference attributes to a JSON string.

    Keyword Arguments:
        indent (int): Space indentation, default to ``4``.

    Returns:
        string: JSON datas.
    """
    payload = {'metas': self.metas}
    for attr in self._rule_attrs:
        payload[attr] = getattr(self, attr)
    return json.dumps(payload, indent=indent)
def cli_main():
if '--debug' in sys.argv:
LOG.setLevel(logging.DEBUG)
elif '--verbose' in sys.argv:
LOG.setLevel(logging.INFO)
args = _get_arguments()
try:
plugin, folder = get_plugin_and_folder(
inputzip=args.inputzip,
inputdir=args.inputdir,
inputfile=args.inputfile)
LOG.debug('Plugin: %s -- Folder: %s' % (plugin.name, folder))
run_mq2(
plugin, folder, lod_threshold=args.lod, session=args.session)
except MQ2Exception as err:
print(err)
return 1
return 0 | Main function when running from CLI. |
def trigger_deleted(self, filepath):
if not os.path.exists(filepath):
self._trigger('deleted', filepath) | Triggers deleted event if the flie doesn't exist. |
def get_seq_number_from_id(id, id_template, prefix, **kw):
    """Return the sequence number of the given ID.

    The prefix is stripped from the ID and the remaining postfix is split
    on ``separator`` (keyword, default "-"); the last alphanumeric segment
    is taken as the sequence number and converted to an int.
    """
    separator = kw.get("separator", "-")
    postfix = id.replace(prefix, "").strip(separator)
    postfix_segments = postfix.split(separator)
    seq_number = 0
    # Materialize the candidates: on Python 3, `filter()` returns a lazy
    # iterator which is always truthy and does not support indexing, so
    # the original `possible_seq_nums[-1]` raised TypeError.
    possible_seq_nums = [n for n in postfix_segments if n.isalnum()]
    if possible_seq_nums:
        seq_number = possible_seq_nums[-1]
    seq_number = get_alpha_or_number(seq_number, id_template)
    seq_number = to_int(seq_number)
    return seq_number
def delete(self, filething=None):
if self.tags is not None:
temp_blocks = [
b for b in self.metadata_blocks if b.code != VCFLACDict.code]
self._save(filething, temp_blocks, False, padding=lambda x: 0)
self.metadata_blocks[:] = [
b for b in self.metadata_blocks
if b.code != VCFLACDict.code or b is self.tags]
self.tags.clear() | Remove Vorbis comments from a file.
If no filename is given, the one most recently loaded is used. |
def _format_years(years):
def sub(x):
return x[1] - x[0]
ranges = []
for k, iterable in groupby(enumerate(sorted(years)), sub):
rng = list(iterable)
if len(rng) == 1:
s = str(rng[0][1])
else:
s = "{}-{}".format(rng[0][1], rng[-1][1])
ranges.append(s)
return ", ".join(ranges) | Format a list of ints into a string including ranges
Source: https://stackoverflow.com/a/9471386/1307974 |
def call(self, name, *args, **kwargs):
return self.client.call(self.name, name, *args, **kwargs) | Make a SoftLayer API call
:param service: the name of the SoftLayer API service
:param method: the method to call on the service
:param \\*args: same optional arguments that ``BaseClient.call`` takes
:param \\*\\*kwargs: same optional keyword arguments that
``BaseClient.call`` takes
:param service: the name of the SoftLayer API service
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> client['Account'].getVirtualGuests(mask="id", limit=10)
[...] |
def make_request(self, session, url, **kwargs):
log.debug('Making request: GET %s %s' % (url, kwargs))
return session.get(url, **kwargs) | Make a HTTP GET request.
:param url: The URL to get.
:returns: The response to the request.
:rtype: requests.Response |
def __make_request_headers(self, teststep_dict, entry_json):
teststep_headers = {}
for header in entry_json["request"].get("headers", []):
if header["name"].lower() in IGNORE_REQUEST_HEADERS:
continue
teststep_headers[header["name"]] = header["value"]
if teststep_headers:
teststep_dict["request"]["headers"] = teststep_headers | parse HAR entry request headers, and make teststep headers.
header in IGNORE_REQUEST_HEADERS will be ignored.
Args:
entry_json (dict):
{
"request": {
"headers": [
{"name": "Host", "value": "httprunner.top"},
{"name": "Content-Type", "value": "application/json"},
{"name": "User-Agent", "value": "iOS/10.3"}
],
},
"response": {}
}
Returns:
{
"request": {
headers: {"Content-Type": "application/json"}
} |
def join_cluster(host, user='rabbit', ram_node=None, runas=None):
cmd = [RABBITMQCTL, 'join_cluster']
if ram_node:
cmd.append('--ram')
cmd.append('{0}@{1}'.format(user, host))
if runas is None and not salt.utils.platform.is_windows():
runas = salt.utils.user.get_user()
stop_app(runas)
res = __salt__['cmd.run_all'](cmd, reset_system_locale=False, runas=runas, python_shell=False)
start_app(runas)
return _format_response(res, 'Join') | Join a rabbit cluster
CLI Example:
.. code-block:: bash
salt '*' rabbitmq.join_cluster rabbit.example.com rabbit |
def load_variable(self, var=None, start_date=None, end_date=None,
time_offset=None, grid_attrs=None, **DataAttrs):
file_set = self._generate_file_set(var=var, start_date=start_date,
end_date=end_date, **DataAttrs)
ds = _load_data_from_disk(
file_set, self.preprocess_func, data_vars=self.data_vars,
coords=self.coords, start_date=start_date, end_date=end_date,
time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs
)
if var.def_time:
ds = _prep_time_data(ds)
start_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], start_date)
end_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], end_date)
ds = set_grid_attrs_as_coords(ds)
da = _sel_var(ds, var, self.upcast_float32)
if var.def_time:
da = self._maybe_apply_time_shift(da, time_offset, **DataAttrs)
return times.sel_time(da, start_date, end_date).load()
else:
return da.load() | Load a DataArray for requested variable and time range.
Automatically renames all grid attributes to match aospy conventions.
Parameters
----------
var : Var
aospy Var object
start_date : datetime.datetime
start date for interval
end_date : datetime.datetime
end date for interval
time_offset : dict
Option to add a time offset to the time coordinate to correct for
incorrect metadata.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
**DataAttrs
Attributes needed to identify a unique set of files to load from
Returns
-------
da : DataArray
DataArray for the specified variable, date range, and interval in |
def InsertIntArg(self, string='', **unused_kwargs):
    """Inserts an Integer argument.

    Args:
        string: value to convert to an integer.

    Raises:
        errors.ParseError: if the value cannot be converted to an integer.
    """
    try:
        int_value = int(string)
    except (TypeError, ValueError):
        # Use !s (str() conversion) instead of the :s format code: :s
        # raises its own TypeError for non-string inputs such as None,
        # masking the intended ParseError message.
        raise errors.ParseError('{0!s} is not a valid integer.'.format(string))
    return self.InsertArg(int_value)
def _CreateTaskStorageWriter(self, path, task):
return SQLiteStorageFileWriter(
self._session, path,
storage_type=definitions.STORAGE_TYPE_TASK, task=task) | Creates a task storage writer.
Args:
path (str): path to the storage file.
task (Task): task.
Returns:
SQLiteStorageFileWriter: storage writer. |
def _containing_contigs(self, hits):
return {hit.ref_name for hit in hits if self._contains(hit)} | Given a list of hits, all with same query,
returns a set of the contigs containing that query |
def selection_pos(self):
buff = self._vim.current.buffer
beg = buff.mark('<')
end = buff.mark('>')
return beg, end | Return start and end positions of the visual selection respectively. |
def get_value_in_base_currency(self) -> Decimal:
amt_orig = self.get_value()
sec_cur = self.get_currency()
cur_svc = CurrenciesAggregate(self.book)
base_cur = cur_svc.get_default_currency()
if sec_cur == base_cur:
return amt_orig
single_svc = cur_svc.get_currency_aggregate(sec_cur)
rate = single_svc.get_latest_rate(base_cur)
result = amt_orig * rate.value
return result | Calculates the value of security holdings in base currency |
def _merge_multi_context(outputs, major_axis):
rets = []
for tensors, axis in zip(outputs, major_axis):
if axis >= 0:
if len(tensors) == 1:
rets.append(tensors[0])
else:
rets.append(nd.concat(*[tensor.as_in_context(tensors[0].context)
for tensor in tensors],
dim=axis))
else:
rets.append(tensors[0])
return rets | Merge outputs that lives on multiple context into one, so that they look
like living on one context. |
def get_last_modified_unix_sec():
path = request.args.get("path")
if path and os.path.isfile(path):
try:
last_modified = os.path.getmtime(path)
return jsonify({"path": path, "last_modified_unix_sec": last_modified})
except Exception as e:
return client_error({"message": "%s" % e, "path": path})
else:
return client_error({"message": "File not found: %s" % path, "path": path}) | Get last modified unix time for a given file |
def _retrieve_download_url():
try:
with urlopen(config['nrfa']['oh_json_url'], timeout=10) as f:
remote_config = json.loads(f.read().decode('utf-8'))
if remote_config['nrfa_url'].startswith('.'):
remote_config['nrfa_url'] = 'file:' + pathname2url(os.path.abspath(remote_config['nrfa_url']))
_update_nrfa_metadata(remote_config)
return remote_config['nrfa_url']
except URLError:
return config['nrfa']['url'] | Retrieves download location for FEH data zip file from hosted json configuration file.
:return: URL for FEH data file
:rtype: str |
def _as_array_or_item(data):
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == 'M':
data = np.datetime64(data, 'ns')
elif data.dtype.kind == 'm':
data = np.timedelta64(data, 'ns')
return data | Return the given values as a numpy array, or as an individual item if
it's a 0d datetime64 or timedelta64 array.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
This function mostly exists because 0-dimensional ndarrays with
dtype=datetime64 are broken :(
https://github.com/numpy/numpy/issues/4337
https://github.com/numpy/numpy/issues/7619
TODO: remove this (replace with np.asarray) once these issues are fixed |
def DviPdfPsFunction(XXXDviAction, target = None, source= None, env=None):
try:
abspath = source[0].attributes.path
except AttributeError :
abspath = ''
saved_env = SCons.Scanner.LaTeX.modify_env_var(env, 'TEXPICTS', abspath)
result = XXXDviAction(target, source, env)
if saved_env is _null:
try:
del env['ENV']['TEXPICTS']
except KeyError:
pass
else:
env['ENV']['TEXPICTS'] = saved_env
return result | A builder for DVI files that sets the TEXPICTS environment
variable before running dvi2ps or dvipdf. |
def recover(
data: bytes,
signature: Signature,
hasher: Callable[[bytes], bytes] = eth_sign_sha3,
) -> Address:
_hash = hasher(data)
if signature[-1] >= 27:
signature = Signature(signature[:-1] + bytes([signature[-1] - 27]))
try:
sig = keys.Signature(signature_bytes=signature)
public_key = keys.ecdsa_recover(message_hash=_hash, signature=sig)
except BadSignature as e:
raise InvalidSignature from e
return public_key.to_canonical_address() | eth_recover address from data hash and signature |
def _calculate_edges(self):
left = self.specs.left_margin
left += (self.specs.label_width * (self._position[1] - 1))
if self.specs.column_gap:
left += (self.specs.column_gap * (self._position[1] - 1))
left *= mm
bottom = self.specs.sheet_height - self.specs.top_margin
bottom -= (self.specs.label_height * self._position[0])
if self.specs.row_gap:
bottom -= (self.specs.row_gap * (self._position[0] - 1))
bottom *= mm
return float(left), float(bottom) | Calculate edges of the current label. Not intended for external use. |
def bounds(self):
google_x, google_y = self.google
pixel_x_west, pixel_y_north = google_x * TILE_SIZE, google_y * TILE_SIZE
pixel_x_east, pixel_y_south = (google_x + 1) * TILE_SIZE, (google_y + 1) * TILE_SIZE
point_min = Point.from_pixel(pixel_x=pixel_x_west, pixel_y=pixel_y_south, zoom=self.zoom)
point_max = Point.from_pixel(pixel_x=pixel_x_east, pixel_y=pixel_y_north, zoom=self.zoom)
return point_min, point_max | Gets the bounds of a tile represented as the most west and south point and the most east and north point |
def _bind_baremetal_port(self, context, segment):
port = context.current
vif_details = {
portbindings.VIF_DETAILS_VLAN: str(
segment[driver_api.SEGMENTATION_ID])
}
context.set_binding(segment[driver_api.ID],
portbindings.VIF_TYPE_OTHER,
vif_details,
n_const.ACTIVE)
LOG.debug("AristaDriver: bound port info- port ID %(id)s "
"on network %(network)s",
{'id': port['id'],
'network': context.network.current['id']})
if port.get('trunk_details'):
self.trunk_driver.bind_port(port)
return True | Bind the baremetal port to the segment |
async def sqsStats(self, *args, **kwargs):
return await self._makeApiCall(self.funcinfo["sqsStats"], *args, **kwargs) | Statistics on the sqs queues
This method is only for debugging the ec2-manager
This method is ``experimental`` |
def modis1kmto250m(lons1km, lats1km, cores=1):
if cores > 1:
return _multi(modis1kmto250m, lons1km, lats1km, 10, cores)
cols1km = np.arange(1354)
cols250m = np.arange(1354 * 4) / 4.0
along_track_order = 1
cross_track_order = 3
lines = lons1km.shape[0]
rows1km = np.arange(lines)
rows250m = (np.arange(lines * 4) - 1.5) / 4.0
satint = SatelliteInterpolator((lons1km, lats1km),
(rows1km, cols1km),
(rows250m, cols250m),
along_track_order,
cross_track_order,
chunk_size=40)
satint.fill_borders("y", "x")
lons250m, lats250m = satint.interpolate()
return lons250m, lats250m | Getting 250m geolocation for modis from 1km tiepoints.
http://www.icare.univ-lille1.fr/tutorials/MODIS_geolocation |
def list_devices():
output = {}
for device_id, device in devices.items():
output[device_id] = {
'host': device.host,
'state': device.state
}
return jsonify(devices=output) | List devices via HTTP GET. |
def _reset(self, load):
values = reduce(iadd, self._lists, [])
self._clear()
self._load = load
self._half = load >> 1
self._dual = load << 1
self._update(values) | Reset sorted list load.
The *load* specifies the load-factor of the list. The default load
factor of '1000' works well for lists from tens to tens of millions of
elements. Good practice is to use a value that is the cube root of the
list size. With billions of elements, the best load factor depends on
your usage. It's best to leave the load factor at the default until
you start benchmarking. |
async def add(self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _conn=None):
start = time.monotonic()
dumps = dumps_fn or self._serializer.dumps
ns_key = self.build_key(key, namespace=namespace)
await self._add(ns_key, dumps(value), ttl=self._get_ttl(ttl), _conn=_conn)
logger.debug("ADD %s %s (%.4f)s", ns_key, True, time.monotonic() - start)
return True | Stores the value in the given key with ttl if specified. Raises an error if the
key already exists.
:param key: str
:param value: obj
:param ttl: int the expiration time in seconds. Due to memcached
restrictions if you want compatibility use int. In case you
need miliseconds, redis and memory support float ttls
:param dumps_fn: callable alternative to use as dumps function
:param namespace: str alternative namespace to use
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: True if key is inserted
:raises:
- ValueError if key already exists
- :class:`asyncio.TimeoutError` if it lasts more than self.timeout |
def get_min_muO2(self, min_voltage=None, max_voltage=None):
    """Minimum critical oxygen chemical potential along path.

    Args:
        min_voltage: The minimum allowable voltage for a given step
        max_voltage: The maximum allowable voltage allowable for a given
            step

    Returns:
        Minimum critical oxygen chemical of all compounds along the
        insertion path (a subset of the path can be chosen by the optional
        arguments), or None if no data is available.
    """
    data = []
    for pair in self._select_in_voltage_range(min_voltage, max_voltage):
        if pair.muO2_discharge is not None:
            data.extend([d['chempot'] for d in pair.muO2_discharge])
        if pair.muO2_charge is not None:
            # Bug fix: the original iterated muO2_discharge again here,
            # so charge-side chemical potentials were never considered.
            data.extend([d['chempot'] for d in pair.muO2_charge])
    return min(data) if len(data) > 0 else None
def get_ssm_parameter(parameter_name):
    """Get the decrypted value of an SSM parameter.

    Args:
        parameter_name - the name of the stored parameter of interest

    Return:
        Value if allowed and present, else '' (empty string)
    """
    try:
        response = boto3.client('ssm').get_parameters(
            Names=[parameter_name],
            WithDecryption=True
        )
        return response.get('Parameters', None)[0].get('Value', '')
    # NOTE(review): the broad `except Exception: pass` also swallows
    # credential/permission errors; deliberate best-effort behavior,
    # but consider logging the failure.
    except Exception:
        pass
    return ''
def _init_module_cache():
if len(FieldTranslation._modules) < len(FieldTranslation._model_module_paths):
for module_path in FieldTranslation._model_module_paths:
FieldTranslation._modules[module_path] = importlib.import_module(module_path)
return True
return False | Module caching, it helps with not having to import again and again same modules.
@return: boolean, True if module caching has been done, False if module caching was already done. |
def focus_next_sibling(self):
w, focuspos = self.get_focus()
sib = self._tree.next_sibling_position(focuspos)
if sib is not None:
self.set_focus(sib) | move focus to next sibling of currently focussed one |
def set_contributor_details(self, contdetails):
    """Sets 'contributor_details' parameter used to enhance the
    contributors element of the status response to include
    the screen_name of the contributor. By default only
    the user_id of the contributor is included.

    :param contdetails: Boolean triggering the usage of the parameter
    :raises: TwitterSearchException
    """
    if not isinstance(contdetails, bool):
        raise TwitterSearchException(1008)
    value = 'true' if contdetails else 'false'
    self.arguments.update({'contributor_details': value})
def angle_to_distance(angle, units='metric'):
distance = math.radians(angle) * BODY_RADIUS
if units in ('km', 'metric'):
return distance
elif units in ('sm', 'imperial', 'US customary'):
return distance / STATUTE_MILE
elif units in ('nm', 'nautical'):
return distance / NAUTICAL_MILE
else:
raise ValueError('Unknown units type %r' % units) | Convert angle in to distance along a great circle.
Args:
angle (float): Angle in degrees to convert to distance
units (str): Unit type to be used for distances
Returns:
float: Distance in ``units``
Raises:
ValueError: Unknown value for ``units`` |
def substitution_set(string, indexes):
strlen = len(string)
return {mutate_string(string, x) for x in indexes if valid_substitution(strlen, x)} | for a string, return a set of all possible substitutions |
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars:
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
def width_fn(s): return _multiline_width(s, line_width_fn)
else:
width_fn = line_width_fn
return width_fn | Return a function to calculate visible cell width. |
def teardown_app_request(self, func: Callable) -> Callable:
self.record_once(lambda state: state.app.teardown_request(func))
return func | Add a teardown request function to the app.
This is designed to be used as a decorator, and has the same
arguments as :meth:`~quart.Quart.teardown_request`. It applies
to all requests to the app this blueprint is registered on. An
example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.teardown_app_request
def teardown():
... |
def verify_in(self, first, second, msg=""):
    """Soft assert for whether the first is in second.

    On failure the error is recorded rather than raised, so the
    test can keep running.

    :params first: the value to check
    :params second: the container to check in
    :params msg: (Optional) msg explaining the difference
    """
    try:
        self.assert_in(first, second, msg)
    # NOTE(review): `except AssertionError, e` is Python-2-only syntax;
    # this function cannot run under Python 3 as written.
    except AssertionError, e:
        if msg:
            m = "%s:\n%s" % (msg, str(e))
        else:
            m = str(e)
        # NOTE(review): 'verification_erorrs' is misspelled; presumably
        # spelled the same way elsewhere in the class — confirm before
        # renaming.
        self.verification_erorrs.append(m)
def template(basedir, text, vars, lookup_fatal=True, expand_lists=False):
try:
text = text.decode('utf-8')
except UnicodeEncodeError:
pass
text = varReplace(basedir, unicode(text), vars, lookup_fatal=lookup_fatal, expand_lists=expand_lists)
return text | run a text buffer through the templating engine until it no longer changes |
def get_sources(zone, permanent=True):
cmd = '--zone={0} --list-sources'.format(zone)
if permanent:
cmd += ' --permanent'
return __firewall_cmd(cmd).split() | List sources bound to a zone
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.get_sources zone |
def setItemData(self, treeItem, column, value, role=Qt.EditRole):
if role == Qt.CheckStateRole:
if column != self.COL_VALUE:
return False
else:
logger.debug("Setting check state (col={}): {!r}".format(column, value))
treeItem.checkState = value
return True
elif role == Qt.EditRole:
if column != self.COL_VALUE:
return False
else:
logger.debug("Set Edit value (col={}): {!r}".format(column, value))
treeItem.data = value
return True
else:
raise ValueError("Unexpected edit role: {}".format(role)) | Sets the role data for the item at index to value. |
def fetch_routing_table(self, address):
    """Fetch a routing table from a given router address.

    :param address: router address
    :return: a new RoutingTable instance or None if the given router is
        currently unable to provide routing information
    :raise RoutingProtocolError: if the routing information received
        contains no routers or no readers
    """
    new_routing_info = self.fetch_routing_info(address)
    if new_routing_info is None:
        # Router could not provide any routing information.
        return None
    new_routing_table = RoutingTable.parse_routing_info(new_routing_info)
    num_routers = len(new_routing_table.routers)
    num_readers = len(new_routing_table.readers)
    num_writers = len(new_routing_table.writers)
    # A missing writer is tolerated (only flagged for callers) rather
    # than raised — a cluster may temporarily lack a writer, e.g. during
    # leader election.
    self.missing_writer = (num_writers == 0)
    if num_routers == 0:
        raise RoutingProtocolError("No routing servers returned from server %r" % (address,))
    if num_readers == 0:
        raise RoutingProtocolError("No read servers returned from server %r" % (address,))
    return new_routing_table
def to_digestable(self, origin=None):
    """Convert name to a format suitable for digesting in hashes.

    The name is canonicalized (labels lowercased) and converted to
    uncompressed wire format.

    @param origin: If the name is relative and origin is not None, then
    origin will be appended to it.
    @type origin: dns.name.Name object
    @raises NeedAbsoluteNameOrOrigin: All names in wire format are
    absolute. If self is a relative name, then an origin must be supplied;
    if it is missing, then this exception is raised
    @rtype: string
    """
    if self.is_absolute():
        labels = self.labels
    else:
        if origin is None or not origin.is_absolute():
            raise NeedAbsoluteNameOrOrigin
        labels = list(self.labels) + list(origin.labels)
    # Wire format: one length byte followed by the lowercased label.
    return ''.join('%s%s' % (chr(len(label)), label.lower()) for label in labels)
def index_natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True):
    """\
    Sorts a sequence naturally, but returns a list of the sorted
    indices and not the sorted list.

    >>> a = ['num3', 'num5', 'num2']
    >>> b = ['foo', 'bar', 'baz']
    >>> index = index_natsorted(a)
    >>> index
    [2, 0, 1]
    >>> # Sort both lists by the sort order of a
    >>> [a[i] for i in index]
    ['num2', 'num3', 'num5']
    >>> [b[i] for i in index]
    ['baz', 'foo', 'bar']
    >>> c = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')]
    >>> from operator import itemgetter
    >>> index_natsorted(c, key=itemgetter(1))
    [2, 0, 1]
    """
    # Pair each original index with its sort key, sort the pairs by the
    # natural-sort key, then strip the keys off again.
    indexed = [(i, key(item)) for i, item in enumerate(seq)]
    indexed.sort(key=lambda pair: natsort_key(pair[1],
                                              number_type=number_type,
                                              signed=signed, exp=exp))
    return [i for i, _ in indexed]
def view(self, sort=None, purge=False, done=None, undone=None, **kwargs):
    """Handles the 'v' command.

    :sort: Sort pattern.
    :purge: Whether to purge items marked as 'done'.
    :done: Done pattern.
    :undone: Not done pattern.
    :kwargs: Additional arguments to pass to the View object.
    """
    sort_pattern = self._getPattern(sort)
    done_pattern = self._getDone(done, undone)
    model = self.model.modify(sort=sort_pattern, purge=purge, done=done_pattern)
    View(model, **kwargs)
def human_bytes(n):
    """Return the number of bytes n in more human readable form."""
    if n < 1024:
        return '%d B' % n
    kib = n / 1024
    if kib < 1024:
        return '%d KB' % round(kib)
    mib = kib / 1024
    if mib < 1024:
        return '%.1f MB' % mib
    return '%.2f GB' % (mib / 1024)
def list(self, master=True):
    """List current reminders."""
    params = dict(self.static_params)
    if master:
        # Only master (non-collapsed) recurrences, including archived ones.
        params.update({
            "recurrenceOptions": {
                "collapseMode": "MASTER_ONLY",
            },
            "includeArchived": True,
            "includeDeleted": False,
        })
    else:
        # Individual recurrence instances, due within the window from one
        # year ago up to one day from now (milliseconds since the epoch).
        now = time.time()
        due_after_ms = int((now - (365 * 24 * 60 * 60)) * 1000)
        due_before_ms = int((now + (24 * 60 * 60)) * 1000)
        params.update({
            "recurrenceOptions": {
                "collapseMode": "INSTANCES_ONLY",
                "recurrencesOnly": True,
            },
            "includeArchived": False,
            "includeCompleted": False,
            "includeDeleted": False,
            "dueAfterMs": due_after_ms,
            "dueBeforeMs": due_before_ms,
            "recurrenceId": [],
        })
    return self.send(
        url=self._base_url + 'list',
        method='POST',
        json=params
    )
def create(self, key, value):
data = None
if key is not None:
key = key.strip()
self.tcex.log.debug(u'create variable {}'.format(key))
parsed_key = self.parse_variable(key.strip())
variable_type = parsed_key['type']
if variable_type in self.read_data_types:
data = self.create_data_types[variable_type](key, value)
else:
data = self.create_raw(key, value)
return data | Create method of CRUD operation for working with KeyValue DB.
This method will automatically determine the variable type and
call the appropriate method to write the data. If a non standard
type is provided the data will be written as RAW data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result string of DB write. |
async def fetch_device_list(self):
    """Fetch list of devices."""
    dlist = await self.api_get('{}/users/me'.format(API_URL))
    if dlist is None:
        _LOGGER.error('Unable to fetch eight devices.')
        return
    self._devices = dlist['user']['devices']
    _LOGGER.debug('Devices: %s', self._devices)
def cast_scalar(method):
    """Cast scalars to constant interpolating objects.

    Decorator for binary methods: when ``other`` is a scalar it is wrapped
    in ``type(self)`` as a single-value object over ``self.domain()`` before
    the wrapped method is invoked.
    """
    @wraps(method)
    def wrapper(self, other):
        if np.isscalar(other):
            other = type(self)([other], self.domain())
        return method(self, other)
    return wrapper
def codestr2rst(codestr, lang='python', lineno=None):
    """Return reStructuredText code block from code string."""
    if lineno is None:
        lineno_opt = ''
    elif LooseVersion(sphinx.__version__) >= '1.3':
        # :lineno-start: must account for leading blank lines that the
        # indented block preserves.
        blank_lines = codestr.count('\n', 0, -len(codestr.lstrip()))
        lineno_opt = ' :lineno-start: {0}\n'.format(lineno + blank_lines)
    else:
        # Older Sphinx only supports plain :linenos:.
        lineno_opt = ' :linenos:\n'
    directive = "\n.. code-block:: {0}\n{1}\n".format(lang, lineno_opt)
    return directive + indent(codestr, ' ' * 4)
def post_process(self, xout, yout, params):
    """Transforms internal values to output, used internally.

    Each registered post-processor is applied in order, threading the
    (xout, yout, params) triple through the chain.
    """
    for processor in self.post_processors:
        xout, yout, params = processor(xout, yout, params)
    return xout, yout, params
def copy_bootstrap(bootstrap_target: Path) -> None:
    """Copy bootstrap code from shiv into the pyz.

    This function is excluded from type checking due to the conditional import.

    :param bootstrap_target: The temporary directory where we are staging pyz contents.
    """
    for name in importlib_resources.contents(bootstrap):
        # Skip sub-packages/directories; only copy concrete resource files.
        if not importlib_resources.is_resource(bootstrap, name):
            continue
        with importlib_resources.path(bootstrap, name) as source:
            shutil.copyfile(source.absolute(), bootstrap_target / source.name)
def default(self, o):
    """Encode JSON.

    Dates/times serialize to ISO-8601 strings and Decimals to floats;
    anything else defers to the base encoder (which raises TypeError).

    :return str: A JSON encoded string
    """
    datetime_types = (datetime.datetime, datetime.date, datetime.time)
    if isinstance(o, datetime_types):
        return o.isoformat()
    if isinstance(o, decimal.Decimal):
        return float(o)
    return json.JSONEncoder.default(self, o)
def handler_for_name(fq_name):
    """Resolves and instantiates handler by fully qualified name.

    First resolves the name using for_name call. Then if it resolves to a class,
    instantiates a class, if it resolves to a method - instantiates the class and
    binds method to the instance.

    Args:
        fq_name: fully qualified name of something to find.

    Returns:
        handler instance which is ready to be called.
    """
    resolved = for_name(fq_name)
    # NOTE: types.ClassType and im_class are Python 2 constructs (old-style
    # classes / unbound methods); this module targets Python 2.
    if isinstance(resolved, (type, types.ClassType)):
        # A class: instantiate it.
        return resolved()
    if isinstance(resolved, types.MethodType):
        # An unbound method: instantiate its class and bind the method.
        return getattr(resolved.im_class(), resolved.__name__)
    # Already a plain callable.
    return resolved
def write(self, filename, entities, sortkey=None, columns=None):
    """Write entities out to filename in csv format.

    Note: this doesn't write directly into a Zip archive, because this behavior
    is difficult to achieve with Zip archives. Use make_zip() to create a new
    GTFS Zip archive.
    """
    if os.path.exists(filename):
        raise IOError('File exists: %s'%filename)
    if sortkey:
        entities = sorted(entities, key=lambda entity: entity[sortkey])
    if not columns:
        # Default columns: union of all keys across entities, sorted.
        keys = set()
        for entity in entities:
            keys.update(entity.keys())
        columns = sorted(keys)
    with open(filename, 'wb') as f:
        writer = unicodecsv.writer(f)
        writer.writerow(columns)
        writer.writerows(
            [entity.get(column) for column in columns] for entity in entities
        )
def updateImage(self, imgdata, xaxis=None, yaxis=None):
    """Updates the Widget image directly.

    :type imgdata: numpy.ndarray, see :meth:`pyqtgraph:pyqtgraph.ImageItem.setImage`
    :param xaxis: x-axis values, length should match dimension 1 of imgdata
    :param yaxis: y-axis values, length should match dimension 0 of imgdata
    """
    transposed = imgdata.T
    self.img.setImage(transposed)
    if xaxis is not None and yaxis is not None:
        # Scale the image so that the axis extents map onto the pixel grid.
        xscale = 1.0/(transposed.shape[0]/xaxis[-1])
        yscale = 1.0/(transposed.shape[1]/yaxis[-1])
        self.resetScale()
        self.img.scale(xscale, yscale)
        self.imgScale = (xscale, yscale)
    self.imageArray = np.fliplr(transposed)
    self.updateColormap()
def __check_conflict_fronds(x, y, w, z, dfs_data):
    """Checks a pair of fronds to see if they conflict. Returns True if a
    conflict was found, False otherwise."""
    # Case 1: both fronds carry branch markers (negative endpoints) — they
    # conflict only if they mark the same branch.
    if x < 0 and w < 0 and (x == y or w == z):
        return x == w

    # Case 2: both fronds lie in the same branch with strictly nested
    # endpoints — no conflict.
    if b(x, dfs_data) == b(w, dfs_data) and x > w and w > y and y > z:
        return False

    # Case 3: exactly one frond carries a branch marker.
    if x < 0 or w < 0:
        if x < 0:
            u = abs(x)
            t = y
            x, y = w, z
        else:
            u = abs(w)
            t = z
        if b(x, dfs_data) == u and y < u and \
                (x, y) in __dfsify_branch_uv(u, t, dfs_data):
            return True
        return False

    return False
def tracker_class(clsname):
    """Get class instance details."""
    stats = server.stats
    if not stats:
        # Nothing tracked yet; send the user back to the tracker index.
        bottle.redirect('/tracker')
    stats.annotate()
    return {'stats': stats, 'clsname': clsname}
def matches(self, properties):
    """Tests if the given criterion matches this LDAP criterion

    :param properties: A dictionary of properties
    :return: True if the properties matches this criterion, else False
    """
    # Look the property up first so that a KeyError raised *inside* the
    # comparator is no longer silently swallowed (the original wrapped the
    # whole call in try/except KeyError).
    try:
        actual = properties[self.name]
    except KeyError:
        return False
    return self.comparator(self.value, actual)
def noinject(module_name=None, module_prefix='[???]', DEBUG=False, module=None, N=0, via=None):
    """Use in modules that do not have inject in them.

    Does not inject anything into the module. Just lets utool know that a module
    is being imported so the import order can be debuged.
    """
    if PRINT_INJECT_ORDER:
        from utool._internal import meta_util_dbg
        callername = meta_util_dbg.get_caller_name(N=N + 1, strict=False)
        lineno = meta_util_dbg.get_caller_lineno(N=N + 1, strict=False)
        suff = ' via %s' % (via,) if via else ''
        msg = '[util_inject] N={N} {modname} is imported by {callername} at lineno={lineno}{suff}'.format(
            N=N, lineno=lineno, callername=callername,
            modname=module_name, suff=suff)
        if DEBUG_SLOW_IMPORT:
            # Record how long elapsed since the previous module import.
            global PREV_MODNAME
            import_times[(PREV_MODNAME, module_name)] = tt.toc()
            PREV_MODNAME = module_name
        builtins.print(msg)
        if DEBUG_SLOW_IMPORT:
            tt.tic()
        if EXIT_ON_INJECT_MODNAME == module_name:
            # Debug hook: abort the import chain at a specific module.
            builtins.print('...exiting')
            assert False, 'exit in inject requested'
def connect(self, addr):
    """Call adb connect

    Return true when connect success
    """
    if ':' not in addr:
        # Default adb TCP port.
        addr += ':5555'
    output = self.run_cmd('connect', addr)
    return 'unable to connect' not in output
def install_missing(name, version=None, source=None):
    """Instructs Chocolatey to install a package if it doesn't already exist.

    .. versionchanged:: 2014.7.0
        If the minion has Chocolatey >= 0.9.8.24 installed, this function calls
        :mod:`chocolatey.install <salt.modules.chocolatey.install>` instead, as
        ``installmissing`` is deprecated as of that version and will be removed
        in Chocolatey 1.0.

    name
        The name of the package to be installed. Only accepts a single argument.

    version
        Install a specific version of the package. Defaults to latest version
        available.

    source
        Chocolatey repository (directory, share or remote URL feed) the package
        comes from. Defaults to the official Chocolatey feed.

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install_missing <package name>
        salt '*' chocolatey.install_missing <package name> version=<package version>
    """
    choc_path = _find_chocolatey(__context__, __salt__)
    # 'installmissing' was deprecated in Chocolatey 0.9.8.24; defer to install().
    if _LooseVersion(chocolatey_version()) >= _LooseVersion('0.9.8.24'):
        log.warning('installmissing is deprecated, using install')
        return install(name, version=version)

    cmd = [choc_path, 'installmissing', name]
    if version:
        cmd += ['--version', version]
    if source:
        cmd += ['--source', source]
    cmd += _yes(__context__)

    result = __salt__['cmd.run_all'](cmd, python_shell=False)
    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Running chocolatey failed: {0}'.format(result['stdout'])
        )
    return result['stdout']
def text_to_char_array(original, alphabet):
    r"""
    Given a Python string ``original``, remove unsupported characters, map characters
    to integers and return a numpy array representing the processed string.
    """
    labels = [alphabet.label_from_string(ch) for ch in original]
    return np.asarray(labels)
def is_submodule_included(src, tgt):
    """Check that the tgt's submodule is included by src, if they belong
    to the same module."""
    if tgt is None or not hasattr(tgt, 'i_orig_module'):
        return True
    tgt_module = tgt.i_orig_module
    if tgt_module.keyword != 'submodule':
        return True
    # Only access src's module lazily, mirroring the original's
    # short-circuit evaluation order.
    src_module = src.i_orig_module
    if src_module == tgt_module or src_module.i_modulename != tgt_module.i_modulename:
        return True
    # Same parent module: tgt's submodule must be included by src.
    return src_module.search_one('include', tgt_module.arg) is not None
async def close_wallet(handle: int) -> None:
    """Closes opened wallet and frees allocated resources.

    :param handle: wallet handle returned by indy_open_wallet.
    :return: Error code
    """
    logger = logging.getLogger(__name__)
    logger.debug("close_wallet: >>> handle: %i", handle)

    # The FFI callback is created once and cached on the function object.
    if not hasattr(close_wallet, "cb"):
        logger.debug("close_wallet: Creating callback")
        close_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))

    await do_call('indy_close_wallet',
                  c_int32(handle),
                  close_wallet.cb)

    logger.debug("close_wallet: <<<")
def get_resource_attributes(ref_key, ref_id, type_id=None, **kwargs):
    """Get all the resource attributes for a given resource.

    If type_id is specified, only return the resource attributes
    within the type.
    """
    # The original bound kwargs['user_id'] to an unused local; removed.
    resource_attr_qry = db.DBSession.query(ResourceAttr).filter(
        ResourceAttr.ref_key == ref_key,
        or_(
            ResourceAttr.network_id == ref_id,
            ResourceAttr.node_id == ref_id,
            ResourceAttr.link_id == ref_id,
            ResourceAttr.group_id == ref_id
        ))

    if type_id is not None:
        # Restrict to attributes that belong to the given template type.
        type_attrs = db.DBSession.query(TypeAttr).filter(
            TypeAttr.type_id == type_id).all()
        attr_ids = [type_attr.attr_id for type_attr in type_attrs]
        resource_attr_qry = resource_attr_qry.filter(
            ResourceAttr.attr_id.in_(attr_ids))

    return resource_attr_qry.all()
def _get_function_commands(module):
    """Yield all Command objects represented by python functions in the module.

    Function commands consist of all top-level functions that contain
    docopt-style docstrings.

    Args:
        module: An ast.Module object used to retrieve docopt-style commands.

    Yields:
        Command objects that represent entry points to append to setup.py.
    """
    for node in module.body:
        if not isinstance(node, ast.FunctionDef):
            continue
        docstring = ast.get_docstring(node)
        for commands, _ in usage.parse_commands(docstring):
            # First token is the command; second (if any) the subcommand.
            subcommand = next(iter(commands[1:]), None)
            yield _EntryPoint(commands[0], subcommand, node.name)
def process_uncaught_exception(self, e):
    """This is called to handle otherwise uncaught exceptions from the service.

    The service will terminate either way, but here we can do things such as
    gathering useful environment information and logging for posterity.
    """
    (exc_file_fullpath, exc_file, exc_lineno,
     exc_func, exc_line) = workflows.logging.get_exception_source()
    extra = {
        "workflows_exc_lineno": exc_lineno,
        "workflows_exc_funcName": exc_func,
        "workflows_exc_line": exc_line,
        "workflows_exc_pathname": exc_file_fullpath,
        "workflows_exc_filename": exc_file,
    }
    # Copy any 'workflows_log_*' attributes off the exception, with the
    # prefix stripped (14 == len('workflows_log_')).
    for attr in dir(e):
        if attr.startswith("workflows_log_"):
            extra[attr[14:]] = getattr(e, attr, None)
    self.log.critical(
        "Unhandled service exception: %s", e, exc_info=True, extra=extra
    )
def isexe(*components):
    """Return whether a path is an executable file.

    Arguments:
        path (str): Path of the file to check.

    Examples:
        >>> fs.isexe("/bin/ls")
        True
        >>> fs.isexe("/home")
        False
        >>> fs.isexe("/not/a/real/path")
        False

    Returns:
        bool: True if file is executable, else false.
    """
    target = path(*components)
    return isfile(target) and os.access(target, os.X_OK)
def find_following_working_day(self, day):
    """Looks for the following working day, if not already a working day.

    **WARNING**: this function doesn't take into account the calendar
    holidays, only the days of the week and the weekend days parameters.
    """
    current = cleaned_date(day)
    # Step forward one day at a time until we leave the weekend.
    while current.weekday() in self.get_weekend_days():
        current = current + timedelta(days=1)
    return current
def transform(self, X):
    """Transforms the time series data based on the provided function. Note this
    transformation must not change the number of samples in the data.

    Parameters
    ----------
    X : array-like, shape [n_samples, ...]
        time series data and (optionally) contextual data

    Returns
    -------
    Xt : array-like, shape [n_samples, ...]
        transformed time series data
    """
    if self.func is None:
        # No function configured: identity transform.
        return X
    Xt, Xc = get_ts_data_parts(X)
    n_samples = len(Xt)
    Xt = self.func(Xt, **self.func_kwargs)
    if len(Xt) != n_samples:
        raise ValueError("FunctionTransformer changes sample number (not supported).")
    if Xc is not None:
        # Re-attach the contextual data to the transformed series.
        Xt = TS_Data(Xt, Xc)
    return Xt
def configure(self, debug=None, quiet=None, verbosity=None, compile=None, compiler_factory=None, **kwargs):
    """configure managed args

    Any named argument left as None is ignored; remaining keyword
    arguments are forwarded to ``self.command.update``.
    """
    settings = (
        ('arg_debug', debug),
        ('arg_quiet', quiet),
        ('arg_verbosity', verbosity),
        ('compile', compile),
        ('compiler_factory', compiler_factory),
    )
    for attr, value in settings:
        if value is not None:
            setattr(self, attr, value)
    if kwargs:
        self.command.update(**kwargs)
def get_mesh_dict(self):
    """Returns calculated mesh sampling phonons.

    Returns
    -------
    dict
        keys: qpoints, weights, frequencies, eigenvectors, and
        group_velocities.

        qpoints: ndarray, dtype='double', shape=(ir-grid points, 3)
            q-points in reduced coordinates of reciprocal lattice.
        weights: ndarray, dtype='intc', shape=(ir-grid points,)
            Geometric q-point weights. Its sum is the number of grid
            points.
        frequencies: ndarray, dtype='double',
                     shape=(ir-grid points, bands)
            Phonon frequencies at ir-grid points. Imaginary frequencies
            are represented by negative real numbers.
        eigenvectors: ndarray, dtype='complex',
                      shape=(ir-grid points, bands, bands)
            Phonon eigenvectors at ir-grid points. See the data
            structure at np.linalg.eigh.
        group_velocities: ndarray, dtype='double',
                          shape=(ir-grid points, bands, 3)
            Phonon group velocities at ir-grid points.

    Raises
    ------
    RuntimeError
        If run_mesh has not been executed yet.
    """
    if self._mesh is None:
        raise RuntimeError("run_mesh has to be done.")
    mesh = self._mesh
    return {
        'qpoints': mesh.qpoints,
        'weights': mesh.weights,
        'frequencies': mesh.frequencies,
        'eigenvectors': mesh.eigenvectors,
        'group_velocities': mesh.group_velocities,
    }
def inf_sup(u):
    """IS operator.

    Dilates u with each structuring element in the family matching its
    dimensionality and returns the pointwise minimum of the dilations.
    """
    ndim = np.ndim(u)
    if ndim == 2:
        elements = _P2
    elif ndim == 3:
        elements = _P3
    else:
        raise ValueError("u has an invalid number of dimensions "
                         "(should be 2 or 3)")
    dilated = [ndi.binary_dilation(u, element) for element in elements]
    return np.array(dilated, dtype=np.int8).min(0)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.