code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def get_mode(self, gpio):
res = yield from self._pigpio_aio_command(_PI_CMD_MODEG, gpio, 0)
return _u2i(res) | Returns the gpio mode.
gpio:= 0-53.
Returns a value as follows
. .
0 = INPUT
1 = OUTPUT
2 = ALT5
3 = ALT4
4 = ALT0
5 = ALT1
6 = ALT2
7 = ALT3
. .
...
print(pi.get_mode(0))
4
... |
def _encoder(self):
if self.source_lang == 'en':
return Transliterator._dummy_coder
else:
weights = load_transliteration_table(self.source_lang)
encoder_weights = weights["encoder"]
return Transliterator._transliterate_string(encoder_weights) | Transliterate a string from the input language to English. |
def automatic_parser(result, dtypes={}, converters={}):
np.seterr(all='raise')
parsed = {}
for filename, contents in result['output'].items():
if dtypes.get(filename) is None:
dtypes[filename] = None
if converters.get(filename) is None:
converters[filename] = None
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parsed[filename] = np.genfromtxt(io.StringIO(contents),
dtype=dtypes[filename],
converters=converters[filename]
).tolist()
return parsed | Try and automatically convert strings formatted as tables into nested
list structures.
Under the hood, this function essentially applies the genfromtxt function
to all files in the output, and passes it the additional kwargs.
Args:
result (dict): the result to parse.
dtypes (dict): a dictionary containing the dtype specification to perform
parsing for each available filename. See the numpy genfromtxt
documentation for more details on how to format these. |
def insert(table, values=(), **kwargs):
values = dict(values, **kwargs).items()
sql, args = makeSQL("INSERT", table, values=values)
return execute(sql, args).lastrowid | Convenience wrapper for database INSERT. |
def blow_out(self,
location: Union[types.Location, Well] = None
) -> 'InstrumentContext':
if location is None:
if not self._ctx.location_cache:
raise RuntimeError('No valid current location cache present')
else:
location = self._ctx.location_cache.labware
if isinstance(location, Well):
if location.parent.is_tiprack:
self._log.warning('Blow_out being performed on a tiprack. '
'Please re-check your code')
target = location.top()
elif isinstance(location, types.Location) and not \
isinstance(location.labware, Well):
raise TypeError(
'location should be a Well or None, but it is {}'
.format(location))
else:
raise TypeError(
'location should be a Well or None, but it is {}'
.format(location))
self.move_to(target)
self._hw_manager.hardware.blow_out(self._mount)
return self | Blow liquid out of the tip.
If :py:attr:`dispense` is used to completely empty a pipette,
usually a small amount of liquid will remain in the tip. This
method moves the plunger past its usual stops to fully remove
any remaining liquid from the tip. Regardless of how much liquid
was in the tip when this function is called, after it is done
the tip will be empty.
:param location: The location to blow out into. If not specified,
defaults to the current location of the pipette
:type location: :py:class:`.Well` or :py:class:`.Location` or None
:raises RuntimeError: If no location is specified and location cache is
None. This should happen if `blow_out` is called
without first calling a method that takes a
location (eg, :py:meth:`.aspirate`,
:py:meth:`dispense`)
:returns: This instance |
def image_create(self, disk, label=None, description=None):
params = {
"disk_id": disk.id if issubclass(type(disk), Base) else disk,
}
if label is not None:
params["label"] = label
if description is not None:
params["description"] = description
result = self.post('/images', data=params)
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response when creating an '
'Image from disk {}'.format(disk))
return Image(self, result['id'], result) | Creates a new Image from a disk you own.
:param disk: The Disk to imagize.
:type disk: Disk or int
:param label: The label for the resulting Image (defaults to the disk's
label.
:type label: str
:param description: The description for the new Image.
:type description: str
:returns: The new Image.
:rtype: Image |
def anonymized_formula(self):
anon_formula = super().anonymized_formula
chg = self._charge
chg_str = ""
if chg > 0:
chg_str += ("{}{}".format('+', str(int(chg))))
elif chg < 0:
chg_str += ("{}{}".format('-', str(int(np.abs(chg)))))
return anon_formula + chg_str | An anonymized formula. Appends charge to the end
of anonymized composition |
def make_student(user):
tutor_group, owner_group = _get_user_groups()
user.is_staff = False
user.is_superuser = False
user.save()
owner_group.user_set.remove(user)
owner_group.save()
tutor_group.user_set.remove(user)
tutor_group.save() | Makes the given user a student. |
def connect(self, *names):
fromName, toName, rest = names[0], names[1], names[2:]
self.connectAt(fromName, toName)
if len(rest) != 0:
self.connect(toName, *rest) | Connects a list of names, one to the next. |
def on_message(self, message):
message = ObjectDict(escape.json_decode(message))
if message.command == 'hello':
handshake = {
'command': 'hello',
'protocols': [
'http://livereload.com/protocols/official-7',
],
'serverName': 'livereload-tornado',
}
self.send_message(handshake)
if message.command == 'info' and 'url' in message:
logger.info('Browser Connected: %s' % message.url)
LiveReloadHandler.waiters.add(self) | Handshake with livereload.js
1. client send 'hello'
2. server reply 'hello'
3. client send 'info' |
def send_query(text, service_endpoint='drum', query_args=None):
if service_endpoint in ['drum', 'drum-dev', 'cwms', 'cwmsreader']:
url = base_url + service_endpoint
else:
logger.error('Invalid service endpoint: %s' % service_endpoint)
return ''
if query_args is None:
query_args = {}
query_args.update({'input': text})
res = requests.get(url, query_args, timeout=3600)
if not res.status_code == 200:
logger.error('Problem with TRIPS query: status code %s' %
res.status_code)
return ''
return res.text | Send a query to the TRIPS web service.
Parameters
----------
text : str
The text to be processed.
service_endpoint : Optional[str]
Selects the TRIPS/DRUM web service endpoint to use. Is a choice between
"drum" (default), "drum-dev", a nightly build, and "cwms" for use with
more general knowledge extraction.
query_args : Optional[dict]
A dictionary of arguments to be passed with the query.
Returns
-------
html : str
The HTML result returned by the web service. |
def _get_names(node, result):
if isinstance(node, ast.Name):
return node.id + result
elif isinstance(node, ast.Subscript):
return result
elif isinstance(node, ast.Starred):
return _get_names(node.value, result)
else:
return _get_names(node.value, result + '.' + node.attr) | Recursively finds all names. |
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
no_ack = no_ack or self.no_ack
auto_ack = auto_ack or self.auto_ack
message = self.backend.get(self.queue, no_ack=no_ack)
if message:
if auto_ack and not message.acknowledged:
message.ack()
if enable_callbacks:
self.receive(message.payload, message)
return message | Receive the next message waiting on the queue.
:returns: A :class:`carrot.backends.base.BaseMessage` instance,
or ``None`` if there's no messages to be received.
:keyword enable_callbacks: Enable callbacks. The message will be
processed with all registered callbacks. Default is disabled.
:keyword auto_ack: Override the default :attr:`auto_ack` setting.
:keyword no_ack: Override the default :attr:`no_ack` setting. |
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain) | Delete a cookie. Fails silently if key doesn't exist.
:param key: the key (name) of the cookie to be deleted.
:param path: if the cookie that should be deleted was limited to a
path, the path has to be defined here.
:param domain: if the cookie that should be deleted was limited to a
domain, that domain has to be defined here. |
def _check_for_python_keywords(self, kwargs):
kwargs_copy = copy.deepcopy(kwargs)
for key, val in iteritems(kwargs):
if isinstance(val, dict):
kwargs_copy[key] = self._check_for_python_keywords(val)
elif isinstance(val, list):
kwargs_copy[key] = self._iter_list_for_dicts(val)
else:
if key.endswith('_'):
strip_key = key.rstrip('_')
if keyword.iskeyword(strip_key):
kwargs_copy[strip_key] = val
kwargs_copy.pop(key)
return kwargs_copy | When Python keywords seen, mutate to remove trailing underscore. |
def clean_tarinfo(cls, tar_info):
ti = copy(tar_info)
ti.uid = 0
ti.gid = 0
ti.uname = ""
ti.gname = ""
ti.mode = normalize_file_permissions(ti.mode)
return ti | Clean metadata from a TarInfo object to make it more reproducible.
- Set uid & gid to 0
- Set uname and gname to ""
- Normalise permissions to 644 or 755
- Set mtime if not None |
def remove_listener(self, event_name, listener):
self.listeners[event_name].remove(listener)
return self | Removes a listener. |
def get_disk_usage(self, path=None):
DiskUsage = namedtuple('usage', 'total, used, free')
if path is None:
mount_point = self.mount_points[self.root.name]
else:
mount_point = self._mount_point_for_path(path)
if mount_point and mount_point['total_size'] is not None:
return DiskUsage(mount_point['total_size'],
mount_point['used_size'],
mount_point['total_size'] -
mount_point['used_size'])
return DiskUsage(
1024 * 1024 * 1024 * 1024, 0, 1024 * 1024 * 1024 * 1024) | Return the total, used and free disk space in bytes as named tuple,
or placeholder values simulating unlimited space if not set.
.. note:: This matches the return value of shutil.disk_usage().
Args:
path: The disk space is returned for the file system device where
`path` resides.
Defaults to the root path (e.g. '/' on Unix systems). |
def add_handler(self, message_type, handler):
if message_type not in self._handlers:
self._handlers[message_type] = []
if handler not in self._handlers[message_type]:
self._handlers[message_type].append(handler) | Manage callbacks for message handlers. |
def get_random_hex(length):
if length <= 0:
return ''
return hexify(random.randint(pow(2, length*2), pow(2, length*4)))[0:length] | Return random hex string of a given length |
def Execute(self, action, *args, **kw):
action = self.Action(action, *args, **kw)
result = action([], [], self)
if isinstance(result, SCons.Errors.BuildError):
errstr = result.errstr
if result.filename:
errstr = result.filename + ': ' + errstr
sys.stderr.write("scons: *** %s\n" % errstr)
return result.status
else:
return result | Directly execute an action through an Environment |
def centroid_distance(item_a, time_a, item_b, time_b, max_value):
ax, ay = item_a.center_of_mass(time_a)
bx, by = item_b.center_of_mass(time_b)
return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value) | Euclidean distance between the centroids of item_a and item_b.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1. |
def get(self, path, default=_NoDefault, as_type=None, resolve_references=True):
value = self._source
steps_taken = []
try:
for step in path.split(self._separator):
steps_taken.append(step)
value = value[step]
if as_type:
return as_type(value)
elif isinstance(value, Mapping):
namespace = type(self)(separator=self._separator, missing=self._missing)
namespace._source = value
namespace._root = self._root
return namespace
elif resolve_references and isinstance(value, str):
return self._resolve(value)
else:
return value
except ConfiguredReferenceError:
raise
except KeyError as e:
if default is not _NoDefault:
return default
else:
missing_key = self._separator.join(steps_taken)
raise NotConfiguredError('no configuration for key {}'.format(missing_key), key=missing_key) from e | Gets a value for the specified path.
:param path: the configuration key to fetch a value for, steps
separated by the separator supplied to the constructor (default
``.``)
:param default: a value to return if no value is found for the
supplied path (``None`` is allowed)
:param as_type: an optional callable to apply to the value found for
the supplied path (possibly raising exceptions of its own if the
value can not be coerced to the expected type)
:param resolve_references: whether to resolve references in values
:return: the value associated with the supplied configuration key, if
available, or a supplied default value if the key was not found
:raises ConfigurationError: when no value was found for *path* and
*default* was not provided or a reference could not be resolved |
def get(self, name=None):
return self.app.shared_objects.get(name, self.plugin) | Returns requested shared objects, which were registered by the current plugin.
If access to objects of other plugins are needed, use :func:`access` or perform get on application level::
my_app.shared_objects.get(name="...")
:param name: Name of a request shared object
:type name: str or None |
def start(self, id):
path = partial(_path, self.adapter)
path = path(id)
return self._put(path) | start a specific tracker. |
def findInvariantPartitioning(self):
symorders = self.symorders[:]
_range = range(len(symorders))
while 1:
pos = self.findLowest(symorders)
if pos == -1:
self.symorders = symorders
return
for i in _range:
symorders[i] = symorders[i] * 2 + 1
symorders[pos] = symorders[pos] - 1
symorders = self.findInvariant(symorders) | Keep the initial ordering of the symmetry orders
but make all values unique. For example, if there are
two symmetry orders equal to 0, convert them to 0 and 1
and add 1 to the remaining orders
[0, 1, 0, 1]
should become
[0, 2, 1, 3] |
def int_args(self):
if self.ARG_REGS is None:
raise NotImplementedError()
for reg in self.ARG_REGS:
yield SimRegArg(reg, self.arch.bytes) | Iterate through all the possible arg positions that can only be used to store integer or pointer values
Does not take into account customizations.
Returns an iterator of SimFunctionArguments |
def new(self):
new_dashboard = models.Dashboard(
dashboard_title='[ untitled dashboard ]',
owners=[g.user],
)
db.session.add(new_dashboard)
db.session.commit()
return redirect(f'/superset/dashboard/{new_dashboard.id}/?edit=true') | Creates a new, blank dashboard and redirects to it in edit mode |
def set_data(self, data):
"Use this method to set the data for this blob"
if data is None:
self.data_size = 0
self.data = None
return
self.data_size = len(data)
self.data = ctypes.cast(ctypes.create_string_buffer(data), ctypes.c_void_p) | Use this method to set the data for this blob |
def retry(self):
if not self.paid and not self.forgiven and not self.closed:
stripe_invoice = self.api_retrieve()
updated_stripe_invoice = (
stripe_invoice.pay()
)
type(self).sync_from_stripe_data(updated_stripe_invoice)
return True
return False | Retry payment on this invoice if it isn't paid, closed, or forgiven. |
def capabilities(self):
caps = []
for cap in DeviceCapability:
if self._libinput.libinput_device_has_capability(self._handle, cap):
caps.append(cap)
return tuple(caps) | A tuple of capabilities this device supports.
Returns:
(~libinput.constant.DeviceCapability): Device capabilities. |
def logger(self):
if self._experiment:
return logging.getLogger('.'.join([self.name, self.experiment]))
elif self._projectname:
return logging.getLogger('.'.join([self.name, self.projectname]))
else:
return logging.getLogger('.'.join([self.name])) | The logger of this organizer |
def process_param(self):
self.log_response_message('got RETURNVALUE message')
r = self._reader
if tds_base.IS_TDS72_PLUS(self):
ordinal = r.get_usmallint()
else:
r.get_usmallint()
ordinal = self._out_params_indexes[self.return_value_index]
name = r.read_ucs2(r.get_byte())
r.get_byte()
param = tds_base.Column()
param.column_name = name
self.get_type_info(param)
param.value = param.serializer.read(r)
self.output_params[ordinal] = param
self.return_value_index += 1 | Reads and processes RETURNVALUE stream.
This stream is used to send OUTPUT parameters from RPC to client.
Stream format url: http://msdn.microsoft.com/en-us/library/dd303881.aspx |
def reduce(self, show_noisy=False):
if not show_noisy:
for log in self.quiet_logs:
yield log['raw'].strip()
else:
for log in self.noisy_logs:
yield log['raw'].strip() | Yield the reduced log lines
:param show_noisy: If this is true, shows the reduced log file. If this is false, it shows the logs that
were deleted. |
def validate_schema(schema: GraphQLSchema) -> List[GraphQLError]:
assert_schema(schema)
errors = schema._validation_errors
if errors is None:
context = SchemaValidationContext(schema)
context.validate_root_types()
context.validate_directives()
context.validate_types()
errors = context.errors
schema._validation_errors = errors
return errors | Validate a GraphQL schema.
Implements the "Type Validation" sub-sections of the specification's "Type System"
section.
Validation runs synchronously, returning a list of encountered errors, or an empty
list if no errors were encountered and the Schema is valid. |
def merge_class(base, other):
try:
other = other.rules
except AttributeError:
pass
if not isinstance(other, list):
other = [other]
other_holidays = {holiday.name: holiday for holiday in other}
try:
base = base.rules
except AttributeError:
pass
if not isinstance(base, list):
base = [base]
base_holidays = {holiday.name: holiday for holiday in base}
other_holidays.update(base_holidays)
return list(other_holidays.values()) | Merge holiday calendars together. The base calendar
will take precedence to other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar
instance/subclass or array of Holiday objects |
def get_structural_variant(self, variant):
query = {
'chrom': variant['chrom'],
'end_chrom': variant['end_chrom'],
'sv_type': variant['sv_type'],
'$and': [
{'pos_left': {'$lte': variant['pos']}},
{'pos_right': {'$gte': variant['pos']}},
]
}
res = self.db.structural_variant.find(query).sort('pos_left',1)
match = None
distance = None
closest_hit = None
for hit in res:
if hit['end_left'] > variant['end']:
continue
if hit['end_right'] < variant['end']:
continue
distance = (abs(variant['pos'] - (hit['pos_left'] + hit['pos_right'])/2) +
abs(variant['end'] - (hit['end_left'] + hit['end_right'])/2))
if closest_hit is None:
match = hit
closest_hit = distance
continue
if distance < closest_hit:
match = hit
closest_hit = distance
return match | Check if there are any overlapping sv clusters
Search the sv variants with chrom start end_chrom end and sv_type
Args:
variant (dict): A variant dictionary
Returns:
variant (dict): A variant dictionary |
def get_next_property(self):
try:
next_object = self.next()
except StopIteration:
raise IllegalState('no more elements available in this list')
except Exception:
raise OperationFailed()
else:
return next_object | Gets the next ``Property`` in this list.
:return: the next ``Property`` in this list. The ``has_next()`` method should be used to test that a next ``Property`` is available before calling this method.
:rtype: ``osid.Property``
:raise: ``IllegalState`` -- no more elements available in this list
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.* |
def get_source_and_pgp_key(source_and_key):
try:
source, key = source_and_key.split('|', 2)
return source, key or None
except ValueError:
return source_and_key, None | Look for a pgp key ID or ascii-armor key in the given input.
:param source_and_key: Sting, "source_spec|keyid" where '|keyid' is
optional.
:returns (source_spec, key_id OR None) as a tuple. Returns None for key_id
if there was no '|' in the source_and_key string. |
def get_watchman_sockpath(binpath='watchman'):
path = os.getenv('WATCHMAN_SOCK')
if path:
return path
cmd = [binpath, '--output-encoding=json', 'get-sockname']
result = subprocess.check_output(cmd)
result = json.loads(result)
return result['sockname'] | Find the watchman socket or raise. |
def append(self, fdata, offset, query='/content/uploads'):
query = '%s/%s/%s/' % (query, self.uid, offset)
_r = self.connector.put(query, fdata, log_data=False, auto_create_json_str=False)
juicer.utils.Log.log_notice("Appending to: %s" % query)
juicer.utils.Log.log_debug("Continuing upload with append. POST returned with data: %s" % str(_r.content))
return _r.status_code | append binary data to an upload
`fdata` - binary data to send to pulp
`offset` - the amount of previously-uploaded data |
def get_var_shape(self, name):
rank = self.get_var_rank(name)
name = create_string_buffer(name)
arraytype = ndpointer(dtype='int32',
ndim=1,
shape=(MAXDIMS, ),
flags='F')
shape = np.empty((MAXDIMS, ), dtype='int32', order='F')
self.library.get_var_shape.argtypes = [c_char_p, arraytype]
self.library.get_var_shape(name, shape)
return tuple(shape[:rank]) | Return shape of the array. |
def delete_node_nto1(node_list, begin, node, end):
if begin is None:
assert node is not None
begin = node.precedence
elif not isinstance(begin, list):
begin = [begin]
if end.in_or_out:
for nb_ in begin:
nb_.out_redirect(node.single_input, node.single_output)
else:
for nb_ in begin:
target_var_name = node.single_input
assert target_var_name in nb_.output.values()
end.in_redirect(node.single_output, target_var_name)
for nb_ in begin:
nb_.successor = [end if v_ == node else v_ for v_ in nb_.successor]
end.precedence = [v_ for v_ in end.precedence if v_ != node] + node.precedence
node_list.remove(node)
return node_list | delete the node which has n-input and 1-output |
def beacon(config):
log.trace('salt proxy beacon called')
_config = {}
list(map(_config.update, config))
return _run_proxy_processes(_config['proxies']) | Handle configured proxies
.. code-block:: yaml
beacons:
salt_proxy:
- proxies:
p8000: {}
p8001: {} |
def update(self, entity):
assert isinstance(entity, Entity), "Error: entity must have an instance of Entity"
return self.__collection.update({'_id': entity._id}, {'$set': entity.as_dict()}) | Executes collection's update method based on keyword args.
Example::
manager = EntityManager(Product)
p = Product()
p.name = 'new name'
p.description = 'new description'
p.price = 300.0
yield manager.update(p) |
def dns(self):
dns = {
'elb': self.dns_elb(),
'elb_region': self.dns_elb_region(),
'global': self.dns_global(),
'region': self.dns_region(),
'instance': self.dns_instance(),
}
return dns | DNS details. |
def clear_duration(self):
if (self.get_duration_metadata().is_read_only() or
self.get_duration_metadata().is_required()):
raise errors.NoAccess()
self._my_map['duration'] = self._duration_default | Clears the duration.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* |
def normalize_datetime_to_utc(dt):
return datetime.datetime(
*dt.utctimetuple()[:6], microsecond=dt.microsecond, tzinfo=datetime.timezone.utc
) | Adjust datetime to UTC.
Apply the timezone offset to the datetime and set the timezone to UTC.
This is a no-op if the datetime is already in UTC.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
Notes:
This forces a new object to be returned, which fixes an issue with
serialization to XML in PyXB. PyXB uses a mixin together with
datetime to handle the XML xs:dateTime. That type keeps track of
timezone information included in the original XML doc, which conflicts if we
return it here as part of a datetime mixin.
See Also:
``cast_naive_datetime_to_tz()`` |
def mds(means, weights, d):
X = dim_reduce(means, weights, d)
if X.shape[0]==2:
return X.dot(weights)
else:
return X.T.dot(weights) | Dimensionality reduction using MDS.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells) |
def get_version(self, as_tuple=False):
if as_tuple:
return uwsgi.version_info
return decode(uwsgi.version) | Returns uWSGI version string or tuple.
:param bool as_tuple:
:rtype: str|tuple |
def get_email_address(self):
if self.login is None:
self.login = self.generate_login()
available_domains = self.available_domains
if self.domain is None:
self.domain = random.choice(available_domains)
elif self.domain not in available_domains:
raise ValueError('Domain not found in available domains!')
return u'{0}{1}'.format(self.login, self.domain) | Return full email address from login and domain from params in class
initialization or generate new. |
def _call(self, x, out=None):
if out is None:
return x * self.multiplicand
elif not self.__range_is_field:
if self.__domain_is_field:
out.lincomb(x, self.multiplicand)
else:
out.assign(self.multiplicand * x)
else:
raise ValueError('can only use `out` with `LinearSpace` range') | Multiply ``x`` and write to ``out`` if given. |
def report_non_responding_hosting_devices(self, context, host,
hosting_device_ids):
self.update_hosting_device_status(context, host,
{const.HD_DEAD: hosting_device_ids}) | Report that a hosting device is determined to be dead.
:param context: contains user information
:param host: originator of callback
:param hosting_device_ids: list of non-responding hosting devices |
def _visual_center(line, width):
spaces = max(width - _visual_width(line), 0)
left_padding = int(spaces / 2)
right_padding = spaces - left_padding
return (left_padding * " ") + line + (right_padding * " ") | Center align string according to it's visual width |
def check_user_permissions(payload, user_pk):
for perm_type in ['add', 'remove']:
user_pks = payload.get('users', {}).get(perm_type, {}).keys()
if user_pk in user_pks:
raise exceptions.PermissionDenied("You cannot change your own permissions") | Raise ``PermissionDenied`` if ``payload`` includes ``user_pk``. |
def ssm_create_association(name=None, kwargs=None, instance_id=None, call=None):
if call != 'action':
raise SaltCloudSystemExit(
'The ssm_create_association action must be called with '
'-a or --action.'
)
if not kwargs:
kwargs = {}
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
if name and not instance_id:
instance_id = _get_node(name)['instanceId']
if not name and not instance_id:
log.error('Either a name or an instance_id is required.')
return False
if 'ssm_document' not in kwargs:
log.error('A ssm_document is required.')
return False
params = {'Action': 'CreateAssociation',
'InstanceId': instance_id,
'Name': kwargs['ssm_document']}
result = aws.query(params,
return_root=True,
location=get_location(),
provider=get_provider(),
product='ssm',
opts=__opts__,
sigver='4')
log.info(result)
return result | Associates the specified SSM document with the specified instance
http://docs.aws.amazon.com/ssm/latest/APIReference/API_CreateAssociation.html
CLI Examples:
.. code-block:: bash
salt-cloud -a ssm_create_association ec2-instance-name ssm_document=ssm-document-name |
def policy_create(request, **kwargs):
body = {'policy': kwargs}
policy = neutronclient(request).create_qos_policy(body=body).get('policy')
return QoSPolicy(policy) | Create a QoS Policy.
:param request: request context
:param name: name of the policy
:param description: description of policy
:param shared: boolean (true or false)
:return: QoSPolicy object |
def transform_cb(self, setting, value):
self.make_callback('transform')
whence = 0
self.redraw(whence=whence) | Handle callback related to changes in transformations. |
def max(self):
if len(self.regions) != 1:
raise ClaripyVSAOperationError("'max()' onlly works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).max | The maximum integer value of a value-set. It is only defined when there is exactly one region.
:return: A integer that represents the maximum integer value of this value-set.
:rtype: int |
def _show_context_menu(self, point):
tc = self.textCursor()
nc = self.cursorForPosition(point)
if not nc.position() in range(tc.selectionStart(), tc.selectionEnd()):
self.setTextCursor(nc)
self._mnu = self.get_context_menu()
if len(self._mnu.actions()) > 1 and self.show_context_menu:
self._mnu.popup(self.mapToGlobal(point)) | Shows the context menu |
def to_pixel(self, wcs, mode='all'):
pixel_params = self._to_pixel_params(wcs, mode=mode)
return EllipticalAnnulus(**pixel_params) | Convert the aperture to an `EllipticalAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `EllipticalAnnulus` object
An `EllipticalAnnulus` object. |
def get_blob(profile, sha):
resource = "/blobs/" + sha
data = api.get_request(profile, resource)
return prepare(data) | Fetch a blob.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
sha
The SHA of the blob to fetch.
Returns:
A dict with data about the blob. |
def population(self):
"Class containing the population and all the individuals generated"
try:
return self._p
except AttributeError:
self._p = self._population_class(base=self,
tournament_size=self._tournament_size,
classifier=self.classifier,
labels=self._labels,
es_extra_test=self.es_extra_test,
popsize=self._popsize,
random_generations=self._random_generations,
negative_selection=self._negative_selection)
return self._p | Class containing the population and all the individuals generated |
def populate_from_seqinfo(self, seqinfo):
for row in csv.DictReader(seqinfo):
node = self.index.get(row['tax_id'])
if node:
node.sequence_ids.add(row['seqname']) | Populate sequence_ids below this node from a seqinfo file object. |
def functions(self):
def is_function(comment):
return isinstance(comment, FunctionDoc) and not comment.member
return self._filtered_iter(is_function) | Returns a generator of all standalone functions in the file, in textual
order.
>>> file = FileDoc('module.js', read_file('examples/module.js'))
>>> list(file.functions)[0].name
'the_first_function'
>>> list(file.functions)[3].name
'not_auto_discovered' |
def rename(self, name_dict=None, inplace=None, **names):
inplace = _check_inplace(inplace)
name_dict = either_dict_or_kwargs(name_dict, names, 'rename')
for k, v in name_dict.items():
if k not in self and k not in self.dims:
raise ValueError("cannot rename %r because it is not a "
"variable or dimension in this dataset" % k)
variables, coord_names, dims, indexes = self._rename_all(
name_dict=name_dict, dim_dict=name_dict)
return self._replace(variables, coord_names, dims=dims,
indexes=indexes, inplace=inplace) | Returns a new object with renamed variables and dimensions.
Parameters
----------
name_dict : dict-like, optional
Dictionary whose keys are current variable or dimension names and
whose values are the desired names.
inplace : bool, optional
If True, rename variables and dimensions in-place. Otherwise,
return a new dataset object.
**names, optional
Keyword form of ``name_dict``.
One of name_dict or names must be provided.
Returns
-------
renamed : Dataset
Dataset with renamed variables and dimensions.
See Also
--------
Dataset.swap_dims
DataArray.rename |
def asDictionary(self):
template = {
"type": "dataLayer",
"dataSource": self._dataSource
}
if not self._fields is None:
template['fields'] = self._fields
return template | returns the value as a dictionary |
def built_datetime(self):
from datetime import datetime
try:
return datetime.fromtimestamp(self.state.build_done)
except TypeError:
return None | Return the built time as a datetime object |
def _notify_single_item(self, item):
    """Route one inbound item to subscribed channels.

    Returns the set of channels that were notified with a truthy
    result for this item.
    """
    triggered_channels = set()
    for key_set in self.watch_keys:
        # Keep only the watched keys actually present on the item.
        plucked = {
            key_name: item[key_name]
            for key_name in key_set if key_name in item
        }
        route_keys = expand_dict_as_keys(plucked)
        for route in route_keys:
            channels = self.get_route_items(route) or {}
            LOG.debug('route table match: %s -> %s', route, channels)
            if not channels:
                LOG.debug(
                    'no subscribers for message.\nkey %s\nroutes: %s',
                    route,
                    self._routes
                )
            for channel in channels:
                # Each channel is dispatched to at most once per item.
                if channel in triggered_channels:
                    LOG.debug('skipping dispatch to %s', channel)
                    continue
                LOG.debug('routing dispatch to %s: %s', channel, item)
                try:
                    # Record the channel only when notify() is truthy.
                    channel.notify(item) and triggered_channels.add(channel)
                except Exception:
                    # One failing channel must not block the others.
                    LOG.exception('Channel notification failed')
    return triggered_channels | Route inbound items to individual channels
def _process_list(self, l):
    """Process the comma-separated widget-name list stored in attribute
    ``l`` of this object.

    A name wrapped in backquotes is treated as a regex and expanded to
    every attribute of ``self`` matching it whose value is a
    ``QObject``.  Returns the de-duplicated names (order not preserved).
    """
    if hasattr(self, l):
        t = getattr(self, l)
        def proc(inp):
            w = inp.strip()
            if w.startswith('`'):
                # Strip the surrounding backquotes and compile as regex.
                r = re.compile(w[1:-1])
                return [u for u in [m.group() for m in [r.match(x) for x in dir(self)] if m] if isinstance(getattr(self, u), QObject)]
            else:
                return [w]
        # Flatten the per-entry lists; set() drops duplicates.
        return list(set([y for x in map(proc, t.split(',')) for y in x]))
    return [] | Processes a list of widget names.
If any name is between `` then it is supposed to be a regex. |
def printstartfinish(verb, inp=None, kcount=None):
    r"""Print start and finish with time measure and kernel count.

    Parameters
    ----------
    verb : int
        Verbosity; the start message prints for ``verb > 2``, the end
        message for ``verb > 1``.
    inp : float, optional
        Start time from a previous call.  When given, the end message
        (with elapsed runtime) may be printed and nothing is returned;
        otherwise the current timer value is returned.
    kcount : int, optional
        Number of kernel calls to report in the end message.

    Returns
    -------
    t0 : float or None
        Current timer value when ``inp`` is falsy; otherwise ``None``.
    """
    # BUG FIX: a stray bare `r` expression (a leaked raw-string prefix)
    # used to sit here and raised NameError on every call.
    if inp:
        if verb > 1:
            ttxt = str(timedelta(seconds=default_timer() - inp))
            ktxt = ' '
            if kcount:
                ktxt += str(kcount) + ' kernel call(s)'
            print('\n:: empymod END; runtime = ' + ttxt + ' ::' + ktxt + '\n')
    else:
        t0 = default_timer()
        if verb > 2:
            print("\n:: empymod START ::\n")
        return t0
def rpXRDS(request):
    """Return a relying party verification XRDS document."""
    return_to_url = util.getViewURL(request, finishOpenID)
    return util.renderXRDS(request,
                           [RP_RETURN_TO_URL_TYPE],
                           [return_to_url])
def accelerator_experiments(self, key, value):
    """Populate the ``accelerator_experiments`` key.

    Builds one entry per legacy experiment name (``e`` subfields,
    skipping the ``'-'`` placeholder); record references from the ``0``
    subfields are attached only when the two counts line up.
    """
    result = []
    a_value = force_single_element(value.get('a'))
    e_values = [el for el in force_list(value.get('e')) if el != '-']
    zero_values = force_list(value.get('0'))
    # An accelerator with no experiment names still yields an entry.
    if a_value and not e_values:
        result.append({'accelerator': a_value})
    # Pair names with record ids only when counts match; otherwise the
    # ids cannot be attributed reliably and are dropped.
    if len(e_values) == len(zero_values):
        for e_value, zero_value in zip(e_values, zero_values):
            result.append({
                'legacy_name': e_value,
                'record': get_record_ref(zero_value, 'experiments'),
            })
    else:
        for e_value in e_values:
            result.append({'legacy_name': e_value})
    return result | Populate the ``accelerator_experiments`` key.
def coord_list_mapping_pbc(subset, superset, atol=1e-8):
    """Gives the index mapping from a subset to a superset.

    Superset cannot contain duplicate matching rows.

    Args:
        subset, superset: List of frac_coords

    Returns:
        list of indices such that superset[indices] = subset
    """
    # Broadcast the scalar tolerance to one value per fractional axis.
    return cuc.coord_list_mapping_pbc(subset, superset, np.ones(3) * atol)
def create(self, name, *args, **kwargs):
    """Standard task creation, but first check for the existence of the
    containers, and raise an exception if they don't exist.
    """
    container = kwargs.get("cont")
    if container:
        region = self.api.region_name
        swift_client = self.api.identity.object_store[region].client
        # Raises if the container does not exist.
        swift_client.get_container(container)
    return super(ImageTasksManager, self).create(name, *args, **kwargs)
def update_trackers(self):
    """Update the denormalized tracker fields (topic/post counts and
    last-post references) for this forum from its approved topics."""
    # Only approved topics count towards the trackers; most recent
    # post first.
    direct_approved_topics = self.topics.filter(approved=True).order_by('-last_post_on')
    self.direct_topics_count = direct_approved_topics.count()
    self.direct_posts_count = direct_approved_topics.aggregate(
        total_posts_count=Sum('posts_count'))['total_posts_count'] or 0
    if direct_approved_topics.exists():
        self.last_post_id = direct_approved_topics[0].last_post_id
        self.last_post_on = direct_approved_topics[0].last_post_on
    else:
        # No approved topics: clear the last-post pointers.
        self.last_post_id = None
        self.last_post_on = None
    self._simple_save() | Updates the denormalized trackers associated with the forum instance.
def get_archive_formats():
    """Returns a list of supported formats for archiving and unarchiving.

    Each element of the returned sequence is a tuple (name, description).
    """
    return sorted(
        (name, registry[2]) for name, registry in _ARCHIVE_FORMATS.items())
def _find_data_path_schema(data_path, schema_name):
if not data_path or data_path == '/' or data_path == '.':
return None
directory = os.path.dirname(data_path)
path = glob.glob(os.path.join(directory, schema_name))
if not path:
return _find_schema(directory, schema_name)
return path[0] | Starts in the data file folder and recursively looks
in parents for `schema_name` |
def findAll(haystack, needle):
    """Return a list of the start indices of every non-overlapping
    occurrence of needle in haystack."""
    positions = []
    start = haystack.find(needle)
    while start >= 0:
        positions.append(start)
        # Resume the search just past the current occurrence.
        start = haystack.find(needle, start + len(needle))
    return positions
def project_ranges(cb, msg, attributes):
    """Project x/y range extents supplied by a callback message into the
    coordinate reference system of the plot's current frame.

    Only the keys listed in ``attributes`` are returned.
    """
    if skip(cb, msg, attributes):
        return msg
    plot = get_cb_plot(cb)
    # Fall back to a default range when the message omits one.
    x0, x1 = msg.get('x_range', (0, 1000))
    y0, y1 = msg.get('y_range', (0, 1000))
    extents = x0, y0, x1, y1
    x0, y0, x1, y1 = project_extents(extents, plot.projection,
                                     plot.current_frame.crs)
    coords = {'x_range': (x0, x1), 'y_range': (y0, y1)}
    return {k: v for k, v in coords.items() if k in attributes} | Projects ranges supplied by a callback.
def prune_by_ngram_count_per_work(self, minimum=None, maximum=None,
                                  label=None):
    """Removes results rows if the n-gram count for all works bearing
    that n-gram is outside the range specified by `minimum` and
    `maximum`.

    That is, if a single witness of a single work has an n-gram count
    that falls within the specified range, all result rows for that
    n-gram are kept.

    If `label` is specified, the works checked are restricted to those
    associated with `label`.

    :param minimum: minimum n-gram count
    :type minimum: `int`
    :param maximum: maximum n-gram count
    :type maximum: `int`
    :param label: optional label to restrict requirement to
    :type label: `str`
    """
    self._logger.info('Pruning results by n-gram count per work')
    matches = self._matches
    # With no bounds supplied every n-gram is kept.
    keep_ngrams = matches[constants.NGRAM_FIELDNAME].unique()
    if label is not None:
        # Restrict the *check* to rows with this label; the final
        # filter below still applies to all rows.
        matches = matches[matches[constants.LABEL_FIELDNAME] == label]
    if minimum and maximum:
        keep_ngrams = matches[
            (matches[constants.COUNT_FIELDNAME] >= minimum) &
            (matches[constants.COUNT_FIELDNAME] <= maximum)][
            constants.NGRAM_FIELDNAME].unique()
    elif minimum:
        keep_ngrams = matches[
            matches[constants.COUNT_FIELDNAME] >= minimum][
            constants.NGRAM_FIELDNAME].unique()
    elif maximum:
        # BUG FIX: this branch previously filtered on the unrestricted
        # self._matches, silently ignoring any `label` restriction.
        keep_ngrams = matches[
            matches[constants.COUNT_FIELDNAME] <= maximum][
            constants.NGRAM_FIELDNAME].unique()
    self._matches = self._matches[self._matches[
        constants.NGRAM_FIELDNAME].isin(keep_ngrams)]
def create_variable(ncfile, name, datatype, dimensions) -> None:
    """Add a variable with the given name, datatype, and dimensions to
    the NetCDF file, augmenting error messages on failure."""
    # `fillvalue` is a module-level constant; only 'f8' variables get it.
    default = fillvalue if (datatype == 'f8') else None
    try:
        ncfile.createVariable(
            name, datatype, dimensions=dimensions, fill_value=default)
        ncfile[name].long_name = name
    except BaseException:
        # Re-raise with context about the variable and target file.
        objecttools.augment_excmessage(
            'While trying to add variable `%s` with datatype `%s` '
            'and dimensions `%s` to the NetCDF file `%s`'
            % (name, datatype, dimensions, get_filepath(ncfile))) | Add a new variable with the given name, datatype, and dimensions
to the given NetCDF file.
Essentially, |create_variable| just calls the equally named method
of the NetCDF library, but adds information to possible error messages:
>>> from hydpy import TestIO
>>> from hydpy.core.netcdftools import netcdf4
>>> with TestIO():
... ncfile = netcdf4.Dataset('test.nc', 'w')
>>> from hydpy.core.netcdftools import create_variable
>>> try:
... create_variable(ncfile, 'var1', 'f8', ('dim1',))
... except BaseException as exc:
... print(str(exc).strip('"')) # doctest: +ELLIPSIS
While trying to add variable `var1` with datatype `f8` and \
dimensions `('dim1',)` to the NetCDF file `test.nc`, the following error \
occurred: ...
>>> from hydpy.core.netcdftools import create_dimension
>>> create_dimension(ncfile, 'dim1', 5)
>>> create_variable(ncfile, 'var1', 'f8', ('dim1',))
>>> import numpy
>>> numpy.array(ncfile['var1'][:])
array([ nan, nan, nan, nan, nan])
>>> ncfile.close() |
def do_set(self, params):
    # Update the znode at params.path; `decoded` converts the CLI string
    # form of the value, and `version` guards against concurrent updates.
    self.set(params.path, decoded(params.value), version=params.version) | \x1b[1mNAME\x1b[0m
set - Updates the znode's value
\x1b[1mSYNOPSIS\x1b[0m
set <path> <value> [version]
\x1b[1mOPTIONS\x1b[0m
* version: only update if version matches (default: -1)
\x1b[1mEXAMPLES\x1b[0m
> set /foo 'bar'
> set /foo 'verybar' 3 |
def modify_user(self, username, data):
    """Modify user in SQLite database.

    Since username is used as primary key and we only have a single
    argument for it we can't modify the username right now.

    :param username: username (primary key) of the row to update
    :param data: dict of column -> new value; a ``password`` entry is
        replaced by a freshly salted hash (``pwd_salt``/``pwd_hash``)
    :raises AuthError: on SQLite operational or integrity errors
    """
    if 'password' in data:
        # SECURITY FIX: use `secrets` (CSPRNG) rather than `random`
        # for the salt, which is security-sensitive material.
        import secrets
        char_set = string.ascii_letters + string.digits
        data['pwd_salt'] = ''.join(secrets.choice(char_set) for _ in range(8))
        data['pwd_hash'] = self._gen_hash(data['password'], data['pwd_salt'])
        del data['password']
    columns = sorted(data)
    # Parameterized statement; only column names are interpolated.
    sql = ("UPDATE user SET " +
           ', '.join("%s = ?" % column for column in columns) +
           " WHERE username = ?")
    vals = [data[column] for column in columns] + [username]
    try:
        self._db_curs.execute(sql, vals)
        self._db_conn.commit()
    except (sqlite3.OperationalError, sqlite3.IntegrityError) as error:
        raise AuthError(error)
def _validate_int(name, value, limits=(), strip='%'):
    """Validate the named integer within the supplied limits inclusive
    and strip supplied unit characters.

    Returns a ``(value, comment)`` tuple; ``comment`` is non-empty when
    validation failed.
    """
    comment = ''
    try:
        if isinstance(value, string_types):
            # Drop surrounding spaces and unit characters before parsing.
            value = value.strip(' ' + strip)
        value = int(value)
    except (TypeError, ValueError):
        comment += '{0} must be an integer '.format(name)
    else:
        if len(limits) == 2 and not limits[0] <= value <= limits[1]:
            comment += '{0} must be in the range [{1[0]}, {1[1]}] '.format(name, limits)
    return value, comment
def api_run_get(run_id):
    """Return a single run as a JSON response, or a 404 JSON error when
    the run id is unknown."""
    data = current_app.config["data"]
    run = data.get_run_dao().get(run_id)
    records_total = 1 if run is not None else 0
    if records_total == 0:
        return Response(
            render_template(
                "api/error.js",
                error_code=404,
                error_message="Run %s not found." % run_id),
            status=404,
            mimetype="application/json")
    # A single-run lookup never filters anything out.
    records_filtered = records_total
    return Response(render_template("api/runs.js", runs=[run], draw=1,
                                    recordsTotal=records_total,
                                    recordsFiltered=records_filtered,
                                    full_object=True),
                    mimetype="application/json") | Return a single run as a JSON object.
def start(self):
    """Start the broker's interaction with the Kafka stream."""
    # Start consumer and producer synchronously, then run the consume
    # loop as a background task on the event loop.
    self.loop.run_until_complete(self._consumer.start())
    self.loop.run_until_complete(self._producer.start())
    self._consumer_task = self.loop.create_task(self._consume_event_callback()) | This function starts the brokers interaction with the kafka stream
def _parse_model(topology, scope, model, inputs=None, outputs=None):
    """This is a delegate function of all top-level parsing functions.
    It does nothing but call a proper function to parse the given model.
    """
    inputs = [] if inputs is None else inputs
    outputs = [] if outputs is None else outputs
    pipeline_types = ('pipeline', 'pipelineClassifier', 'pipelineRegressor')
    network_types = ('neuralNetworkClassifier', 'neuralNetworkRegressor',
                     'neuralNetwork')
    model_type = model.WhichOneof('Type')
    if model_type in pipeline_types:
        _parse_pipeline_model(topology, scope, model, inputs, outputs)
    elif model_type in network_types:
        _parse_neural_network_model(topology, scope, model, inputs, outputs)
    else:
        _parse_simple_model(topology, scope, model, inputs, outputs)
def remove_all(self, items):
    """Removes all of the elements that is present in the specified
    collection from this list.

    :param items: (Collection), the specified collection.
    :return: (bool), ``true`` if this list changed as a result of the call.
    """
    check_not_none(items, "Value can't be None")
    data_items = []
    for element in items:
        # Each element is validated before serialization.
        check_not_none(element, "Value can't be None")
        data_items.append(self._to_data(element))
    return self._encode_invoke(list_compare_and_remove_all_codec,
                               values=data_items)
def _init(self):
    """Initialize the telnet connection and log in."""
    self.tn = telnetlib.Telnet(self.ip, self.port)
    # NOTE(review): hard-coded default APC credentials; also, Python 3's
    # telnetlib expects bytes rather than str — confirm target version.
    self.tn.read_until('User Name')
    self.tn.write('apc\r\n')
    self.tn.read_until('Password')
    self.tn.write('apc\r\n')
    self.until_done() | Initialize the telnet connection
def evaluate(self, x, y, flux, x_0, y_0, sigma):
    """Model function Gaussian PSF model.

    Uses ``self._erf`` over the +/-0.5 pixel offsets around the centre
    in each axis and scales by ``flux``.
    """
    denom = np.sqrt(2) * sigma
    x_term = (self._erf((x - x_0 + 0.5) / denom) -
              self._erf((x - x_0 - 0.5) / denom))
    y_term = (self._erf((y - y_0 + 0.5) / denom) -
              self._erf((y - y_0 - 0.5) / denom))
    return flux / 4 * (x_term * y_term)
def process_log_event(event, context):
    """Lambda Entrypoint - Log Subscriber

    Format log events and relay to sentry (direct or sqs).
    """
    init()
    serialized = event['awslogs'].pop('data')
    # CloudWatch Logs subscription payloads are base64-encoded gzip.
    data = json.loads(zlib.decompress(
        base64.b64decode(serialized), 16 + zlib.MAX_WBITS))
    msg = get_sentry_message(config, data)
    if msg is None:
        return
    if config['sentry_dsn']:
        send_sentry_message(config['sentry_dsn'], msg)
    elif config['sentry_sqs']:
        # BUG FIX: send_message was called without the required
        # MessageBody, so the message was never actually relayed.
        sqs.send_message(
            QueueUrl=config['sentry_sqs'],
            MessageBody=json.dumps(msg))
def endure_multi(self, keys, persist_to=-1, replicate_to=-1,
                 timeout=5.0, interval=0.010, check_removed=False):
    # Thin wrapper: forwards all durability parameters unchanged to the
    # base-class implementation.
    return _Base.endure_multi(self, keys, persist_to=persist_to,
                              replicate_to=replicate_to,
                              timeout=timeout, interval=interval,
                              check_removed=check_removed) | Check durability requirements for multiple keys
:param keys: The keys to check
The type of keys may be one of the following:
* Sequence of keys
* A :class:`~couchbase.result.MultiResult` object
* A ``dict`` with CAS values as the dictionary value
* A sequence of :class:`~couchbase.result.Result` objects
:return: A :class:`~.MultiResult` object
of :class:`~.OperationResult` items.
.. seealso:: :meth:`endure` |
def create_storage(self, size=10, tier='maxiops', title='Storage disk',
                   zone='fi-hel1', backup_rule=None):
    """Create a Storage object. Returns an object based on the API's
    response.

    :param size: disk size in gigabytes
    :param tier: storage tier name
    :param title: human-readable name for the disk
    :param zone: zone identifier the storage is created in
    :param backup_rule: optional dict describing the backup schedule;
        an empty dict (the default) means no rule
    """
    # BUG FIX: avoid a mutable default argument ({}); None now stands
    # in for "no rule" and is normalised here.
    if backup_rule is None:
        backup_rule = {}
    body = {
        'storage': {
            'size': size,
            'tier': tier,
            'title': title,
            'zone': zone,
            'backup_rule': backup_rule
        }
    }
    res = self.post_request('/storage', body)
    return Storage(cloud_manager=self, **res['storage'])
def pix2sky_vec(self, pixel, r, theta):
    """Convert a position plus a vector in pixel coordinates into the
    equivalent position and vector in sky coordinates.

    Returns (ra, dec, magnitude, position_angle), all in degrees.
    """
    ra1, dec1 = self.pix2sky(pixel)
    x, y = pixel
    # End point of the vector in pixel space.
    a = [x + r * np.cos(np.radians(theta)),
         y + r * np.sin(np.radians(theta))]
    locations = self.pix2sky(a)
    ra2, dec2 = locations
    # `a` is reused: great-circle distance between the two sky points.
    a = gcd(ra1, dec1, ra2, dec2)
    pa = bear(ra1, dec1, ra2, dec2)
    return ra1, dec1, a, pa | Given and input position and vector in pixel coordinates, calculate
the equivalent position and vector in sky coordinates.
Parameters
----------
pixel : (int,int)
origin of vector in pixel coordinates
r : float
magnitude of vector in pixels
theta : float
angle of vector in degrees
Returns
-------
ra, dec : float
The (ra, dec) of the origin point (degrees).
r, pa : float
The magnitude and position angle of the vector (degrees). |
def plot_contour_labels(self, new_fig=True):
    """Plot the labelled contours as time/interval points.

    :param new_fig: when True, draw into a fresh matplotlib figure.
    """
    timestamps = []
    pitch = []
    if new_fig:
        p.figure()
    for interval, contours in self.contour_labels.items():
        for contour in contours:
            # Each contour is a (start, end) index pair into the pitch
            # object's timestamps.
            x = self.pitch_obj.timestamps[contour[0]:contour[1]]
            y = [interval]*len(x)
            timestamps.extend(x)
            pitch.extend(y)
    data = np.array([timestamps, pitch]).T
    # Sort the points by time so the line is drawn left to right.
    data = np.array(sorted(data, key=lambda xx: xx[0]))
    p.plot(data[:, 0], data[:, 1], 'g-') | Plots the labelled contours!
def json_exception(context, request):
    """Render an HTTP exception as a JSON body for XHR requests."""
    request.response.status = context.code
    # NOTE(review): relies on the private `_status` attribute of the
    # exception for the reason phrase — confirm against the framework.
    return {'error': context._status, 'messages': context.message} | Always return json content in the body of Exceptions to xhr requests.
def _read(self, command, future):
    """Read and parse the response for an executed command.

    Re-arms itself on the connection until the hiredis reader has a
    complete response, then resolves ``future``.
    """
    response = self._reader.gets()
    if response is not False:
        if isinstance(response, hiredis.ReplyError):
            # Cluster redirections get special handling; any other
            # error reply is surfaced to the caller.
            if response.args[0].startswith('MOVED '):
                self._on_cluster_data_moved(response.args[0], command,
                                            future)
            elif response.args[0].startswith('READONLY '):
                self._on_read_only_error(command, future)
            else:
                future.set_exception(exceptions.RedisError(response))
        elif command.callback is not None:
            # Per-command callback transforms the raw response.
            future.set_result(command.callback(response))
        elif command.expectation is not None:
            self._eval_expectation(command, response, future)
        else:
            future.set_result(response)
    else:
        # Incomplete response: feed more bytes and try again.
        def on_data(data):
            self._reader.feed(data)
            self._read(command, future)
        command.connection.read(on_data) | Invoked when a command is executed to read and parse its results.
def _encode_batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Encode the next batched insert, update, or delete command.

    Returns the encoded payload and the docs included in this batch.
    """
    # NOTE(review): StringIO is used as the output buffer although BSON
    # payloads are bytes — presumably aliased to a BytesIO; confirm.
    buf = StringIO()
    to_send, _ = _batched_write_command_impl(
        namespace, operation, command, docs, check_keys, opts, ctx, buf)
    return buf.getvalue(), to_send | Encode the next batched insert, update, or delete command.
def _parse_show_output(cmd_ret):
    """Parse the output of an aptly show command into a dict.

    Top-level ``key: value`` lines become dict entries; indented lines
    following a key are collected into a list under that key.

    :param str cmd_ret: The text of the command output to parse.
    :return: A dictionary containing the configuration data.
    :rtype: dict
    """
    parsed_data = dict()
    list_key = None
    for line in cmd_ret.splitlines():
        if not line.strip():
            continue
        # A non-indented line ends any list currently being collected.
        if not salt.utils.stringutils.contains_whitespace(line[0]):
            list_key = None
        if list_key:
            # Indented continuation line: append to the current list.
            list_value = _convert_to_closest_type(line)
            parsed_data.setdefault(list_key, []).append(list_value)
            continue
        items = [item.strip() for item in line.split(':', 1)]
        # Normalise the key: lowercase, underscore-separated.
        key = items[0].lower()
        key = ' '.join(key.split()).replace(' ', '_')
        list_key = key
        try:
            value = items[1]
        except IndexError:
            # Line had no ':'; nothing to record for it.
            log.debug('Skipping line: %s', line)
            continue
        if value:
            parsed_data[key] = _convert_to_closest_type(value)
    return _convert_parsed_show_output(parsed_data=parsed_data) | Parse the output of an aptly show command.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.