| code | docstring |
|---|---|
def load(self, filename):
    """Load the given config file.

    :param filename: the filename including the path to load.
    :raises AppConfigValueException: if the file does not exist.
    """
    if not os.path.exists(filename):
        raise AppConfigValueException('Could not load config file {0}'.
                                      format(filename))
    # Use a context manager so the handle is closed even when parsing
    # raises (the original leaked the open file on error).
    with open(filename, 'r') as cfl:
        if PY2:
            self.readfp(cfl)
        else:
            self.read_file(cfl)
def to_json(self, propval, extraneous=False, to_json_func=None):
    """Serialize ``propval`` for the slot.

    Prefers the instance's ``json_out`` hook when one was configured;
    otherwise delegates to ``to_json_func`` (defaulting to
    ``normalize.record.json.to_json``).
    """
    if self.json_out:
        return self.json_out(propval)
    func = to_json_func
    if not func:
        from normalize.record.json import to_json as func
    return func(propval, extraneous)
def _get_image_size(self, maxcharno, maxlineno):
return (self._get_char_x(maxcharno) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad) | Get the required image size. |
def init_kerberos(app, service='HTTP', hostname=gethostname()):
    """Configure the GSSAPI service name and validate that the matching
    principal is present in the kerberos keytab.

    :param app: a flask application
    :type app: flask.Flask
    :param service: GSSAPI service name
    :type service: str
    :param hostname: hostname the service runs under (NOTE: evaluated
        once at import time because it is a default argument)
    :type hostname: str
    """
    global _SERVICE_NAME
    _SERVICE_NAME = "%s@%s" % (service, hostname)
    if 'KRB5_KTNAME' not in environ:
        # Without a keytab the principal cannot be validated.
        app.logger.warn("Kerberos: set KRB5_KTNAME to your keytab file")
    else:
        try:
            principal = kerberos.getServerPrincipalDetails(service, hostname)
        except kerberos.KrbError as exc:
            # NOTE(review): ``exc.message`` is Python-2 only; this line
            # would fail on Python 3 -- confirm target version.
            app.logger.warn("Kerberos: %s" % exc.message[0])
        else:
            app.logger.info("Kerberos: server is %s" % principal)
def setRemoveAction(self, action):
    """Set the action taken when a model is removed from the collection
    generated by this reverse lookup.

    :param action: <str> -- either "unset" or "delete"; any other value
        raises a ValidationError.
    """
    if action in ('unset', 'delete'):
        self.__removeAction = action
    else:
        raise orb.errors.ValidationError('The remove action must be either "unset" or "delete"')
def clear_cache(cls):
    """Call this before closing the tk root.

    Blanks every cached value (so other holders of the old dict see the
    entries cleared) and then rebinds ``_cached`` to a fresh dict.
    """
    for cached_key in cls._cached:
        cls._cached[cached_key] = None
    cls._cached = {}
def set_option(self, optionname, value):
    """Set the named option to ``value``, preserving the original type
    of the option's current value.

    Fix: the original returned False as soon as the *first* registered
    name failed to match, so only the first option could ever be set;
    the loop now scans every option before giving up.

    :returns: True when the option was found and set, False otherwise.
    """
    for name, parms in zip(self.opt_names, self.opt_parms):
        if name == optionname:
            defaulttype = type(parms['enabled'])
            # Coerce to the existing value's type unless it is NoneType.
            if defaulttype != type(value) and defaulttype != type(None):
                value = (defaulttype)(value)
            parms['enabled'] = value
            return True
    return False
def request_syncmodule(blink, network):
    """Request sync module info.

    :param blink: Blink instance.
    :param network: Sync module network id.
    """
    base = blink.urls.base_url
    return http_get(blink, "{}/network/{}/syncmodules".format(base, network))
def markdown_media_css():
    """Add css requirements to HTML.

    :returns: Editor template context with CSS_SET / CSS_SKIN paths.
    """
    set_css = posixpath.join(
        settings.MARKDOWN_SET_PATH, settings.MARKDOWN_SET_NAME, 'style.css')
    skin_css = posixpath.join(
        'django_markdown', 'skins', settings.MARKDOWN_EDITOR_SKIN,
        'style.css')
    return dict(CSS_SET=set_css, CSS_SKIN=skin_css)
def get_pages(url):
    """Yield the 'pages' reachable from the starting url.

    Yields the current url, downloads it, looks for an anchor whose
    text starts with "next ", and repeats until no such link exists.
    """
    while True:
        yield url
        body = html.parse(url).find("body")
        next_links = [a for a in body.findall(".//a")
                      if a.text and a.text.startswith("next ")]
        if not next_links:
            return
        url = urljoin(url, next_links[0].get('href'))
def start(self):
    """Start the process: fork, then watch the child from the parent.

    The parent installs a pyev child watcher on the forked pid; the
    child runs ``self.run`` with the stored args and exits.
    """
    logger.info("starting process")
    process = os.fork()
    # NOTE(review): presumably gives the child a head start before the
    # watcher is installed -- confirm this sleep is actually needed.
    time.sleep(0.01)
    if process != 0:
        # Parent branch: ``process`` is the child's pid.
        logger.debug('starting child watcher')
        self.loop.reset()
        self.child_pid = process
        self.watcher = pyev.Child(self.child_pid, False, self.loop, self._child)
        self.watcher.start()
    else:
        # Child branch: run the target function and never return.
        self.loop.reset()
        logger.debug('running main function')
        self.run(*self.args, **self.kwargs)
        logger.debug('quitting')
        sys.exit(0)
def find_deck(provider: Provider, key: str, version: int, prod: bool=True) -> Optional[Deck]:
    """Find a specific deck by deck id (``key`` is the deck txid).

    NOTE(review): ``version`` is accepted but never used -- the literal
    ``1`` is passed to ``deck_parser``; confirm whether ``version``
    should be forwarded instead.
    """
    pa_params = param_query(provider.network)
    # Production vs. test P2TH address selects the chain being searched.
    if prod:
        p2th = pa_params.P2TH_addr
    else:
        p2th = pa_params.test_P2TH_addr
    rawtx = provider.getrawtransaction(key, 1)
    deck = deck_parser((provider, rawtx, 1, p2th))
    return deck
def generate(self, path, label):
    """Create default data from the corpus at `path`, categorising
    every work found there as `label`.

    :param path: path to a corpus directory
    :type path: `str`
    :param label: label to categorise each work as
    :type label: `str`
    """
    for work in os.listdir(path):
        self[work] = label
def get_event(self, block=True, timeout=None):
    """Fetch the next event from the queue.

    :arg boolean block: Set to True to block if no event is available.
    :arg seconds timeout: Timeout to wait if no event is available.
    :Returns: The next event, or `None` when nothing is available
        within the given constraints.
    """
    event = None
    try:
        event = self._events.get(block, timeout)
    except Empty:
        pass
    return event
def unpackage(package_):
    """Unpackage a payload: msgpack-decode ``package_`` into Python
    objects (lists rather than tuples, per ``use_list=True``)."""
    return salt.utils.msgpack.loads(package_, use_list=True,
                                    _msgpack_module=msgpack)
def initLogging(verbosity=0, name="SCOOP"):
    """Create and return a ``{name}Logger`` logger writing to stderr,
    with the level derived from ``verbosity``.

    Mutates the module-level ``loggingConfig`` registry, so repeated
    calls accumulate logger definitions before ``dictConfig`` runs.
    """
    global loggingConfig
    # Map CLI-style verbosity counts onto logging level names.
    # NOTE(review): "NOSET" is presumably a typo for "NOTSET" -- confirm.
    verbose_levels = {
        -2: "CRITICAL",
        -1: "ERROR",
        0: "WARNING",
        1: "INFO",
        2: "DEBUG",
        3: "DEBUG",
        4: "NOSET",
    }
    log_handlers = {
        "console":
        {
            "class": "logging.StreamHandler",
            "formatter": "{name}Formatter".format(name=name),
            "stream": "ext://sys.stderr",
        },
    }
    loggingConfig.update({
        "{name}Logger".format(name=name):
        {
            "handlers": ["console"],
            "level": verbose_levels[verbosity],
        },
    })
    dict_log_config = {
        "version": 1,
        "handlers": log_handlers,
        "loggers": loggingConfig,
        "formatters":
        {
            "{name}Formatter".format(name=name):
            {
                "format": "[%(asctime)-15s] %(module)-9s "
                          "%(levelname)-7s %(message)s",
            },
        },
    }
    dictConfig(dict_log_config)
    return logging.getLogger("{name}Logger".format(name=name))
def convert(obj, ids, attr_type, item_func, cdata, parent='root'):
    """Route the elements of an object to the right function to convert
    them based on their data type.

    Fix: ``bool`` is a subclass of ``numbers.Number``, so the boolean
    check must run before the generic number/string check -- otherwise
    ``convert_bool`` was unreachable and booleans were emitted as
    plain key/values.
    """
    LOG.info('Inside convert(). obj type is: "%s", obj="%s"' % (type(obj).__name__, unicode_me(obj)))
    item_name = item_func(parent)
    if type(obj) == bool:
        return convert_bool(item_name, obj, attr_type, cdata)
    # NOTE(review): ``unicode`` makes this Python-2 only -- confirm.
    if isinstance(obj, numbers.Number) or type(obj) in (str, unicode):
        return convert_kv(item_name, obj, attr_type, cdata)
    # Anything date-like (has isoformat) is serialized as its ISO string.
    if hasattr(obj, 'isoformat'):
        return convert_kv(item_name, obj.isoformat(), attr_type, cdata)
    if obj is None:
        return convert_none(item_name, '', attr_type, cdata)
    if isinstance(obj, dict):
        return convert_dict(obj, ids, parent, attr_type, item_func, cdata)
    if isinstance(obj, collections.Iterable):
        return convert_list(obj, ids, parent, attr_type, item_func, cdata)
    raise TypeError('Unsupported data type: %s (%s)' % (obj, type(obj).__name__))
def do_format(self, event_iterable):
    """Lazily apply the formatter chain to CloudWatch Logs events.

    Each formatter in ``self.formatter_chain`` is partially applied
    with the ``colored`` flag and mapped over the events; actual
    formatting happens only when the returned iterable is consumed.

    Parameters
    ----------
    event_iterable : iterable of samcli.lib.logs.event.LogEvent

    Returns
    -------
    iterable of string
        Iterable that returns a formatted event as a string.
    """
    for operation in self.formatter_chain:
        # Bind the color flag now; the map itself stays lazy.
        partial_op = functools.partial(operation, colored=self.colored)
        # NOTE(review): ``imap`` is presumably itertools.imap/six.moves
        # (Python 2 style) imported at module scope -- confirm.
        event_iterable = imap(partial_op, event_iterable)
    return event_iterable
def main_update(self):
    """Main loop run by the updater thread; direct calls unnecessary.

    Calls ``self.update()`` roughly every ``self.refresh`` seconds
    (compensating for the time update itself took), then ``commit()``.
    Any exception is stored on ``self.error`` and re-raised, ending
    the thread.
    """
    try:
        # Lower our scheduling priority slightly where supported.
        os.nice(1)
    except AttributeError as er:
        # os.nice is unavailable on this platform (e.g. Windows).
        pass
    time.sleep(self.refresh)
    try:
        while True:
            timestamp=time.time()
            self.update()
            # Sleep only for the remainder of the refresh interval.
            delay=(timestamp+self.refresh)-time.time()
            if delay > 0:
                if delay > self.refresh:
                    # Clock moved backwards; fall back to one interval.
                    time.sleep(self.refresh)
                else:
                    time.sleep(delay)
            self.commit()
    except Exception as e:
        self.error=e
        raise
def write_sequences_to_fasta(path, seqs):
    """Create a FASTA file listing the given sequences.

    Arguments
    =========
    path: str or pathlib.Path
        The name of the file to create.
    seqs: dict
        A mapping of names to sequences (protein or DNA).
    """
    from Bio import SeqIO
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    records = [
        SeqRecord(Seq(seq), id=name, description='')
        for name, seq in seqs.items()
    ]
    SeqIO.write(records, str(Path(path)), 'fasta')
def _new_url_record(cls, request: Request) -> URLRecord:
    """Return a fresh URLRecord for ``request``, marked in-progress at
    level 0 with no tries yet."""
    record = URLRecord()
    record.url = request.url_info.url
    record.status = Status.in_progress
    record.try_count = 0
    record.level = 0
    return record
def write(self):
    """Write changes to our file, if there are changes at all.

    :raise IOError: if this is a read-only writer instance or if we
        could not obtain a file lock
    """
    self._assure_writable("write")
    if not self._dirty:
        return
    if isinstance(self._file_or_files, (list, tuple)):
        raise AssertionError("Cannot write back if there is not exactly a single file to write to, have %i files"
                             % len(self._file_or_files))
    if self._has_includes():
        # Values from included files were merged into this one; writing
        # would flatten them into the top-level file, so refuse.
        log.debug("Skipping write-back of configuration file as include files were merged in." +
                  "Set merge_includes=False to prevent this.")
        return
    fp = self._file_or_files
    # A path (or file-type constant) means we own the file and must lock it.
    is_file_lock = isinstance(fp, string_types + (FileType, ))
    if is_file_lock:
        self._lock._obtain_lock()
    if not hasattr(fp, "seek"):
        with open(self._file_or_files, "wb") as fp:
            self._write(fp)
    else:
        # Caller-supplied stream: rewrite it in place.
        fp.seek(0)
        # Not every stream type supports truncate.
        if hasattr(fp, 'truncate'):
            fp.truncate()
        self._write(fp)
def get_account(self, account_id, **kwargs):
    """Retrieve a CDN account with the specified account ID.

    :param account_id int: the numeric ID associated with the CDN account.
    :param dict \\*\\*kwargs: additional arguments to include in the
        object mask (defaults to a 'status' mask).
    """
    kwargs.setdefault('mask', 'status')
    return self.account.getObject(id=account_id, **kwargs)
def setBreak(self, breakFlag=True):
    """Invoke the pdb debugger just before this element is parsed.

    Set ``breakFlag`` to True to enable, False to disable.
    Returns self to allow chaining.
    """
    if breakFlag:
        _parseMethod = self._parse
        def breaker(instring, loc, doActions=True, callPreParse=True):
            import pdb
            pdb.set_trace()
            # Bug fix: propagate the wrapped method's parse results;
            # the original discarded them and returned None.
            return _parseMethod(instring, loc, doActions, callPreParse)
        # Remember the original so the wrapper can be removed later.
        breaker._originalParseMethod = _parseMethod
        self._parse = breaker
    else:
        if hasattr(self._parse, "_originalParseMethod"):
            self._parse = self._parse._originalParseMethod
    return self
def filter_products(self, desired_prods):
    """When asked for a product, serve only those in ``desired_prods``."""
    self.desired_prods = set(desired_prods)
    self.filter_prods = True
def _recv_nack(self, method_frame):
    """Receive a nack from the broker and relay it to the registered
    nack listener, expanding a ``multiple`` nack into one callback per
    outstanding delivery tag."""
    if self._nack_listener:
        delivery_tag = method_frame.args.read_longlong()
        # Two bit-flags packed into the frame: multiple + requeue.
        multiple, requeue = method_frame.args.read_bits(2)
        if multiple:
            # Nack everything up to and including ``delivery_tag``.
            while self._last_ack_id < delivery_tag:
                self._last_ack_id += 1
                self._nack_listener(self._last_ack_id, requeue)
        else:
            self._last_ack_id = delivery_tag
            self._nack_listener(self._last_ack_id, requeue)
def tagged(*tags: Tags) -> Callable:
    """Decorator that labels a process function with ``tags``; the
    labels are applied to any child Processes produced by the event.

    Fix: the wrapper now propagates the wrapped callable's return
    value, which the original silently discarded.  (The pointless
    ``global`` declaration -- the global was only read -- is removed.)
    """
    def hook(event: Callable):
        def wrapper(*args, **kwargs):
            return event(*args, **kwargs)
        setattr(wrapper, GREENSIM_TAG_ATTRIBUTE, tags)
        return wrapper
    return hook
def validate_lun_path(self, host_wwpn, host_port, wwpn, lun):
    """Validate that an FCP storage volume is reachable from this CPC
    through ``host_port`` using ``host_wwpn``.

    Performs the "Validate LUN Path" HMC operation.  Returns normally
    when the volume is reachable; otherwise the POST raises a
    :exc:`~zhmcclient.HTTPError` whose ``reason`` indicates why
    (484: target WWPN not reachable; 485: WWPN reachable, LUN not).
    The CPC must have the "dpm-storage-management" feature enabled.

    Parameters:
      host_wwpn (string): WWPN of the host (CPC), hex, up to 16 chars,
        any lexical case.
      host_port (zhmcclient.Port): storage port used for the check.
      wwpn (string): WWPN of the FCP storage subsystem, hex.
      lun (string): LUN of the volume within the subsystem, hex.

    Raises:
      :exc:`~zhmcclient.HTTPError`, :exc:`~zhmcclient.ParseError`,
      :exc:`~zhmcclient.AuthError`, :exc:`~zhmcclient.ConnectionError`
    """
    # Normalize all hex identifiers to 16 lower-case hex digits.
    def _hex16(value):
        return format(int(value, 16), '016x')

    body = {
        'host-world-wide-port-name': _hex16(host_wwpn),
        'adapter-port-uri': host_port.uri,
        'target-world-wide-port-name': _hex16(wwpn),
        'logical-unit-number': _hex16(lun),
    }
    self.manager.session.post(
        self.uri + '/operations/validate-lun-path',
        body=body)
def create_pgroup_snapshot(self, source, **kwargs):
    """Create snapshot of pgroup from specified source.

    :param source: Name of pgroup of which to take snapshot.
    :type source: str
    :param \\*\\*kwargs: See the REST API Guide on your array for the
        documentation on the **POST pgroup** request.
    :returns: A dictionary describing the created snapshot.
    :rtype: ResponseDict

    .. note:: Requires use of REST API 1.2 or later.
    """
    raw = self.create_pgroup_snapshots([source], **kwargs)
    # Pre-1.4 API versions return the raw response unchanged.
    if self._rest_version < LooseVersion("1.4"):
        return raw
    response = ResponseDict(raw[0])
    response.headers = raw.headers
    return response
def apply_docstr(docstr_func):
    """Decorator factory that changes (or extends) the docstring of one
    function to that of another.

    If ``docstr_func`` is a string it is appended to the decorated
    function's existing docstring; otherwise it is treated as a
    function whose signature/doc should be preserved onto the target.
    """
    def docstr_applier(func):
        if isinstance(docstr_func, six.string_types):
            olddoc = meta_util_six.get_funcdoc(func)
            if olddoc is None:
                olddoc = ''
            newdoc = olddoc + docstr_func
            meta_util_six.set_funcdoc(func, newdoc)
            return func
        else:
            preserved_func = preserve_sig(func, docstr_func)
            return preserved_func
    return docstr_applier
def confirm_dialog(self, emitter):
    """Event handler called when the OK button is pressed.

    Copies the form fields back into the configuration dict, then
    defers to the parent dialog's confirm handler.
    NOTE(review): ``self`` (not ``emitter``) is forwarded to the parent
    handler -- confirm this is intentional.
    """
    self.from_fields_to_dict()
    return super(ProjectConfigurationDialog,self).confirm_dialog(self)
def view(self, template_name, kwargs=None):
    """Render ``template_name`` (with the session injected) and write
    the result to the response.

    Sample usage
    +++++++++++++

    .. code:: python

        from bast import Controller

        class MyController(Controller):
            def index(self):
                self.view('index.html')
    """
    context = kwargs if kwargs is not None else {}
    self.add_('session', self.session)
    self.write(self.render_template(template_name, **context))
def is_parameter(self):
    """Whether this object is a function parameter."""
    if not isinstance(self.scope, CodeFunction):
        return False
    return self in self.scope.parameters
def get_content(self, url):
    """Return the content of a cached resource.

    Args:
        url: The url of the resource

    Returns:
        The cached bytes, or None when ``url`` is not in the cache.
    """
    try:
        with open(self._url_to_path(url), 'rb') as cached:
            return cached.read()
    except IOError:
        return None
def get_host_info():
    """Collect some information about the machine this experiment runs on.

    Iterates the module-level ``host_info_gatherers`` registry of
    name -> callable; gatherers that raise ``IgnoreHostInfo`` are
    silently skipped.

    Returns
    -------
    dict
        A dictionary with information about the CPU, the OS and the
        Python version of this machine.
    """
    host_info = {}
    for k, v in host_info_gatherers.items():
        try:
            host_info[k] = v()
        except IgnoreHostInfo:
            pass
    return host_info
def get_balance(self):
    """Return the current account balance (as of today)."""
    today = Datum()
    today.today()
    return self.get_balance_on(today.value)
def _call(self, x, out=None):
    """Return the constant vector, or assign it to ``out`` in place."""
    if out is not None:
        out.assign(self.constant)
    else:
        return self.range.element(copy(self.constant))
def _send_raw_command(ipmicmd, raw_bytes):
    """Use an IPMI command object to send a raw ipmi command to the BMC.

    :param ipmicmd: IPMI command object
    :param raw_bytes: string of hexadecimal values (commonly used for
        certain vendor specific commands)
    :returns: dict -- the response from the IPMI device
    """
    netfn, command, data = _parse_raw_bytes(raw_bytes)
    return ipmicmd.raw_command(netfn, command, data=data)
def ignore_exceptions(f):
    """Decorator that catches, logs, and swallows any exception raised
    by ``f`` (the wrapped call then returns None).

    Narrowed from a bare ``except:`` so that SystemExit and
    KeyboardInterrupt still propagate.
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            logging.exception("Ignoring exception in %r", f)
    return wrapped
def expand_source_paths(paths):
    """Yield each path, substituting compiled files (.pyc/.pyo) with
    their source equivalents when the source exists on disk."""
    for candidate in paths:
        if candidate.endswith(('.pyc', '.pyo')):
            source = get_py_path(candidate)
            if os.path.exists(source):
                candidate = source
        yield candidate
def link_type(arg_type, arg_name=None, include_bt:bool=True):
    "Create a markdown link to the documentation for `arg_type`."
    arg_name = arg_name or fn_name(arg_type)
    # Optionally wrap the display name in backticks.
    if include_bt: arg_name = code_esc(arg_name)
    # torch types link to the PyTorch docs (Tensor-likes excluded).
    if belongs_to_module(arg_type, 'torch') and ('Tensor' not in arg_name): return f'[{arg_name}]({get_pytorch_link(arg_type)})'
    # fastai classes link to the locally generated docs.
    if is_fastai_class(arg_type): return f'[{arg_name}]({get_fn_link(arg_type)})'
    return arg_name
def gather_meta(self):
    """Return the role's meta file contents as a string ('' when the
    file does not exist), recording the de-duplicated dependency list
    on ``self.dependencies`` as a side effect.
    """
    if not os.path.exists(self.paths["meta"]):
        return ""
    meta_dict = utils.yaml_load(self.paths["meta"])
    if meta_dict and "dependencies" in meta_dict:
        dep_list = []
        for dependency in meta_dict["dependencies"]:
            # A dependency is either a plain name or a dict with a
            # "role" key.  (isinstance instead of ``type(...) is dict``
            # so dict subclasses from the yaml loader also match.)
            if isinstance(dependency, dict):
                dep_list.append(dependency["role"])
            else:
                dep_list.append(dependency)
        # De-duplicate; order is not preserved (matches prior behavior).
        meta_dict["dependencies"] = list(set(dep_list))
        self.dependencies = meta_dict["dependencies"]
    else:
        self.dependencies = []
    return utils.file_to_string(self.paths["meta"])
def _enable_thread_pool(func):
    """Run ``func`` on the instance's thread pool when
    ``self.enable_thread_pool`` is set (and a pool exists).

    Returns a Future when the ``is_async`` kwarg is truthy; otherwise
    blocks for the result (up to ``timeout`` seconds, default 2) and
    returns it, or None on timeout.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        self = args[0]
        if self.enable_thread_pool and hasattr(self, 'thread_pool'):
            future = self.thread_pool.submit(func, *args, **kwargs)
            is_async = kwargs.get('is_async')
            if is_async is None or not is_async:
                timeout = kwargs.get('timeout')
                if timeout is None:
                    timeout = 2
                try:
                    result = future.result(timeout=timeout)
                # NOTE(review): presumably concurrent.futures.TimeoutError
                # -- confirm which TimeoutError is in scope at module level.
                except TimeoutError as e:
                    self.logger.exception(e)
                    result = None
                return result
            return future
        else:
            # Pooling disabled: execute inline.
            return func(*args, **kwargs)
    return wrapper
def update(self, friendly_name=values.unset, unique_name=values.unset):
    """Update the FieldTypeInstance.

    :param unicode friendly_name: A string to describe the resource
    :param unicode unique_name: An application-defined string that
        uniquely identifies the resource
    :returns: Updated FieldTypeInstance
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeInstance
    """
    return self._proxy.update(friendly_name=friendly_name, unique_name=unique_name, )
def get_composition_metadata(self):
    """Get the metadata for linking this asset to a composition.

    return: (osid.Metadata) - metadata for the composition
    *compliance: mandatory -- This method must be implemented.*
    """
    composition_md = dict(self._mdata['composition'],
                          existing_id_values=self._my_map['compositionId'])
    return Metadata(**composition_md)
def geodesic(crs, start, end, steps):
    r"""Construct a geodesic path between two lat/lon points.

    Acts as a wrapper for the geodesic construction available in
    `pyproj`, projecting the resulting points into ``crs``.

    Parameters
    ----------
    crs : `cartopy.crs`
        Coordinate Reference System to use for the output.
    start, end : (2, ) array_like
        Latitude-longitude pairs designating the end points of the
        geodesic (degrees north / degrees east).
    steps : int
        Number of points along the geodesic, including the end points.

    Returns
    -------
    `numpy.ndarray`
        The list of x, y points in the given CRS along the geodesic.

    See Also
    --------
    cross_section
    """
    import cartopy.crs as ccrs
    from pyproj import Geod
    g = Geod(crs.proj4_init)
    # npts yields only the interior points, so prepend/append the ends.
    # start/end are (lat, lon) but Geod wants (lon, lat) -- hence the
    # [::-1] reversals and swapped indices below.
    geodesic = np.concatenate([
        np.array(start[::-1])[None],
        np.array(g.npts(start[1], start[0], end[1], end[0], steps - 2)),
        np.array(end[::-1])[None]
    ]).transpose()
    points = crs.transform_points(ccrs.Geodetic(), *geodesic)[:, :2]
    return points
def time(ctx, hours, minutes, seconds):
    """Define a time value from hour/minute/second expressions, each
    coerced to an integer in the evaluation context ``ctx``."""
    return _time(conversions.to_integer(hours, ctx), conversions.to_integer(minutes, ctx), conversions.to_integer(seconds, ctx))
def unplug(self):
    """Remove this actor's methods from the global callback registry."""
    if not self.__plugged:
        return
    own_methods = {method for _, method
                   in inspect.getmembers(self, predicate=inspect.ismethod)}
    for message in global_callbacks:
        global_callbacks[message] -= own_methods
    self.__plugged = False
def kube_pod_status_phase(self, metric, scraper_config):
    """Report the phase pods are in, as a ``<ns>.pod.status_phase``
    gauge aggregated per (namespace, phase, custom tags) combination."""
    metric_name = scraper_config['namespace'] + '.pod.status_phase'
    status_phase_counter = Counter()
    for sample in metric.samples:
        tags = [
            self._label_to_tag('namespace', sample[self.SAMPLE_LABELS], scraper_config),
            self._label_to_tag('phase', sample[self.SAMPLE_LABELS], scraper_config),
        ] + scraper_config['custom_tags']
        # Sort so identical tag sets aggregate under the same key.
        status_phase_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
    for tags, count in iteritems(status_phase_counter):
        self.gauge(metric_name, count, tags=list(tags))
def get_first_content(el_list, alt=None, strip=True):
    """Return the (optionally stripped) content of the first element in
    `el_list`, or `alt` when the list is empty or the content is blank.

    Args:
        el_list (list): List of HTMLElement objects.
        alt (default None): Value returned when list or content is blank.
        strip (bool, default True): Call .strip() on the content.

    Returns:
        str or alt: String content of the first element, or `alt`.
    """
    if not el_list:
        return alt
    content = el_list[0].getContent()
    if strip:
        content = content.strip()
    return content if content else alt
def change_path_prefix(self, path, old_prefix, new_prefix, app_name):
    """Re-root ``path`` from ``old_prefix`` to ``new_prefix``/``app_name``."""
    tail = os.path.relpath(path, old_prefix)
    return os.path.join(new_prefix, app_name, tail)
def _render_item(self, dstack, key, value = None, **settings):
    """Format a single tree line: a tree-drawing prefix derived from
    the depth stack ``dstack`` followed by "key" or "key: value"."""
    cur_depth = len(dstack) - 1
    treeptrn = ''
    s = self._es_text(settings, settings[self.SETTING_TREE_FORMATING])
    # One styled tree-drawing character per ancestor level.
    for ds in dstack:
        treeptrn += ' ' + self.fmt_text(self.tchar(settings[self.SETTING_TREE_STYLE], cur_depth, *ds), **s) + ''
    strptrn = "{}"
    if value is not None:
        strptrn += ": {}"
    s = self._es_text(settings, settings[self.SETTING_TEXT_FORMATING])
    strptrn = self.fmt_text(strptrn.format(key, value), **s)
    return '{} {}'.format(treeptrn, strptrn)
def channel_names(self) -> tuple:
    """Channel names, decoded from the stored byte strings (creating
    an empty attribute on first access)."""
    if "channel_names" not in self.attrs.keys():
        self.attrs["channel_names"] = np.array([], dtype="S")
    return tuple(name.decode() for name in self.attrs["channel_names"])
def is_topology(self, layers=None):
    """Validate that ``layers`` forms a legal topology (a DAG whose
    sizes propagate consistently).

    Returns a topological ordering (with ``'|'`` separating each round
    of peeled-off layers) on success, or False when a size conflict
    occurs or layers remain unscheduled (cycle/unreachable).
    """
    if layers is None:
        layers = self.layers
    layers_nodle = []
    result = []
    # Collect the indices of all layers that are still alive.
    for i, layer in enumerate(layers):
        if layer.is_delete is False:
            layers_nodle.append(i)
    while True:
        flag_break = True
        layers_toremove = []
        for layer1 in layers_nodle:
            # A layer is ready when none of its inputs are still pending.
            flag_arrive = True
            for layer2 in layers[layer1].input:
                if layer2 in layers_nodle:
                    flag_arrive = False
            if flag_arrive is True:
                for layer2 in layers[layer1].output:
                    # Propagate this layer's size to its consumers;
                    # a size mismatch invalidates the topology.
                    if layers[layer2].set_size(layer1, layers[layer1].size) is False:
                        return False
                layers_toremove.append(layer1)
                result.append(layer1)
                flag_break = False
        for layer in layers_toremove:
            layers_nodle.remove(layer)
        result.append('|')
        if flag_break:
            break
    if layers_nodle:
        # Remaining layers could never be scheduled -> cycle.
        return False
    return result
def run(opts, args):
    """The run method for the 'run' command: execute a plugin from the
    command line.

    Optional positional args: a mode name (default ONCE), followed by
    a plugin URL.
    """
    setup_options(opts)
    mode = Modes.ONCE
    # First positional arg may name a mode (case-insensitive).
    if len(args) > 0 and hasattr(Modes, args[0].upper()):
        _mode = args.pop(0).upper()
        mode = getattr(Modes, _mode)
    url = None
    if len(args) > 0:
        url = args.pop(0)
    plugin_mgr = PluginManager.load_plugin_from_addonxml(mode, url)
    plugin_mgr.run()
def add_item(self, item):
    """Append ``item`` to the batch; it must be a JsonRpcResponse."""
    if isinstance(item, JsonRpcResponse):
        self.items.append(item)
    else:
        raise TypeError(
            "Expected JsonRpcResponse but got {} instead".format(type(item).__name__))
def group(__decorated__, **Config):
    r"""A decorator to make groups out of classes.

    Config:
      * name (str): The name of the group. Defaults to
        __decorated__.__name__.
      * desc (str): The description of the group (optional).
      * alias (str): The alias for the group (optional).
    """
    _Group = Group(__decorated__, Config)
    if isclass(__decorated__):
        # Classes are made static so the group needs no instances.
        static(__decorated__)
    state.ActiveModuleMemberQ.insert(0, _Group)
    return _Group.Underlying
def render(self, renderer=None, **kwargs):
    """Render the navigational item using a renderer.

    :param renderer: An object implementing the :class:`~.Renderer`
        interface.
    :return: A markupsafe string with the rendered result.
    """
    return Markup(get_renderer(current_app, renderer)(**kwargs).visit(
        self))
def sql_get_oids(self, where=None):
    """Query the source database for a sorted, distinct list of oids.

    NOTE(review): table/db/_oid come from local config and are
    interpolated directly into the SQL -- acceptable for trusted
    config, but not safe for untrusted input.
    """
    table = self.lconfig.get('table')
    db = self.lconfig.get('db_schema_name') or self.lconfig.get('db')
    _oid = self.lconfig.get('_oid')
    # _oid may be configured as a list; only the first column is used.
    if is_array(_oid):
        _oid = _oid[0]
    sql = 'SELECT DISTINCT %s.%s FROM %s.%s' % (table, _oid, db, table)
    if where:
        # NOTE(review): ``basestring`` makes this Python-2 only code.
        where = [where] if isinstance(where, basestring) else list(where)
        sql += ' WHERE %s' % ' OR '.join(where)
    result = sorted([r[_oid] for r in self._load_sql(sql)])
    return result
def update_proficiency(self, proficiency_form):
    """Update an existing proficiency from a completed form.

    arg: proficiency_form (osid.learning.ProficiencyForm): the form
        containing the elements to be updated
    raise: IllegalState - ``proficiency_form`` already used in an
        update transaction
    raise: InvalidArgument - wrong form type, form not for update, or
        one or more form elements invalid
    raise: Unsupported - ``proficiency_form`` did not originate from
        ``get_proficiency_form_for_update()``
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('learning',
                                    collection='Proficiency',
                                    runtime=self._runtime)
    if not isinstance(proficiency_form, ABCProficiencyForm):
        raise errors.InvalidArgument('argument type is not an ProficiencyForm')
    if not proficiency_form.is_for_update():
        raise errors.InvalidArgument('the ProficiencyForm is for update only, not create')
    try:
        # Each form may only be consumed once per transaction.
        if self._forms[proficiency_form.get_id().get_identifier()] == UPDATED:
            raise errors.IllegalState('proficiency_form already used in an update transaction')
    except KeyError:
        # Unknown form id -> not issued by this session.
        raise errors.Unsupported('proficiency_form did not originate from this session')
    if not proficiency_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    collection.save(proficiency_form._my_map)
    self._forms[proficiency_form.get_id().get_identifier()] = UPDATED
    return objects.Proficiency(
        osid_object_map=proficiency_form._my_map,
        runtime=self._runtime,
        proxy=self._proxy)
def make_tmp_name(name):
    """Generate a tmp name for a file or dir.

    The temp name sits in the same directory as `name`.  If it exists
    on disk at context exit time, it is deleted.
    """
    dirname, basename = os.path.split(name)
    tmp_name = os.path.join(dirname, ".tmp-%s-%s" % (basename, uuid4().hex))
    try:
        yield tmp_name
    finally:
        safe_remove(tmp_name)
def reorderbydf(df2, df1):
    """Reorder the rows of ``df2`` to follow the index order of ``df1``.

    :param df2: input dataframe
    :param df1: template dataframe whose index order is applied
    :returns: ``df2`` rows selected in ``df1``'s index order

    Replaces the per-row ``DataFrame.append`` loop (deprecated and
    removed in pandas 2.0, and quadratic) with one label selection.
    """
    return df2.loc[df1.index]
def cctop_save_xml(jobid, outpath):
    """Save the CCTOP results file (XML) for a finished job.

    Args:
        jobid (str): Job ID obtained when job was submitted
        outpath (str): Path to output filename

    Returns:
        str: Path to output filename

    Raises:
        ConnectionRefusedError: when the job is not yet finished.
    """
    status = cctop_check_status(jobid=jobid)
    if status == 'Finished':
        result = 'http://cctop.enzim.ttk.mta.hu/php/result.php?jobId={}'.format(jobid)
        # NOTE(review): POST is used to fetch the result page -- confirm
        # the endpoint does not expect GET.
        result_text = requests.post(result)
        with open(outpath, 'w') as f:
            f.write(result_text.text)
        return outpath
    else:
        # NOTE(review): ConnectionRefusedError (an OSError subclass) is
        # an unusual choice for "job not done" -- kept as-is.
        raise ConnectionRefusedError('CCTOP job incomplete, status is "{}"'.format(status))
def date_range(start=None, end=None, periods=None, freq=None, tz=None,
               normalize=False, name=None, closed=None, **kwargs):
    """Return a fixed frequency DatetimeIndex.

    Of ``start``, ``end``, ``periods`` and ``freq``, exactly three must
    be specified.  If ``freq`` is omitted, the result has ``periods``
    linearly spaced elements between ``start`` and ``end`` (closed on
    both sides).

    Parameters
    ----------
    start, end : str or datetime-like, optional
        Left/right bounds for generating dates.
    periods : integer, optional
        Number of periods to generate.
    freq : str or DateOffset, default 'D'
        Frequency string; multiples allowed, e.g. '5H'.
    tz : str or tzinfo, optional
        Time zone name for a localized DatetimeIndex (default naive).
    normalize : bool, default False
        Normalize start/end dates to midnight before generating.
    name : str, default None
        Name of the resulting DatetimeIndex.
    closed : {None, 'left', 'right'}, optional
        Which boundary points to include (None = both sides).
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    rng : DatetimeIndex

    See Also
    --------
    DatetimeIndex, timedelta_range, period_range, interval_range
    """
    # Default to daily frequency only when it cannot be inferred from
    # the other three parameters.
    if freq is None and com._any_none(periods, start, end):
        freq = 'D'
    dtarr = DatetimeArray._generate_range(
        start=start, end=end, periods=periods,
        freq=freq, tz=tz, normalize=normalize,
        closed=closed, **kwargs)
    return DatetimeIndex._simple_new(
        dtarr, tz=dtarr.tz, freq=dtarr.freq, name=name)
def schema_exists(cls, cur, schema_name):
    """Check whether a schema exists in the connected database.

    :param cur: an open DB-API cursor (psycopg2-style, ``%s`` placeholders).
    :param schema_name: name of the schema to look up.
    :returns: True if the schema exists, False otherwise.
    """
    # Use a parameterized query instead of str.format interpolation to
    # avoid SQL injection through schema_name.
    cur.execute(
        "SELECT EXISTS (SELECT schema_name FROM information_schema.schemata "
        "WHERE schema_name = %s);",
        (schema_name,)
    )
    return cur.fetchone()[0]
def _oops_dump_state(self, ignore_remaining_data=False):
    """Log a deserialization error state dump.

    :param ignore_remaining_data: if True, don't log an error when unused
        trailing bytes remain in the stream.
    """
    log_error("==Oops state dump" + "=" * (30 - 17))
    log_error("References: {0}".format(self.references))
    log_error("Stream seeking back at -16 byte (2nd line is an actual position!):")

    # Rewind slightly so the hexdump shows context around the failure point.
    self.object_stream.seek(-16, os.SEEK_CUR)
    dump_offset = self.object_stream.tell()
    remaining = self.object_stream.read()

    if remaining and not ignore_remaining_data:
        log_error(
            "Warning!!!!: Stream still has {0} bytes left:\n{1}".format(
                len(remaining), self._create_hexdump(remaining, dump_offset)
            )
        )

    log_error("=" * 30)
:param ignore_remaining_data: If True, don't log an error when
unused trailing bytes are remaining |
def addSource(self, source, data):
    """Aggregate the given source's stats into the running result."""
    aggregators = self._aggregators
    accumulated = self._result
    self._aggregate(source, aggregators, data, accumulated)
def check_signature_supported(func, warn=False):
    """Check if we support the signature of this function.

    We currently do not allow remote functions to have **kwargs, nor
    keyword-only arguments (defined after * or *args).

    Args:
        func: The function whose signature should be checked.
        warn: If True, a warning is logged when the signature is not
            supported; otherwise an exception is raised.

    Raises:
        Exception: If the signature is not supported and ``warn`` is False.
    """
    function_name = func.__name__
    kinds = [param.kind for _, param in get_signature_params(func)]

    def _report(message):
        # Either warn or fail hard, depending on the caller's preference.
        if warn:
            logger.warning(message)
        else:
            raise Exception(message)

    if Parameter.VAR_KEYWORD in kinds:
        _report("The function {} has a **kwargs argument, which is "
                "currently not supported.".format(function_name))
    if Parameter.KEYWORD_ONLY in kinds:
        _report("The function {} has a keyword only argument "
                "(defined after * or *args), which is currently "
                "not supported.".format(function_name))
We currently do not allow remote functions to have **kwargs. We also do not
support keyword arguments in conjunction with a *args argument.
Args:
func: The function whose signature should be checked.
warn: If this is true, a warning will be printed if the signature is
not supported. If it is false, an exception will be raised if the
signature is not supported.
Raises:
Exception: An exception is raised if the signature is not supported. |
def infer_axes(self):
    """Infer the axes of my storer.

    Returns a boolean indicating whether we have a valid storer.
    """
    if self.storable is None:
        # Nothing stored to infer from.
        return False
    self.get_attrs()
    return True
return a boolean indicating if we have a valid storer or not |
def flush(self):
    """Flush all tables and cached ndarrays to disk."""
    self.log.info('Flushing tables and arrays to disk...')
    for table in self._tables.values():
        table.flush()
    self._write_ndarrays_cache_to_disk()
def readdir(self, tid, fh):
    """Read directory contents. Lists visible elements of ``YTActions`` object.

    Parameters
    ----------
    tid : str
        Path to file. Original `path` argument is converted to a tuple
        identifier by the ``_pathdec`` decorator.
    fh : int
        File descriptor. Unused in the function body.

    Returns
    -------
    list
        List of filenames which will be shown as directory content.
    """
    path_type = self.PathType.get(tid)
    try:
        if path_type is self.PathType.main:
            entries = list(self.searches)
        elif path_type is self.PathType.subdir:
            entries = list(self.searches[tid[0]])
        elif path_type is self.PathType.file:
            raise FuseOSError(errno.ENOTDIR)
        else:
            raise FuseOSError(errno.ENOENT)
    except KeyError:
        # Unknown search name in tid[0].
        raise FuseOSError(errno.ENOENT)
    return ['.', '..'] + entries
Parameters
----------
tid : str
Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
fh : int
File descriptor. Ommited in the function body.
Returns
-------
list
List of filenames, wich will be shown as directory content. |
def get_running():
    """Return a list of all running services, so far as systemd is concerned.

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_running
    """
    ret = set()
    out = __salt__['cmd.run'](
        _systemctl_cmd('--full --no-legend --no-pager'),
        python_shell=False,
        ignore_retcode=True)
    for line in salt.utils.itertools.split(out, '\n'):
        comps = line.strip().split()
        # Expected format: UNIT LOAD ACTIVE SUB [DESCRIPTION...].
        # BUG FIX: the original only caught ValueError, but a short/empty
        # line raised IndexError on comps[0] or left active_state unbound
        # (NameError on the later comparison). Skip malformed lines instead.
        if len(comps) < 4:
            continue
        fullname = comps[0]
        active_state = comps[3]
        if active_state != 'running':
            continue
        try:
            unit_name, unit_type = fullname.rsplit('.', 1)
        except ValueError:
            # Not a "name.type" unit string; ignore it.
            continue
        if unit_type in VALID_UNIT_TYPES:
            # Bare name for services, full name (with type) otherwise.
            ret.add(unit_name if unit_type == 'service' else fullname)
    return sorted(ret)
CLI Example:
.. code-block:: bash
salt '*' service.get_running |
def get_monomers(self, ligands=True):
    """Retrieve all the `Monomers` from the AMPAL object.

    Parameters
    ----------
    ligands : bool, optional
        If true, will include ligand `Monomers`.
    """
    if ligands and self.ligands:
        combined = self._monomers + self.ligands._monomers
    else:
        combined = self._monomers
    return iter(combined)
Parameters
----------
ligands : bool, optional
If true, will include ligand `Monomers`. |
def _listitemify(self, item):
info_type = self.info_type if hasattr(self, 'info_type') else 'video'
if not hasattr(item, 'as_tuple'):
if 'info_type' not in item.keys():
item['info_type'] = info_type
item = xbmcswift2.ListItem.from_dict(**item)
return item | Creates an xbmcswift2.ListItem if the provided value for item is a
dict. If item is already a valid xbmcswift2.ListItem, the item is
returned unmodified. |
def get_role_model():
    """Returns the Role model that is active in this project."""
    # Fall back to the bundled default when the setting is absent.
    app_model = getattr(settings, "ARCTIC_ROLE_MODEL", "arctic.Role")
    try:
        role_model = django_apps.get_model(app_model)
    except ValueError:
        raise ImproperlyConfigured(
            "ARCTIC_ROLE_MODEL must be of the " "form 'app_label.model_name'"
        )
    except LookupError:
        raise ImproperlyConfigured(
            "ARCTIC_ROLE_MODEL refers to model '%s' that has not been "
            "installed" % settings.ARCTIC_ROLE_MODEL
        )
    return role_model
def retrieve_nodes(self):
    """Retrieve nodes from old mysql DB."""
    self.verbose('retrieving nodes from old mysql DB...')
    nodes = list(OldNode.objects.all())
    self.old_nodes = nodes
    self.message('retrieved %d nodes' % len(nodes))
def is_serializable_type(type_):
    """Return `True` if the given type's instances conform to the Serializable protocol.

    :rtype: bool
    """
    if inspect.isclass(type_):
        # Classes qualify by subclassing or by the namedtuple protocol.
        return issubclass(type_, Serializable) or hasattr(type_, '_asdict')
    # Non-class values are checked via the protocol's own membership test.
    return Serializable.is_serializable(type_)
:rtype: bool |
def find_maximum(self, scores, N, k_choices):
    """Find the combination corresponding to the maximum score in `scores`.

    Arguments
    ---------
    scores : numpy.ndarray
    N : int
    k_choices : int

    Returns
    -------
    list
    """
    if not isinstance(scores, np.ndarray):
        raise TypeError("Scores input is not a numpy array")
    best_index = int(scores.argmax())
    # Walk the lazily-generated combinations up to the winning index.
    all_combos = combinations(list(range(N)), k_choices)
    best_combo = self.nth(all_combos, best_index, None)
    return sorted(best_combo)
Arguments
---------
scores : numpy.ndarray
N : int
k_choices : int
Returns
-------
list |
def reverse_dictionary(d):
    """Reverses the key value pairs for a given dictionary.

    Note: if multiple keys share the same value, only one survives the
    inversion (the last one iterated wins), as in the original.

    Parameters
    ----------
    d : :obj:`dict`
        dictionary to reverse

    Returns
    -------
    :obj:`dict`
        dictionary with keys and values swapped
    """
    # Dict comprehension replaces the original list-comprehension-for-
    # side-effects anti-pattern ([rev_d.update(...) for ...]).
    return {v: k for k, v in d.items()}
Parameters
----------
d : :obj:`dict`
dictionary to reverse
Returns
-------
:obj:`dict`
dictionary with keys and values swapped |
def match_ancestor_objective_id(self, objective_id=None, match=None):
    """Sets the objective ``Id`` for this query to match objectives that have the specified objective as an ancestor.

    arg:    objective_id (osid.id.Id): an objective ``Id``
    arg:    match (boolean): ``true`` for a positive match,
            ``false`` for a negative match
    raise:  NullArgument - ``objective_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    if not match:
        # Negative matches are not supported by this implementation.
        raise errors.Unimplemented()
    self._add_match('ancestorObjectiveId', objective_id)
arg: objective_id (osid.id.Id): an objective ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``objective_id`` is ``null``
*compliance: mandatory -- This method must be implemented.* |
def add_context(self, name, indices, level=None):
    """Add a new context level to the hierarchy.

    By default, new contexts are added to the lowest level of the hierarchy.
    To insert elsewhere, use ``level``; e.g. ``level=0`` inserts the context
    at the highest level.

    Parameters
    ----------
    name : str
    indices : list
        Token indices at which each chunk in the context begins.
    level : int
        Level in the hierarchy at which to insert the context; defaults to
        the lowest level.
    """
    self._validate_context((name, indices))
    insert_at = len(self.contexts_ranked) if level is None else level
    self.contexts_ranked.insert(insert_at, name)
    self.contexts[name] = indices
By default, new contexts are added to the lowest level of the hierarchy.
To insert the context elsewhere in the hierarchy, use the ``level``
argument. For example, ``level=0`` would insert the context at the
highest level of the hierarchy.
Parameters
----------
name : str
indices : list
Token indices at which each chunk in the context begins.
level : int
Level in the hierarchy at which to insert the context. By default,
inserts context at the lowest level of the hierarchy |
def parallel_progbar(mapper, iterable, nprocs=None, starmap=False, flatmap=False, shuffle=False,
                     verbose=True, verbose_flatmap=None, **kwargs):
    """Perform a parallel map over `iterable`, reporting a progress bar as values are returned.

    :param mapper: the mapping function to apply to elements of the iterable
    :param iterable: the iterable to map
    :param nprocs: the number of processes (defaults to the number of cpu's)
    :param starmap: if true, elements are tuples unpacked into the mapper's arguments
    :param flatmap: if true, flatten lists of objects returned by the mapper
    :param shuffle: if true, randomly sort the elements before processing; may help provide
        more uniform runtimes when different objects take different amounts of time
    :param verbose: whether or not to print the progress bar
    :param verbose_flatmap: when flatmapping, whether or not to report each object as it's returned
    :param kwargs: any other keyword arguments for the progress bar (see ``progbar``)
    :return: a list of the returned objects, in the same order as provided
    """
    indexed_results = _parallel_progbar_launch(mapper, iterable, nprocs, starmap, flatmap,
                                               shuffle, verbose, verbose_flatmap, **kwargs)
    # Results arrive tagged with their original position; restore input order.
    ordered = sorted(indexed_results, key=lambda pair: pair[0])
    return [value for _, value in ordered]
:param mapper: The mapping function to apply to elements of the iterable
:param iterable: The iterable to map
:param nprocs: The number of processes (defaults to the number of cpu's)
:param starmap: If true, the iterable is expected to contain tuples and the mapper function gets each element of a
tuple as an argument
:param flatmap: If true, flatten out the returned values if the mapper function returns a list of objects
:param shuffle: If true, randomly sort the elements before processing them. This might help provide more uniform
runtimes if processing different objects takes different amounts of time.
:param verbose: Whether or not to print the progress bar
:param verbose_flatmap: If performing a flatmap, whether or not to report each object as it's returned
:param kwargs: Any other keyword arguments to pass to the progress bar (see ``progbar``)
:return: A list of the returned objects, in the same order as provided |
def get_snmp_from_host2(self):
    """Get SNMP values from 2nd host."""
    if self.snmp2:
        response = self.snmp2.get_oids(activity_oid)
        self.activity_value2 = activity[int(response[0])]
    else:
        # No SNMP session configured for host 2; clear the cached value.
        self.activity_value2 = None
def build(self, build_dir, **kwargs):
    """Build the cmake build command.

    :param build_dir: directory containing the cmake build tree.
    :param kwargs: accepted for interface compatibility; ignored.
    :returns: a single-element list holding the command spec dict.
    """
    del kwargs
    command = ["cmake", "--build", build_dir] + self._get_build_flags()
    return [{"args": command}]
def get_features(self, yam):
    """Return list of features declared for module `yam`."""
    # First capability whose "module" parameter matches; IndexError if none.
    matching = [cap for cap in self.capabilities
                if cap.parameters.get("module", None) == yam]
    params = matching[0].parameters
    features = params.get("features")
    if not features:
        return []
    return features.split(",")
def constraint_to_si(expr):
    """Convert a constraint to SI if possible.

    :param expr: the constraint expression to convert.
    :return: a ``(satisfiable, replace_list)`` tuple, where ``replace_list``
             pairs original ASTs with their SI replacements.
    """
    # The original assigned satisfiable/replace_list placeholders that were
    # immediately overwritten (dead code) and iterated with py2-only xrange;
    # enumerate works on both Python 2 and 3.
    satisfiable, replace_list = backends.vsa.constraint_to_si(expr)
    for i, (ori, new) in enumerate(replace_list):
        if not isinstance(new, Base):
            # Wrap raw SI results into claripy ASTs so callers get Base
            # instances.
            new = BVS(new.name, new._bits, min=new._lower_bound, max=new._upper_bound,
                      stride=new._stride, explicit_name=True)
            replace_list[i] = (ori, new)
    return satisfiable, replace_list
:param expr:
:return: |
def get_assessment_offered_bank_assignment_session(self, proxy):
    """Gets the session for assigning offered assessments to bank mappings.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.AssessmentOfferedBankAssignmentSession)
            - an ``AssessmentOfferedBankAssignmentSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented -
            ``supports_assessment_offered_bank_assignment()`` is ``false``
    """
    # Guard: this optional session is only available when the service
    # advertises support for it.
    if not self.supports_assessment_offered_bank_assignment():
        raise errors.Unimplemented()
    return sessions.AssessmentOfferedBankAssignmentSession(
        proxy=proxy, runtime=self._runtime)
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.AssessmentOfferedBankAssignmentSession)
- an ``AssessmentOfferedBankAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_assessment_offered_bank_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_offered_bank_assignment()`` is ``true``.* |
def section_branch_orders(neurites, neurite_type=NeuriteType.all):
    """Section branch orders in a collection of neurites."""
    order_of = sectionfunc.branch_order
    return map_sections(order_of, neurites, neurite_type=neurite_type)
async def is_bot(self):
    """Return ``True`` if the signed-in user is a bot, ``False`` otherwise."""
    if self._bot is None:
        # Cache the flag so get_me() is only awaited once.
        me = await self.get_me()
        self._bot = me.bot
    return self._bot
def sub_for(expr, substitutions):
    """Substitute subexpressions in `expr` with expression to expression
    mapping `substitutions`.

    Parameters
    ----------
    expr : ibis.expr.types.Expr
        An Ibis expression
    substitutions : List[Tuple[ibis.expr.types.Expr, ibis.expr.types.Expr]]
        Pairs of (target, replacement); any subexpression of `expr` equal to
        a target is replaced by the corresponding replacement.

    Returns
    -------
    ibis.expr.types.Expr
        An Ibis expression
    """
    # Key on the underlying op nodes, as the substitutor expects.
    mapping = {}
    for target, replacement in substitutions:
        mapping[target.op()] = replacement
    return Substitutor().substitute(expr, mapping)
mapping `substitutions`.
Parameters
----------
expr : ibis.expr.types.Expr
An Ibis expression
substitutions : List[Tuple[ibis.expr.types.Expr, ibis.expr.types.Expr]]
A mapping from expression to expression. If any subexpression of `expr`
is equal to any of the keys in `substitutions`, the value for that key
will replace the corresponding expression in `expr`.
Returns
-------
ibis.expr.types.Expr
An Ibis expression |
def _dispatch(self, tree):
"_dispatcher function, _dispatching tree type T to method _T."
if isinstance(tree, list):
for t in tree:
self._dispatch(t)
return
meth = getattr(self, "_"+tree.__class__.__name__)
if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
return
meth(tree) | _dispatcher function, _dispatching tree type T to method _T. |
def update_share(self, share_id, **kwargs):
    """Updates a given share.

    :param share_id: (int) Share ID
    :param perms: (int) update permissions (see share_file_with_user() below)
    :param password: (string) updated password for public link Share
    :param public_upload: (boolean) enable/disable public upload for public shares
    :returns: True if the operation succeeded, False otherwise
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    perms = kwargs.get('perms', None)
    password = kwargs.get('password', None)
    public_upload = kwargs.get('public_upload', None)

    # Out-of-range permission values are silently dropped.
    if isinstance(perms, int) and perms > self.OCS_PERMISSION_ALL:
        perms = None

    nothing_to_update = not (perms or password or (public_upload is not None))
    if nothing_to_update or not isinstance(share_id, int):
        return False

    data = {}
    if perms:
        data['permissions'] = perms
    if isinstance(password, six.string_types):
        data['password'] = password
    if (public_upload is not None) and (isinstance(public_upload, bool)):
        data['publicUpload'] = str(public_upload).lower()

    res = self._make_ocs_request(
        'PUT',
        self.OCS_SERVICE_SHARE,
        'shares/' + str(share_id),
        data=data
    )
    if res.status_code != 200:
        raise HTTPResponseError(res)
    return True
:param share_id: (int) Share ID
:param perms: (int) update permissions (see share_file_with_user() below)
:param password: (string) updated password for public link Share
:param public_upload: (boolean) enable/disable public upload for public shares
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned |
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
    """Port of numpy's linspace to pure python.

    Does not support `dtype` (accepted for interface compatibility only),
    and returns lists of floats.

    :param start: first value of the sequence.
    :param stop: final value; included as the last sample iff `endpoint`.
    :param num: number of samples to generate; <= 0 yields an empty list.
    :param endpoint: if True, `stop` is the last sample.
    :param retstep: if True, return ``(samples, step)`` instead of samples
        (step is nan when it is undefined), matching numpy's behavior.
    """
    num = int(num)
    # Force float arithmetic, mirroring numpy's promotion of the endpoints.
    start = start * 1.
    stop = stop * 1.
    if num <= 0:
        y, step = [], nan
    elif num == 1:
        # A single sample is just `start`; the step is undefined.
        # (The original also had an unreachable `num == 1` branch after this
        # early-out, and dropped the step from the retstep return.)
        y, step = [start], nan
    else:
        # With an endpoint there are num-1 intervals, otherwise num.
        divisor = (num - 1) if endpoint else num
        step = (stop - start) / float(divisor)
        # Multiply rather than accumulate (+= step) to avoid compounding
        # floating-point error across many samples.
        y = [start + i * step for i in range(num)]
        if endpoint:
            y[-1] = stop  # guarantee the endpoint is exact
    if retstep:
        return y, step
    return y
returns lists of floats. |
def legislator_vote_value(self):
    """If this vote was accessed through the legislator.votes_manager,
    return the value of this legislator's vote ('yes', 'no', or 'other').
    """
    if not hasattr(self, 'legislator'):
        msg = ('legislator_vote_value can only be called '
               'from a vote accessed by legislator.votes_manager.')
        raise ValueError(msg)
    leg_id = self.legislator.id
    # Scan each bucket for this legislator's id; implicit None if absent.
    for value in ('yes', 'no', 'other'):
        voters = self[value + '_votes']
        if any(voter['leg_id'] == leg_id for voter in voters):
            return value
return the value of this legislator's vote. |
def transfer(self, data):
    """Transfers data over SPI.

    Arguments:
        data: The data to transfer.

    Returns:
        The data returned by the SPI device.
    """
    settings = self.transfer_settings
    settings.spi_tx_size = len(data)
    self.transfer_settings = settings

    # Collect chunks in a list and join once at the end; the original built
    # a string with quadratic `+=` and then ran a no-op ''.join over it.
    chunks = []
    received = 0
    # The device accepts at most 60 bytes of payload per command.
    for i in range(0, len(data), 60):
        chunk = self.sendCommand(commands.SPITransferCommand(data[i:i + 60])).data
        chunks.append(chunk)
        received += len(chunk)
        time.sleep(0.01)
    # Poll with empty transfers until the device has returned as many
    # bytes as were sent.
    while received < len(data):
        chunk = self.sendCommand(commands.SPITransferCommand('')).data
        chunks.append(chunk)
        received += len(chunk)
    return ''.join(chunks)
Arguments:
data: The data to transfer.
Returns:
The data returned by the SPI device. |
def kill_cursor(self, cursor):
    """Kill (cut) the text selected by the given cursor."""
    selection = cursor.selectedText()
    if not selection:
        # Nothing selected; nothing to kill.
        return
    cursor.removeSelectedText()
    self.kill(selection)
def close_compute_projects(self, compute):
    """Close all projects that are running on the given compute.

    Old-style asyncio coroutine (``yield from``); must be awaited/yielded
    from by the caller.

    :param compute: the compute node whose projects should be closed.
    """
    for project in self._projects.values():
        if compute in project.computes:
            yield from project.close()
def post_helper(form_tag=True, edit_mode=False):
    """Build the crispy-forms layout helper for the post form.

    :param form_tag: whether the rendered form includes its own <form> tag.
    :param edit_mode: when editing an existing post, the threadwatch row
        is omitted.
    """
    helper = FormHelper()
    helper.form_action = '.'
    helper.attrs = {'data_abide': ''}
    helper.form_tag = form_tag

    rows = [
        Row(
            Column(
                'text',
                css_class='small-12'
            ),
        ),
    ]
    if not edit_mode:
        # Only new posts offer the threadwatch subscription option.
        rows.append(
            Row(
                Column(
                    'threadwatch',
                    css_class='small-12'
                ),
            ),
        )
    rows.append(
        ButtonHolderPanel(
            Submit('submit', _('Submit')),
            css_class='text-right',
        ),
    )
    helper.layout = Layout(*rows)
    return helper
def install_deny_hook(api):
    """Install a deny import hook for a Qt api.

    Parameters
    ----------
    api : str
        The Qt api whose import should be prevented.

    Raises
    ------
    ValueError
        If `api` is the Qt api currently in use.

    Example
    -------
    >>> install_deny_hook("pyqt4")
    >>> import PyQt4
    Traceback (most recent call last):...
    ImportError: Import of PyQt4 is denied.
    """
    if api == USED_API:
        # The original raised a bare ValueError with no message; explain why.
        raise ValueError("cannot deny the Qt api currently in use: {!r}".format(api))
    sys.meta_path.insert(0, ImportHookDeny(api))
Parameters
----------
api : str
The Qt api whose import should be prevented
Example
-------
>>> install_deny_import("pyqt4")
>>> import PyQt4
Traceback (most recent call last):...
ImportError: Import of PyQt4 is denied. |
def build_single_handler_applications(paths, argvs=None):
    """Return a dictionary mapping routes to Bokeh applications built using
    single handlers, for specified files or directories.

    This iterates over ``paths`` and ``argvs`` and calls
    ``build_single_handler_application`` on each to generate the mapping.

    Args:
        paths (seq[str]) : paths to files or directories for creating Bokeh
            applications.
        argvs (dict[str, list[str]], optional) : mapping of paths to command
            line arguments to pass to the handler for each path

    Returns:
        dict[str, Application]

    Raises:
        RuntimeError: if more than one application has no URL path.
    """
    applications = {}
    # BUG FIX: the original read ``argvs = {} or argvs``, which evaluates to
    # ``argvs`` itself ({} is falsy) and crashes on the default None at
    # ``argvs.get``. Default to an empty dict instead.
    argvs = argvs or {}
    for path in paths:
        application = build_single_handler_application(path, argvs.get(path, []))
        route = application.handlers[0].url_path()
        if not route:
            if '/' in applications:
                raise RuntimeError("Don't know the URL path to use for %s" % (path))
            route = '/'
        applications[route] = application
    return applications
single handlers, for specified files or directories.
This function iterates over ``paths`` and ``argvs`` and calls
:func:`~bokeh.command.util.build_single_handler_application` on each
to generate the mapping.
Args:
path (seq[str]) : paths to files or directories for creating Bokeh
applications.
argvs (dict[str, list[str]], optional) : mapping of paths to command
line arguments to pass to the handler for each path
Returns:
dict[str, Application]
Raises:
RuntimeError |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.