code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def send_command_ack(self, device_id, action):
yield from self._ready_to_send.acquire()
acknowledgement = None
try:
self._command_ack.clear()
self.send_command(device_id, action)
log.debug('waiting for acknowledgement')
try:
yield from asyncio.wait_for(self._command_ack.wait(),
TIMEOUT.seconds, loop=self.loop)
log.debug('packet acknowledged')
except concurrent.futures._base.TimeoutError:
acknowledgement = {'ok': False, 'message': 'timeout'}
log.warning('acknowledge timeout')
else:
acknowledgement = self._last_ack.get('ok', False)
finally:
self._ready_to_send.release()
return acknowledgement | Send command, wait for gateway to respond with acknowledgment. |
def _get_sample(self, mode, encoding):
self._open_file(mode, encoding)
self._sample = self._file.read(UniversalCsvReader.sample_size)
self._file.close() | Get a sample from the next current input file.
:param str mode: The mode for opening the file.
:param str|None encoding: The encoding of the file. None to open the file in binary mode. |
def _get_obj_ct(self, obj):
if not hasattr(obj, '_wfct'):
if hasattr(obj, 'polymorphic_ctype'):
obj._wfct = obj.polymorphic_ctype
else:
obj._wfct = ContentType.objects.get_for_model(obj)
return obj._wfct | Look up and return object's content type and cache for reuse |
def getLogger(cls, name=None):
return logging.getLogger("{0}.{1}".format(cls.BASENAME, name) if name else cls.BASENAME) | Retrieves the Python native logger
:param name: The name of the logger instance in the VSG namespace (VSG.<name>); a None value will use the VSG root.
:return: The instance of the Python logger object. |
def from_outcars_and_structures(cls, outcars, structures,
calc_ionic_from_zval=False):
p_elecs = []
p_ions = []
for i, o in enumerate(outcars):
p_elecs.append(o.p_elec)
if calc_ionic_from_zval:
p_ions.append(
get_total_ionic_dipole(structures[i], o.zval_dict))
else:
p_ions.append(o.p_ion)
return cls(p_elecs, p_ions, structures) | Create Polarization object from list of Outcars and Structures in order
of nonpolar to polar.
Note, we recommend calculating the ionic dipole moment using calc_ionic
rather than using the values in Outcar (see module comments). To do this set
calc_ionic_from_zval = True |
def exists_alias(self, alias_name, index_name=None):
return self._es_conn.indices.exists_alias(index=index_name, name=alias_name) | Check whether or not the given alias exists
:return: True if alias already exist |
def Print(self):
for hypo, prob in sorted(self.Items()):
print(hypo, prob) | Prints the hypotheses and their probabilities. |
def get_image_path(image_lists, label_name, index, image_dir, category):
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path | Returns a path to an image for a label at the given index.
Args:
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters. |
def report_onlysize(bytes_so_far, total_size, speed, eta):
percent = int(bytes_so_far * 100 / total_size)
current = approximate_size(bytes_so_far).center(10)
total = approximate_size(total_size).center(10)
sys.stdout.write('D: {0}% -{1}/{2}'.format(percent, current, total) + "eta {0}".format(eta))
sys.stdout.write("\r")
sys.stdout.flush() | This callback for the download function is used when console width
is not enough to print the bar.
It prints only the sizes |
def log(self, timer_name, node):
timestamp = time.time()
if hasattr(self, timer_name):
getattr(self, timer_name).append({
"node":node,
"time":timestamp})
else:
setattr(self, timer_name, [{"node":node, "time":timestamp}]) | logs an event in the timer |
def __getFileObj(self, f):
if not f:
raise ShapefileException("No file-like object available.")
elif hasattr(f, "write"):
return f
else:
pth = os.path.split(f)[0]
if pth and not os.path.exists(pth):
os.makedirs(pth)
return open(f, "wb+") | Safety handler to verify file-like objects |
def custom_template_name(self):
base_path = getattr(settings, "CUSTOM_SPECIAL_COVERAGE_PATH", "special_coverage/custom")
if base_path is None:
base_path = ""
return "{0}/{1}_custom.html".format(
base_path, self.slug.replace("-", "_")
).lstrip("/") | Returns the path for the custom special coverage template we want. |
def _update_show_toolbars_action(self):
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip) | Update the text displayed in the menu entry. |
async def pulse(self):
get_state = GetState(pyvlx=self.pyvlx)
await get_state.do_api_call()
if not get_state.success:
raise PyVLXException("Unable to send get state.") | Send get state request to API to keep the connection alive. |
def wrap(self, row: Union[Mapping[str, Any], Sequence[Any]]):
return (
self.dataclass(
**{
ident: row[column_name]
for ident, column_name in self.ids_and_column_names.items()
}
)
if isinstance(row, Mapping)
else self.dataclass(
**{ident: val for ident, val in zip(self.ids_and_column_names.keys(), row)}
)
) | Return row tuple for row. |
def get_version():
_globals = {}
_locals = {}
exec(
compile(
open(TOP + "/manta/version.py").read(), TOP + "/manta/version.py",
'exec'), _globals, _locals)
return _locals["__version__"] | Get the python-manta version without having to import the manta package,
which requires deps to already be installed. |
def encode_hook(self, hook, msg):
if 'name' in hook:
msg.name = str_to_bytes(hook['name'])
else:
self.encode_modfun(hook, msg.modfun)
return msg | Encodes a commit hook dict into the protobuf message. Used in
bucket properties.
:param hook: the hook to encode
:type hook: dict
:param msg: the protobuf message to fill
:type msg: riak.pb.riak_pb2.RpbCommitHook
:rtype riak.pb.riak_pb2.RpbCommitHook |
def _format_type_in_doc(self, namespace, data_type):
if is_void_type(data_type):
return 'None'
elif is_user_defined_type(data_type):
return ':class:`{}.{}.{}`'.format(
self.args.types_package, namespace.name, fmt_type(data_type))
else:
return fmt_type(data_type) | Returns a string that can be recognized by Sphinx as a type reference
in a docstring. |
def enum(cls, options, values):
names, real = zip(*options)
del names
def factory(i, name):
return cls(i, name, (len(real),), lambda a: real[a[0]], values)
return factory | Create an ArgumentType where you choose one of a set of known values. |
def inspect_filter_calculation(self):
try:
node = self.ctx.cif_filter
self.ctx.cif = node.outputs.cif
except exceptions.NotExistent:
self.report('aborting: CifFilterCalculation<{}> did not return the required cif output'.format(node.uuid))
return self.exit_codes.ERROR_CIF_FILTER_FAILED | Inspect the result of the CifFilterCalculation, verifying that it produced a CifData output node. |
def main():
logging.basicConfig()
logger.info("mmi-runner")
warnings.warn(
"You are using the mmi-runner script, please switch to `mmi runner`",
DeprecationWarning
)
arguments = docopt.docopt(__doc__)
kwargs = parse_args(arguments)
runner = mmi.runner.Runner(
**kwargs
)
runner.run() | run mmi runner |
def getVisibility(self):
try:
if self.map[GET_VISIBILITY_PROPERTY] == 'VISIBLE':
return VISIBLE
elif self.map[GET_VISIBILITY_PROPERTY] == 'INVISIBLE':
return INVISIBLE
elif self.map[GET_VISIBILITY_PROPERTY] == 'GONE':
return GONE
else:
return -2
except:
return -1 | Gets the View visibility |
def unbind(self, handler, argspec):
self.handlers[argspec.key].remove((handler, argspec))
if not len(self.handlers[argspec.key]):
del self.handlers[argspec.key] | handler will no longer be called if args match argspec
:param argspec: instance of ArgSpec - args to be matched |
def listrecursive(path, ext=None):
filenames = set()
for root, dirs, files in os.walk(path):
if ext:
if ext == 'tif' or ext == 'tiff':
tmp = fnmatch.filter(files, '*.' + 'tiff')
files = tmp + fnmatch.filter(files, '*.' + 'tif')
else:
files = fnmatch.filter(files, '*.' + ext)
for filename in files:
filenames.add(os.path.join(root, filename))
filenames = list(filenames)
filenames.sort()
return sorted(filenames) | List files recursively |
def get_fetcher_assets(self, dt):
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]['sid']
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else [] | Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects. |
def is_job_done(job_id, conn=None):
result = False
get_done = RBJ.get_all(DONE, index=STATUS_FIELD)
for item in get_done.filter({ID_FIELD: job_id}).run(conn):
result = item
return result | is_job_done function checks if Brain.Jobs Status is 'Done'
:param job_id: <str> id for the job
:param conn: (optional)<connection> to run on
:return: <dict> if the job is done, <false> if not |
def mnemonic(self, value):
if value not in REIL_MNEMONICS:
raise Exception("Invalid instruction mnemonic : %s" % str(value))
self._mnemonic = value | Set instruction mnemonic. |
def pause(self):
self._mq.send("p", True, type=1)
self._paused = True | Pause pulse capture |
def apply_and_save(self):
patches = self.patches
content = None
with open(self.IN_PATH) as f_in:
content = f_in.read()
for key in self.replaced_word_dict:
content = content.replace(key, self.replaced_word_dict[key])
out_patches = []
for patch in patches:
pattern = re.compile(patch['src'], re.MULTILINE)
(content, subs_num) = re.subn(pattern, patch['dest'],
content)
if subs_num > 0:
patch['applied'] = True
out_patches.append(patch)
for patch in out_patches:
if patch.get('required') and not patch.get('applied'):
Log.warn('Patch not applied {0}'.format(patch['src']))
with open(self.OUT_PATH, 'w') as f_out:
f_out.write(content)
self.pathces = out_patches
content = None | Apply replaced words and patches, and save setup.py file. |
def sas_interconnect_types(self):
if not self.__sas_interconnect_types:
self.__sas_interconnect_types = SasInterconnectTypes(self.__connection)
return self.__sas_interconnect_types | Gets the SasInterconnectTypes API client.
Returns:
SasInterconnectTypes: |
def pull_all_external(collector, **kwargs):
deps = set()
images = collector.configuration["images"]
for layer in Builder().layered(images):
for image_name, image in layer:
for dep in image.commands.external_dependencies:
deps.add(dep)
for dep in sorted(deps):
kwargs["image"] = dep
pull_arbitrary(collector, **kwargs) | Pull all the external dependencies of all the images |
def shutdown(message=None, timeout=5, force_close=True, reboot=False,
in_seconds=False, only_on_pending_reboot=False):
if six.PY2:
message = _to_unicode(message)
timeout = _convert_minutes_seconds(timeout, in_seconds)
if only_on_pending_reboot and not get_pending_reboot():
return False
if message and not isinstance(message, six.string_types):
message = message.decode('utf-8')
try:
win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
force_close, reboot)
return True
except pywintypes.error as exc:
(number, context, message) = exc.args
log.error('Failed to shutdown the system')
log.error('nbr: %s', number)
log.error('ctx: %s', context)
log.error('msg: %s', message)
return False | Shutdown a running system.
Args:
message (str):
The message to display to the user before shutting down.
timeout (int):
The length of time (in seconds) that the shutdown dialog box should
be displayed. While this dialog box is displayed, the shutdown can
be aborted using the ``system.shutdown_abort`` function.
If timeout is not zero, InitiateSystemShutdown displays a dialog box
on the specified computer. The dialog box displays the name of the
user who called the function, the message specified by the lpMessage
parameter, and prompts the user to log off. The dialog box beeps
when it is created and remains on top of other windows (system
modal). The dialog box can be moved but not closed. A timer counts
down the remaining time before the shutdown occurs.
If timeout is zero, the computer shuts down immediately without
displaying the dialog box and cannot be stopped by
``system.shutdown_abort``.
Default is 5 minutes
in_seconds (bool):
``True`` will cause the ``timeout`` parameter to be in seconds.
``False`` will be in minutes. Default is ``False``.
.. versionadded:: 2015.8.0
force_close (bool):
``True`` will force close all open applications. ``False`` will
display a dialog box instructing the user to close open
applications. Default is ``True``.
reboot (bool):
``True`` restarts the computer immediately after shutdown. ``False``
powers down the system. Default is ``False``.
only_on_pending_reboot (bool): If this is set to True, then the shutdown
will only proceed if the system reports a pending reboot. To
optionally shutdown in a highstate, consider using the shutdown
state instead of this module.
only_on_pending_reboot (bool):
If ``True`` the shutdown will only proceed if there is a reboot
pending. ``False`` will shutdown the system. Default is ``False``.
Returns:
bool:
``True`` if successful (a shutdown or reboot will occur), otherwise
``False``
CLI Example:
.. code-block:: bash
salt '*' system.shutdown "System will shutdown in 5 minutes" |
def filter(self, local_name=None, name=None, ns_uri=None, node_type=None,
filter_fn=None, first_only=False):
if filter_fn is None:
def filter_fn(n):
if node_type is not None:
if isinstance(node_type, int):
if not n.is_type(node_type):
return False
elif n.__class__ != node_type:
return False
if name is not None and n.name != name:
return False
if local_name is not None and n.local_name != local_name:
return False
if ns_uri is not None and n.ns_uri != ns_uri:
return False
return True
nodelist = filter(filter_fn, self)
if first_only:
return nodelist[0] if nodelist else None
else:
return NodeList(nodelist) | Apply filters to the set of nodes in this list.
:param local_name: a local name used to filter the nodes.
:type local_name: string or None
:param name: a name used to filter the nodes.
:type name: string or None
:param ns_uri: a namespace URI used to filter the nodes.
If *None* all nodes are returned regardless of namespace.
:type ns_uri: string or None
:param node_type: a node type definition used to filter the nodes.
:type node_type: int node type constant, class, or None
:param filter_fn: an arbitrary function to filter nodes in this list.
This function must accept a single :class:`Node` argument and
return a bool indicating whether to include the node in the
filtered results.
.. note:: if ``filter_fn`` is provided all other filter arguments
are ignored.
:type filter_fn: function or None
:return: the type of the return value depends on the value of the
``first_only`` parameter and how many nodes match the filter:
- if ``first_only=False`` return a :class:`NodeList` of filtered
nodes, which will be empty if there are no matching nodes.
- if ``first_only=True`` and at least one node matches,
return the first matching :class:`Node`
- if ``first_only=True`` and there are no matching nodes,
return *None* |
def _modifyItemTag(self, item_id, action, tag):
return self.httpPost(ReaderUrl.EDIT_TAG_URL,
{'i': item_id, action: tag, 'ac': 'edit-tags'}) | wrapper around actual HTTP POST string for modify tags |
def process(self, metric):
if not boto:
return
collector = str(metric.getCollectorPath())
metricname = str(metric.getMetricPath())
for rule in self.rules:
self.log.debug(
"Comparing Collector: [%s] with (%s) "
"and Metric: [%s] with (%s)",
str(rule['collector']),
collector,
str(rule['metric']),
metricname
)
if ((str(rule['collector']) == collector and
str(rule['metric']) == metricname)):
if rule['collect_by_instance'] and self.instance_id:
self.send_metrics_to_cloudwatch(
rule,
metric,
{'InstanceId': self.instance_id})
if rule['collect_without_dimension']:
self.send_metrics_to_cloudwatch(
rule,
metric,
{}) | Process a metric and send it to CloudWatch |
def get_arrays_from_file(params_file, params=None):
try:
f = h5py.File(params_file, 'r')
except:
raise ValueError('File not found.')
if params is not None:
if not isinstance(params, list):
params = [params]
for p in params:
if p not in f.keys():
raise ValueError('Parameter {} is not in {}'
.format(p, params_file))
else:
params = [str(k) for k in f.keys()]
params_values = {p:f[p][:] for p in params}
try:
bandwidth = f.attrs["bandwidth"]
except KeyError:
bandwidth = "scott"
f.close()
return params_values, bandwidth | Reads the values of one or more parameters from an hdf file and
returns as a dictionary.
Parameters
----------
params_file : str
The hdf file that contains the values of the parameters.
params : {None, list}
If provided, will just retrieve the given parameter names.
Returns
-------
dict
A dictionary of the parameters mapping `param_name -> array`. |
def get_file_listing_sha(listing_paths: Iterable) -> str:
return sha256(''.join(sorted(listing_paths)).encode('utf-8')).hexdigest() | Return sha256 string for group of FTP listings. |
def _shutdown_unlocked(self, context, lru=None, new_context=None):
LOG.info('%r._shutdown_unlocked(): shutting down %r', self, context)
context.shutdown()
via = self._via_by_context.get(context)
if via:
lru = self._lru_by_via.get(via)
if lru:
if context in lru:
lru.remove(context)
if new_context:
lru.append(new_context)
self._forget_context_unlocked(context) | Arrange for `context` to be shut down, and optionally add `new_context`
to the LRU list while holding the lock. |
def clear(self):
if os.path.exists(self.path):
os.remove(self.path) | Remove all existing done markers and the file used to store the dones. |
def create_appointment_group(self, appointment_group, **kwargs):
from canvasapi.appointment_group import AppointmentGroup
if (
isinstance(appointment_group, dict) and
'context_codes' in appointment_group and
'title' in appointment_group
):
kwargs['appointment_group'] = appointment_group
elif (
isinstance(appointment_group, dict) and
'context_codes' not in appointment_group
):
raise RequiredFieldMissing(
"Dictionary with key 'context_codes' is missing."
)
elif isinstance(appointment_group, dict) and 'title' not in appointment_group:
raise RequiredFieldMissing("Dictionary with key 'title' is missing.")
response = self.__requester.request(
'POST',
'appointment_groups',
_kwargs=combine_kwargs(**kwargs)
)
return AppointmentGroup(self.__requester, response.json()) | Create a new Appointment Group.
:calls: `POST /api/v1/appointment_groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.create>`_
:param appointment_group: The attributes of the appointment group.
:type appointment_group: `dict`
:param title: The title of the appointment group.
:type title: `str`
:rtype: :class:`canvasapi.appointment_group.AppointmentGroup` |
def claim_exp(self, data):
expiration = getattr(settings, 'OAUTH_ID_TOKEN_EXPIRATION', 30)
expires = self.now + timedelta(seconds=expiration)
return timegm(expires.utctimetuple()) | Required expiration time. |
def request(self, method, params):
identifier = random.randint(1, 1000)
self._transport.write(jsonrpc_request(method, identifier, params))
self._buffer[identifier] = {'flag': asyncio.Event()}
yield from self._buffer[identifier]['flag'].wait()
result = self._buffer[identifier]['data']
del self._buffer[identifier]['data']
return result | Send a JSONRPC request. |
def update_ngram(self, ngram, count):
query = "UPDATE _{0}_gram SET count = {1}".format(len(ngram), count)
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query) | Updates a given ngram in the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram. |
def _prepare_load_balancers(self):
stack = {
A.NAME: self[A.NAME],
A.VERSION: self[A.VERSION],
}
for load_balancer in self.get(R.LOAD_BALANCERS, []):
svars = {A.STACK: stack}
load_balancer[A.loadbalancer.VARS] = svars | Prepare load balancer variables |
def build_message(self, stat, value):
return ' '.join((self.prefix + str(stat), str(value), str(round(time())))) | Build a metric in Graphite format. |
def get_multiplicon_seeds(self, redundant=False):
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.in_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue | Return a generator of the IDs of multiplicons that are initial
seeding 'pairs' in level 2 multiplicons.
Arguments:
o redundant - if true, report redundant multiplicons |
def _cls_fqn(self, cls):
ns = self._namespace_stack[-1]
if ns in ['__base__', None]:
return cls.__name__
else:
return ns + '.' + cls.__name__ | Returns fully qualified name for the class based on current namespace
and the class name. |
def eval_constraints(self, constraints):
try:
return all(self.eval_ast(c) for c in constraints)
except errors.ClaripyZeroDivisionError:
return False | Returns whether the constraints is satisfied trivially by using the
last model. |
def random_subset_ids_by_count(self, count_per_class=1):
class_sizes = self.class_sizes
subsets = list()
if count_per_class < 1:
warnings.warn('Atleast one sample must be selected from each class')
return list()
elif count_per_class >= self.num_samples:
warnings.warn('All samples requested - returning a copy!')
return self.keys
for class_id, class_size in class_sizes.items():
this_class = self.keys_with_value(self.classes, class_id)
random.shuffle(this_class)
subset_size_this_class = max(0, min(class_size, count_per_class))
if subset_size_this_class < 1 or this_class is None:
warnings.warn('No subjects from class {} were selected.'.format(class_id))
else:
subsets_this_class = this_class[0:count_per_class]
subsets.extend(subsets_this_class)
if len(subsets) > 0:
return subsets
else:
warnings.warn('Zero samples were selected. Returning an empty list!')
return list() | Returns a random subset of sample ids of specified size by count,
within each class.
Parameters
----------
count_per_class : int
Exact number of samples per each class.
Returns
-------
subset : list
Combined list of sample ids from all classes. |
def score(self, env=None, score_out=None):
messages = {}
self.assignment.set_args(
score=True,
score_out=score_out,
)
if env is None:
import __main__
env = __main__.__dict__
self.run('scoring', messages, env=env)
return messages['scoring'] | Run the scoring protocol.
score_out -- str; a file name to write the point breakdown
into.
Returns: dict; maps score tag (str) -> points (float) |
def interconnect_all(self):
for dep in topologically_sorted(self._provides):
if hasattr(dep, '__injections__') and not hasattr(dep, '__injections_source__'):
self.inject(dep) | Propagate dependencies for provided instances |
def _update_task(self, task):
self.task = task
self.task.data.update(self.task_data)
self.task_type = task.task_spec.__class__.__name__
self.spec = task.task_spec
self.task_name = task.get_name()
self.activity = getattr(self.spec, 'service_class', '')
self._set_lane_data() | Assigns current task step to self.task
then updates the task's data with self.task_data
Args:
task: Task object. |
def add_how(voevent, descriptions=None, references=None):
if not voevent.xpath('How'):
etree.SubElement(voevent, 'How')
if descriptions is not None:
for desc in _listify(descriptions):
etree.SubElement(voevent.How, 'Description')
voevent.How.Description[-1] = desc
if references is not None:
voevent.How.extend(_listify(references)) | Add descriptions or references to the How section.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
descriptions(str): Description string, or list of description
strings.
references(:py:class:`voeventparse.misc.Reference`): A reference element
(or list thereof). |
def move_into(self, destination_folder):
headers = self.headers
endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id + '/move'
payload = '{ "DestinationId": "' + destination_folder.id + '"}'
r = requests.post(endpoint, headers=headers, data=payload)
if check_response(r):
return_folder = r.json()
return self._json_to_folder(self.account, return_folder) | Move the Folder into a different folder.
This makes the Folder provided a child folder of the destination_folder.
Raises:
AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.
Args:
destination_folder: A :class:`Folder <pyOutlook.core.folder.Folder>` that should become the parent
Returns:
A new :class:`Folder <pyOutlook.core.folder.Folder>` that is now
inside of the destination_folder. |
def init_app(self, app):
host = app.config.get('STATS_HOSTNAME', 'localhost')
port = app.config.get('STATS_PORT', 8125)
base_key = app.config.get('STATS_BASE_KEY', app.name)
client = _StatsClient(
host=host,
port=port,
prefix=base_key,
)
app.before_request(client.flask_time_start)
app.after_request(client.flask_time_end)
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions.setdefault('stats', {})
app.extensions['stats'][self] = client
return client | Inititialise the extension with the app object.
:param app: Your application object |
def defaultSystem():
rsystem = platform.system()
if rsystem in os_canon:
rsystem = os_canon[rsystem][0]
return rsystem | Return the canonicalized system name. |
def __parse(self) -> object:
char = self.data[self.idx: self.idx + 1]
if char in [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'0']:
str_len = int(self.__read_to(b':'))
return self.__read(str_len)
elif char == b'i':
self.idx += 1
return int(self.__read_to(b'e'))
elif char == b'd':
return self.__parse_dict()
elif char == b'l':
return self.__parse_list()
elif char == b'':
raise bencodepy.DecodingError('Unexpected End of File at index position of {0}.'.format(str(self.idx)))
else:
raise bencodepy.DecodingError(
'Invalid token character ({0}) at position {1}.'.format(str(char), str(self.idx))) | Selects the appropriate method to decode next bencode element and returns the result. |
def restore_row(self, row, schema):
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row | Restore row from SQL |
def get_transactions(self, include_investment=False):
assert_pd()
s = StringIO(self.get_transactions_csv(
include_investment=include_investment))
s.seek(0)
df = pd.read_csv(s, parse_dates=['Date'])
df.columns = [c.lower().replace(' ', '_') for c in df.columns]
df.category = (df.category.str.lower()
.replace('uncategorized', pd.np.nan))
return df | Returns the transaction data as a Pandas DataFrame. |
def change_nick(self, nick):
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick) | Update this user's nick in all joined channels. |
def setup_ssh_tunnel(job_id, local_port, remote_port):
cmd = ['dx', 'ssh', '--suppress-running-check', job_id, '-o', 'StrictHostKeyChecking no']
cmd += ['-f', '-L', '{0}:localhost:{1}'.format(local_port, remote_port), '-N']
subprocess.check_call(cmd) | Setup an ssh tunnel to the given job-id. This will establish
the tunnel from the given local_port to the given remote_port
and then exit, keeping the tunnel in place until the job is
terminated. |
def save(self):
if self.rater is not None:
self.rater.set('modified', datetime.now().isoformat())
xml = parseString(tostring(self.root))
with open(self.xml_file, 'w') as f:
f.write(xml.toxml()) | Save xml to file. |
def delete(self, monitor_id):
if not self._state:
raise InvalidState("State was not properly obtained from the app")
monitors = self.list()
bit = None
for monitor in monitors:
if monitor_id != monitor['monitor_id']:
continue
bit = monitor['monitor_id']
if not bit:
raise MonitorNotFound("No monitor was found with that term.")
url = self.ALERTS_DELETE_URL.format(requestX=self._state[3])
self._log.debug("Deleting alert using: %s" % url)
payload = [None, monitor_id]
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to delete by ID: %s"
% response.content)
return True | Delete a monitor by ID. |
def undo(self):
_, _, undo_state = self._undo_stack.back()
spike_clusters_new = self._spike_clusters_base.copy()
for spike_ids, cluster_ids, _ in self._undo_stack:
if spike_ids is not None:
spike_clusters_new[spike_ids] = cluster_ids
changed = np.nonzero(self._spike_clusters !=
spike_clusters_new)[0]
clusters_changed = spike_clusters_new[changed]
up = self._do_assign(changed, clusters_changed)
up.history = 'undo'
up.undo_state = undo_state
self.emit('cluster', up)
return up | Undo the last cluster assignment operation.
Returns
-------
up : UpdateInfo instance of the changes done by this operation. |
def get_all_chats(self):
chats = self.wapi_functions.getAllChats()
if chats:
return [factory_chat(chat, self) for chat in chats]
else:
return [] | Fetches all chats
:return: List of chats
:rtype: list[Chat] |
def get_all_longest_col_lengths(self):
response = {}
for col in self.col_list:
response[col] = self._longest_val_in_column(col)
return response | iterate over all columns and get their longest values
:return: dict, {"column_name": 132} |
def profile(fun, *args, **kwargs):
timer_name = kwargs.pop("prof_name", None)
if not timer_name:
module = inspect.getmodule(fun)
c = [module.__name__]
parentclass = labtypes.get_class_that_defined_method(fun)
if parentclass:
c.append(parentclass.__name__)
c.append(fun.__name__)
timer_name = ".".join(c)
start(timer_name)
ret = fun(*args, **kwargs)
stop(timer_name)
return ret | Profile a function. |
def list_of(validate_item):
def validate(value, should_raise=True):
validate_type = is_type(list)
if not validate_type(value, should_raise=should_raise):
return False
for item in value:
try:
validate_item(item)
except TypeError as e:
if should_raise:
samtranslator.model.exceptions.prepend(e, "list contained an invalid item")
raise
return False
return True
return validate | Returns a validator function that succeeds only if the input is a list, and each item in the list passes as input
to the provided validator validate_item.
:param callable validate_item: the validator function for items in the list
:returns: a function which returns True its input is an list of valid items, and raises TypeError otherwise
:rtype: callable |
def set_of(*generators):
class SetOfGenerators(ArbitraryInterface):
@classmethod
def arbitrary(cls):
arbitrary_set = set()
for generator in generators:
arbitrary_set |= {
arbitrary(generator)
for _ in range(arbitrary(int) % 100)
}
return arbitrary_set
SetOfGenerators.__name__ = ''.join([
'set_of(', ', '.join(generator.__name__ for generator in generators),
')'
])
return SetOfGenerators | Generates a set consisting solely of the specified generators.
This is a class factory, it makes a class which is a closure around the
specified generators. |
def deepSetAttr(obj, path, val):
    """Set a deep attribute on *obj* by resolving a dot-delimited *path*.

    If an intermediate attribute on the path does not exist an
    ``AttributeError`` is raised.
    """
    parent_path, _, leaf = path.rpartition('.')
    target = deepGetAttr(obj, parent_path) if parent_path else obj
    return setattr(target, leaf, val)
def periodogram_auto(self, oversampling=5, nyquist_factor=3,
                     return_periods=True):
    """Compute the periodogram on an automatically-determined grid.

    Heuristic arguments choose a suitable frequency grid for the data.
    Depending on the data window function, the model may be sensitive to
    periodicity at higher frequencies than this function returns!  The
    final number of frequencies is
    ``Nf = oversampling * nyquist_factor * len(t) / 2``.

    Parameters
    ----------
    oversampling : float
        the number of samples per approximate peak width
    nyquist_factor : float
        the highest frequency, in units of the nyquist frequency for
        points spread uniformly through the data range.

    Returns
    -------
    period : ndarray
        the grid of periods
    power : ndarray
        the power at each frequency
    """
    # NOTE(review): return_periods is currently unused; periods are
    # always returned alongside the power.
    n_obs = len(self.t)
    baseline = np.max(self.t) - np.min(self.t)
    df = 1. / baseline / oversampling
    f0 = df
    n_freq = int(0.5 * oversampling * nyquist_factor * n_obs)
    freq = f0 + df * np.arange(n_freq)
    return 1. / freq, self._score_frequency_grid(f0, df, n_freq)
def is_discrete(self):
    """Check whether this interval contains exactly one number.

    :rtype: bool
    """
    lower, upper = self.bounds
    return lower == upper and self.included == (True, True)
def to_df(self, method: str = 'MEMORY', **kwargs) -> 'pd.DataFrame':
    """Export this SAS Data Set to a Pandas Data Frame.

    :param method: defaults to MEMORY, the original method. CSV is the
        other choice, which uses an intermediary csv file; faster for
        large data.
    :param kwargs: passed through to ``sasdata2dataframe``
    :return: Pandas data frame, or None (after printing the log) when
        the data set is not valid
    """
    error_log = self._is_valid()
    if error_log:
        print(error_log['LOG'])
        return None
    return self.sas.sasdata2dataframe(
        self.table, self.libref, self.dsopts, method, **kwargs)
def start_all_linking(self, link_type, group_id):
    """Begin all linking by issuing the 0264 direct hub command."""
    self.logger.info("start_all_linking for type %s group %s",
                     link_type, group_id)
    command = '0264' + link_type + group_id
    self.direct_command_hub(command)
def entity_list(args):
    """List entities in a workspace as 'type<TAB>name' strings."""
    response = fapi.get_entities_with_type(args.project, args.workspace)
    fapi._check_response_code(response, 200)
    return ['{0}\t{1}'.format(entity['entityType'], entity['name'])
            for entity in response.json()]
def all_equal(keys, axis=semantics.axis_default):
    """Return True if all keys are equal (the index has a single group)."""
    return as_index(keys, axis).groups == 1
def draw(self):
    """Draw the MV grid's graph using the geo data of its nodes.

    Notes
    -----
    The coordinates stored in the nodes' geoms are usually conformal,
    not equidistant.  Therefore the plot might be distorted and does not
    (fully) reflect the real positions or distances between nodes.
    """
    positions = {node: (node.geom.x, node.geom.y)
                 for node in self.graph.nodes()}
    plt.figure()
    nx.draw_networkx(self.graph, positions, node_size=16, font_size=8)
    plt.show()
def convert_string(string):
    """Convert a string to int, float, bool or None where possible.

    Falls back to returning the original string unchanged when no
    conversion applies.
    """
    if is_int(string):
        return int(string)
    if is_float(string):
        return float(string)
    # Call convert_bool once instead of twice (the original evaluated
    # it in the condition and again for the result).
    is_bool, bool_value = convert_bool(string)
    if is_bool:
        return bool_value
    if string == 'None':
        return None
    return string
def toml(uncertainty):
    """Convert an uncertainty XML node into a TOML string.

    The node's text becomes the (bracketed) value and each XML attribute
    becomes a ``key = value`` line.  Attribute values are parsed as
    Python literals where possible, otherwise kept as quoted strings.
    """
    text = uncertainty.text.strip()
    if not text.startswith('['):
        text = '[%s]' % text
    for key, value in uncertainty.attrib.items():
        try:
            value = ast.literal_eval(value)
        except (ValueError, SyntaxError):
            # literal_eval raises SyntaxError (not ValueError) for
            # unparseable input such as 'a b'; the original only caught
            # ValueError and crashed on such values.  Fall back to a
            # quoted string in both cases.
            value = repr(value)
        text += '\n%s = %s' % (key, value)
    return text
def start(self, children):
    """Parse a MapServer Mapfile.

    Parsing of partial Mapfiles or lists of composites is also possible:
    a single composite is returned bare, multiple composites as a list.
    """
    # The loop previously carried an unreachable
    # `if False and self.include_position:` branch (dead code); the
    # loop's only live effect was copying `children` into a new list.
    composites = list(children)
    if len(composites) == 1:
        return composites[0]
    return composites
def from_wif(cls, wif, network=BitcoinMainNet):
    """Import a key in WIF format.

    WIF is Wallet Import Format: a base58-encoded, checksummed private
    key.  See https://en.bitcoin.it/wiki/Wallet_import_format for a full
    description.  Compressed WIFs (33 payload bytes with a trailing byte)
    are supported; see
    http://bitcoin.stackexchange.com/questions/7299/when-importing-private-keys-will-compressed-or-uncompressed-format-be-used  # nopep8
    (specifically http://bitcoin.stackexchange.com/a/7958).

    Raises ChecksumException on a bad base58 checksum and an
    incompatible-network error when the version byte does not match
    *network*.
    """
    wif = ensure_str(wif)
    try:
        payload = base58.b58decode_check(wif)
    except ValueError as err:
        raise ChecksumException(err)
    version_byte = payload[0]
    if not isinstance(version_byte, six.integer_types):
        # On Python 2 indexing a bytestring yields a 1-char str.
        version_byte = ord(version_byte)
    if version_byte != network.SECRET_KEY:
        raise incompatible_network_exception_factory(
            network_name=network.NAME,
            expected_prefix=network.SECRET_KEY,
            given_prefix=version_byte)
    key_bytes = payload[1:]
    compressed = len(key_bytes) == 33
    if compressed:
        key_bytes = key_bytes[:-1]
    return cls(long_or_int(hexlify(key_bytes), 16), network,
               compressed=compressed)
def rate_limits(self):
    """Return a list of rate limit details, computing and caching them
    on first access."""
    cached = self._rate_limits
    if not cached:
        cached = utilities.get_rate_limits(self.response)
        self._rate_limits = cached
    return cached
def _merge_any_two_boxes(self, box_list):
    """Find one pair of nearby parallel boxes in *box_list* and merge
    them in place.

    Returns True when a pair was merged (the two originals are removed
    from the list and the merged box appended); False when no mergeable
    pair exists.  A merge only happens when the merged box satisfies the
    minimum aspect ratio.
    """
    count = len(box_list)
    for i in range(count):
        for j in range(i + 1, count):
            if not self._are_nearby_parallel_boxes(box_list[i], box_list[j]):
                continue
            first, second = box_list[i], box_list[j]
            candidate = RotatedBox.from_points(
                np.vstack([first.points, second.points]), self.box_type)
            if candidate.width / candidate.height >= self.min_box_aspect:
                box_list.remove(first)
                box_list.remove(second)
                box_list.append(candidate)
                return True
    return False
def write_text(filename, data, add=False):
    """Write image data to a text file.

    :param filename: name of text file to write data to
    :type filename: str
    :param data: image data to write to text file
    :type data: numpy array
    :param add: whether to append to existing file or not.
        Default is ``False``.
    :type add: bool
    """
    with open(filename, 'a' if add else 'w') as out:
        print(data, end='', file=out)
def flat_list_to_polymer(atom_list, atom_group_s=4):
    """Takes a flat list of atomic coordinates and converts it to a `Polymer`.

    Parameters
    ----------
    atom_list : [Atom]
        Flat list of coordinates.
    atom_group_s : int, optional
        Size of atom groups: 4 labels residues as GLY, 5 (with CB) as ALA.

    Returns
    -------
    polymer : Polypeptide
        `Polymer` object containing atom coords converted `Monomers`.

    Raises
    ------
    ValueError
        Raised if `atom_group_s` != 4 or 5
    """
    # Validate up front so bad input fails fast; the original only
    # raised after grouping and building all the Atom objects.
    if atom_group_s == 5:
        mol_code = 'ALA'
    elif atom_group_s == 4:
        mol_code = 'GLY'
    else:
        raise ValueError(
            'Parameter atom_group_s must be 4 or 5 so atoms can be labeled correctly.')
    atom_labels = ['N', 'CA', 'C', 'O', 'CB']
    atom_elements = ['N', 'C', 'C', 'O', 'C']
    atoms_coords = [atom_list[x:x + atom_group_s]
                    for x in range(0, len(atom_list), atom_group_s)]
    atoms = [[Atom(x[0], x[1]) for x in zip(y, atom_elements)]
             for y in atoms_coords]
    monomers = [Residue(OrderedDict(zip(atom_labels, x)), mol_code)
                for x in atoms]
    polymer = Polypeptide(monomers=monomers)
    return polymer
def install(self, io_handler, module_name):
    """Install the bundle with the given module name.

    Writes the new bundle id to *io_handler* and returns it.
    """
    bundle = self._context.install_bundle(module_name)
    # Fetch the id once instead of calling get_bundle_id() twice.
    bundle_id = bundle.get_bundle_id()
    io_handler.write_line("Bundle ID: {0}", bundle_id)
    return bundle_id
def get_task_progress(self, task_name):
    """Get a task's current progress.

    :param task_name: task_name
    :return: the task's progress
    :rtype: :class:`odps.models.Instance.Task.TaskProgress`
    """
    query = {'instanceprogress': task_name, 'taskname': task_name}
    response = self._client.get(self.resource(), params=query)
    return Instance.Task.TaskProgress.parse(self._client, response)
def export(self, out_filename):
    """Export desired threads as a zipfile to *out_filename*.

    Each thread listed by get_thread_info() is fetched as a
    GitHubCommentThread, its comments are serialized to JSON, and the
    result is stored as one archive member named '<number>__<title>'.
    """
    with zipfile.ZipFile(out_filename, 'w', zipfile.ZIP_DEFLATED) as archive:
        for num, info in enumerate(list(self.get_thread_info())):
            logging.info('Working on item %i : %s', num, info['number'])
            thread = GitHubCommentThread(
                self.gh_info.owner, self.gh_info.realm, info['title'],
                self.gh_info.user, self.gh_info.token,
                thread_id=info['number'])
            comments = thread.get_comment_section().comments
            payload = json.dumps([comment.to_dict() for comment in comments])
            archive.writestr('%i__%s' % (info['number'], info['title']),
                             payload)
def Read(f):
    """Reads and returns Config data from a yaml file.

    Args:
      f: Yaml file to parse.

    Returns:
      Config object as defined in this file.

    Raises:
      Error (some subclass): If there is a problem loading or parsing the file.
    """
    try:
        # safe_load: a config file must not be able to instantiate
        # arbitrary Python objects; yaml.load without an explicit Loader
        # is also deprecated since PyYAML 5.1.
        yaml_data = yaml.safe_load(f)
    except yaml.YAMLError as e:
        raise ParseError('%s' % e)
    except IOError as e:
        raise YAMLLoadError('%s' % e)
    _CheckData(yaml_data)
    try:
        return Config(
            yaml_data.get('blacklist', ()),
            # ('*',) not ('*'): the latter is just the string '*'.
            yaml_data.get('whitelist', ('*',)))
    except UnicodeDecodeError as e:
        raise YAMLLoadError('%s' % e)
def combine_with(self, rgbd_im):
    """Replace all zeros in the source rgbd image with the values of a
    different rgbd image.

    Parameters
    ----------
    rgbd_im : :obj:`RgbdImage`
        rgbd image to combine with

    Returns
    -------
    :obj:`RgbdImage`
        the combined rgbd image
    """
    combined = self.data.copy()
    own_depth = self.depth.data
    other_depth = rgbd_im.depth.data
    # Pixels where this image has no depth reading at all.
    zero_px = self.depth.zero_pixels()
    # Pixels where the other image has a valid, strictly closer reading.
    closer = np.where((other_depth != 0) & (other_depth < own_depth))
    closer_px = np.c_[closer[0], closer[1]]
    combined[zero_px[:, 0], zero_px[:, 1], :] = \
        rgbd_im.data[zero_px[:, 0], zero_px[:, 1], :]
    combined[closer_px[:, 0], closer_px[:, 1], :] = \
        rgbd_im.data[closer_px[:, 0], closer_px[:, 1], :]
    return RgbdImage(combined, frame=self.frame)
def flush(self):
    """Block until all pending logging output has been flushed.

    A no-op once the handler has been shut down.
    """
    if not self.shutdown:
        self.flush_buffers(force=True)
        self.queue.put(FLUSH_MARKER)
        self.queue.join()
def _mpl_to_vispy(fig):
    """Convert a given matplotlib figure to vispy.

    This function is experimental and subject to change!
    Requires matplotlib and mplexporter.

    Parameters
    ----------
    fig : instance of matplotlib Figure
        The populated figure to display.

    Returns
    -------
    canvas : instance of Canvas
        The resulting vispy Canvas.
    """
    renderer = VispyRenderer()
    # Suppress (record) warnings emitted during export.
    with warnings.catch_warnings(record=True):
        Exporter(renderer).run(fig)
    renderer._vispy_done()
    return renderer.canvas
def _guess_record(self, rtype, name=None, content=None):
records = self._list_records_internal(
identifier=None, rtype=rtype, name=name, content=content)
if len(records) == 1:
return records[0]
if len(records) > 1:
raise Exception(
'Identifier was not provided and several existing '
'records match the request for {0}/{1}'.format(rtype, name))
raise Exception(
'Identifier was not provided and no existing records match '
'the request for {0}/{1}'.format(rtype, name)) | Tries to find existing unique record by type, name and content |
def create_for_block(
        cls, i=None, name=None, cname=None, version=None, **kwargs):
    """Return a new datacol with the block i.

    For legacy files written by versions <= 0.10.0, block columns named
    'values_block_<n>' are renamed to 'values_<n>'.
    """
    if cname is None:
        cname = name or 'values_block_{idx}'.format(idx=i)
    if name is None:
        name = cname

    # Guard against version being None (the default) or too short: the
    # original only caught IndexError, so calling without a version
    # raised an uncaught TypeError at version[0].
    if version is not None:
        try:
            if version[0] == 0 and version[1] <= 10 and version[2] == 0:
                m = re.search(r"values_block_(\d+)", name)
                if m:
                    name = "values_{group}".format(group=m.groups()[0])
        except (IndexError, TypeError):
            pass
    return cls(name=name, cname=cname, **kwargs)
def get_crash_signature(error_line):
    """Try to get a crash signature from the given error_line string.

    Returns the captured signature, or None when the line does not match
    CRASH_RE or the captured term is not a helpful search term.
    """
    match = CRASH_RE.match(error_line)
    if match and is_helpful_search_term(match.group(1)):
        return match.group(1)
    return None
def field_value(self, value):
    """Validate against NodeType.

    Coerces *value* through the field's type; for array fields, a
    list/tuple/set value is coerced element-wise into a list.
    """
    convert = self.field_type
    if self.is_array and isinstance(value, (list, tuple, set)):
        return [convert(item) for item in value]
    return convert(value)
def _get_kernel_data(self, nmr_samples, thinning, return_output):
    """Get the kernel data we will input to the MCMC sampler.

    Sets: ``data`` (user data pointer), ``method_data`` (MCMC-method
    specific data), ``nmr_iterations`` (samples * thinning),
    ``iteration_offset`` (current sample index), ``rng_state``,
    ``current_chain_position``, ``current_log_likelihood`` and
    ``current_log_prior``.  When *return_output* is True, output arrays
    ``samples``, ``log_likelihoods`` and ``log_priors`` are added.

    Args:
        nmr_samples (int): the number of samples we will draw
        thinning (int): the thinning factor we want to use
        return_output (boolean): if the kernel should return output

    Returns:
        dict[str: mot.lib.utils.KernelData]: the kernel input data
    """
    kernel_data = {
        'data': self._data,
        'method_data': self._get_mcmc_method_kernel_data(),
        'nmr_iterations': Scalar(nmr_samples * thinning, ctype='ulong'),
        'iteration_offset': Scalar(self._sampling_index, ctype='ulong'),
        'rng_state': Array(self._rng_state, 'uint', mode='rw', ensure_zero_copy=True),
        'current_chain_position': Array(self._current_chain_position, 'mot_float_type',
                                        mode='rw', ensure_zero_copy=True),
        'current_log_likelihood': Array(self._current_log_likelihood, 'mot_float_type',
                                        mode='rw', ensure_zero_copy=True),
        'current_log_prior': Array(self._current_log_prior, 'mot_float_type',
                                   mode='rw', ensure_zero_copy=True),
    }
    if return_output:
        kernel_data['samples'] = Zeros(
            (self._nmr_problems, self._nmr_params, nmr_samples), ctype='mot_float_type')
        kernel_data['log_likelihoods'] = Zeros(
            (self._nmr_problems, nmr_samples), ctype='mot_float_type')
        kernel_data['log_priors'] = Zeros(
            (self._nmr_problems, nmr_samples), ctype='mot_float_type')
    return kernel_data
def on_attribute(self, node):
    """Extract an attribute (ast.Attribute node).

    Store contexts are an internal error here (assignments are handled
    elsewhere); Del delegates to delattr.  For Load, the lookup is only
    attempted when the attribute is not in UNSAFE_ATTRS, keeping the
    interpreter sandboxed; otherwise an AttributeError is raised.
    """
    ctx = node.ctx.__class__
    if ctx == ast.Store:
        msg = "attribute for storage: shouldn't be here!"
        self.raise_exception(node, exc=RuntimeError, msg=msg)
    sym = self.run(node.value)
    if ctx == ast.Del:
        return delattr(sym, node.attr)
    # Fixed typo in the error message ('cannnot' -> 'cannot').
    fmt = "cannot access attribute '%s' for %s"
    if node.attr not in UNSAFE_ATTRS:
        fmt = "no attribute '%s' for %s"
        # Only attempt getattr for attributes that are safe to expose;
        # the flattened original performed getattr unconditionally,
        # which defeated the UNSAFE_ATTRS check.
        try:
            return getattr(sym, node.attr)
        except AttributeError:
            pass
    obj = self.run(node.value)
    msg = fmt % (node.attr, obj)
    self.raise_exception(node, exc=AttributeError, msg=msg)
def _auto_commit(self, by_count=False):
    """Check if we should start a new commit operation and commit.

    :param by_count: when True, only commit once at least
        ``auto_commit_every_n`` messages were processed since the last
        commit; when False, commit unconditionally (timer-driven path).
    """
    # Bail out when committing makes no sense: stopping/shutting down,
    # not started, nothing processed yet, no consumer group to commit
    # for, or count-based commits are disabled.
    if (self._stopping or self._shuttingdown or (not self._start_d) or
            (self._last_processed_offset is None) or
            (not self.consumer_group) or
            (by_count and not self.auto_commit_every_n)):
        return
    # Commit when not gated by count, when we have never committed, or
    # when enough messages accumulated since the last commit.
    if (not by_count or self._last_committed_offset is None or
            (self._last_processed_offset - self._last_committed_offset
             ) >= self.auto_commit_every_n):
        if not self._commit_ds:
            commit_d = self.commit()
            commit_d.addErrback(self._handle_auto_commit_error)
        else:
            # A commit is already outstanding; queue a retry that fires
            # once it completes.
            d = Deferred()
            d.addCallback(self._retry_auto_commit, by_count)
            self._commit_ds.append(d)
def config(config_dict: typing.Mapping) -> Config:
    """Configure the konch shell; meant to be called in a .konchrc file.

    :param dict config_dict: Dict that may contain 'context', 'banner',
        and/or 'shell' (default shell class to use).
    """
    logger.debug(f"Updating with {config_dict}")
    updated = _cfg
    updated.update(config_dict)
    return updated
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.