code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _filter_plans(attr, name, plans):
return [plan for plan in plans if plan[attr] == name] | Helper to return list of usage plan items matching the given attribute value. |
def makedbthreads(self):
    """Set up and start one daemon makeblastdb worker thread per target folder,
    then queue every FASTA target file for database creation.

    Populates self.targetfolders, self.targetfiles and self.records, and
    blocks until the download queue (self.dqueue) is fully processed.
    """
    # Collect the unique folders holding targets for samples with valid
    # combined targets.
    for sample in self.metadata:
        if sample[self.analysistype].combinedtargets != 'NA':
            self.targetfolders.add(sample[self.analysistype].targetpath)
    # One worker per folder; daemon threads so they don't block interpreter exit.
    for _ in self.targetfolders:
        threads = Thread(target=self.makeblastdb, args=())
        threads.daemon = True  # setDaemon() is deprecated since Python 3.10
        threads.start()
    for targetdir in self.targetfolders:
        # NOTE(review): the original retried the identical glob inside an
        # ``except IndexError`` block, which could not change the result;
        # the no-op retry has been removed.
        self.targetfiles = glob(os.path.join(targetdir, '*.fasta'))
        for targetfile in self.targetfiles:
            # Index all records so sequences can be looked up by name later.
            self.records[targetfile] = SeqIO.to_dict(SeqIO.parse(targetfile, 'fasta'))
            self.dqueue.put(targetfile)
    self.dqueue.join()
def addAggShkDstn(self, AggShkDstn):
    """Update attribute IncomeDstn by combining each period's idiosyncratic
    shocks with the aggregate shock distribution.

    Parameters
    ----------
    AggShkDstn : [np.array]
        Aggregate productivity shock distribution. First element is
        probabilities, second is aggregate permanent shocks, third is
        aggregate transitory shocks.

    Returns
    -------
    None
    """
    # More than three elements means aggregate shocks were already combined
    # in; restart from the stored idiosyncratic-only distribution.
    if len(self.IncomeDstn[0]) > 3:
        self.IncomeDstn = self.IncomeDstnWithoutAggShocks
    else:
        self.IncomeDstnWithoutAggShocks = self.IncomeDstn
    combined = [
        combineIndepDstns(self.IncomeDstn[t], AggShkDstn)
        for t in range(self.T_cycle)
    ]
    self.IncomeDstn = combined
def _is_device(path):
    """Return True if *path* is a physical (block) device.

    Runs ``file -i`` on the path and checks the reported MIME type.
    """
    out = __salt__['cmd.run_all']('file -i {0}'.format(path))
    _verify_run(out)
    # Second whitespace-separated field is the MIME type with a trailing
    # separator character, which is dropped before comparing.
    mime_field = re.split(r'\s+', out['stdout'])[1]
    return mime_field[:-1] == 'inode/blockdevice'
def convert_to_string(ndarr):
    """Serialize the numpy.ndarray *ndarr* to IDX-format bytes and return them."""
    buffer = BytesIO()
    with contextlib.closing(buffer):
        _internal_write(buffer, ndarr)
        return buffer.getvalue()
def _GetSignatureMatchParserNames(self, file_object):
    """Determines if a file-like object matches one of the known signatures.

    Args:
        file_object (file): file-like object whose contents will be checked
            for known signatures.

    Returns:
        list[str]: parser names for which the contents of the file-like
            object matches their known signatures.
    """
    parser_names = []
    scan_state = pysigscan.scan_state()
    self._file_scanner.scan_file_object(scan_state, file_object)
    for scan_result in iter(scan_state.scan_results):
        format_specification = (
            self._formats_with_signatures.GetSpecificationBySignature(
                scan_result.identifier))
        # Guard against identifiers without a registered specification,
        # which would otherwise raise AttributeError on the None result.
        if format_specification is None:
            continue
        if format_specification.identifier not in parser_names:
            parser_names.append(format_specification.identifier)
    return parser_names
def run(self, **client_params):
    """Actually creates the async job on the CARTO server.

    :param client_params: Sent to the CARTO API. See CARTO's documentation
                          for the subclass you are using.
    :type client_params: kwargs
    :return:
    :raise: CartoException
    """
    try:
        self.send(self.get_collection_endpoint(),
                  http_method="POST",
                  **client_params)
    except Exception as e:
        # Chain the original exception so the underlying traceback is kept.
        raise CartoException(e) from e
def find_guest(name, quiet=False, path=None):
    """Returns the host for a container.

    path
        path to the container parent
        default: /var/lib/lxc (system default)

        .. versionadded:: 2015.8.0

    .. code-block:: bash

        salt-run lxc.find_guest name
    """
    if quiet:
        log.warning("'quiet' argument is being deprecated."
                    ' Please migrate to --quiet')
    container_states = ('running', 'frozen', 'stopped')
    for data in _list_iter(path=path):
        host, containers = next(six.iteritems(data))
        if any(name in containers[state] for state in container_states):
            if not quiet:
                __jid_event__.fire_event(
                    {'data': host,
                     'outputter': 'lxc_find_host'},
                    'progress')
            return host
    return None
def to_sky(self, wcs, mode='all'):
    """Convert the aperture to a `SkyCircularAperture` object defined in
    celestial coordinates.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The world coordinate system (WCS) transformation to use.
    mode : {'all', 'wcs'}, optional
        Whether to do the transformation including distortions (``'all'``;
        default) or only the core WCS transformation (``'wcs'``).

    Returns
    -------
    aperture : `SkyCircularAperture` object
        A `SkyCircularAperture` object.
    """
    return SkyCircularAperture(**self._to_sky_params(wcs, mode=mode))
def get_empty_tracks(self):
    """Return the indices of tracks with empty pianorolls.

    Returns
    -------
    empty_track_indices : list
        The indices of tracks with empty pianorolls.
    """
    empty_track_indices = []
    for index, track in enumerate(self.tracks):
        # A track is empty when no cell of its pianoroll is nonzero.
        if not np.any(track.pianoroll):
            empty_track_indices.append(index)
    return empty_track_indices
def _multiline_convert(config, start="banner login", end="EOF", depth=1):
ret = list(config)
try:
s = ret.index(start)
e = s
while depth:
e = ret.index(end, e + 1)
depth = depth - 1
except ValueError:
return ret
ret[s] = {"cmd": ret[s], "input": "\n".join(ret[s + 1 : e])}
del ret[s + 1 : e + 1]
return ret | Converts running-config HEREDOC into EAPI JSON dict |
def update_descriptor_le(self, lineedit, tf):
    """Update the given line edit to show the descriptor stored in *tf*.

    :param lineedit: the line edit to update with the descriptor
    :type lineedit: QLineEdit
    :param tf: the selected taskfileinfo
    :type tf: :class:`TaskFileInfo` | None
    :returns: None
    :rtype: None
    :raises: None
    """
    # Clear the field when no task file is selected.
    lineedit.setText(tf.descriptor if tf else "")
def write(self, data):
    """Write `data` to the serial port and return the number of bytes
    written.

    Args:
        data (bytes, bytearray, list): a byte array or list of 8-bit
            integers to write.

    Returns:
        int: number of bytes written.

    Raises:
        SerialError: if an I/O or OS error occurs.
        TypeError: if `data` type is invalid.
    """
    if isinstance(data, list):
        # Convert lists of ints up front so os.write gets a buffer type.
        data = bytearray(data)
    elif not isinstance(data, (bytes, bytearray)):
        raise TypeError("Invalid data type, should be bytes, bytearray, or list.")
    try:
        return os.write(self._fd, data)
    except OSError as e:
        raise SerialError(e.errno, "Writing serial port: " + e.strerror)
def account_delete(request, username,
                   template_name=accounts_settings.ACCOUNTS_PROFILE_DETAIL_TEMPLATE,
                   extra_context=None, **kwargs):
    """Delete an account by deactivating it (soft delete).

    The user record is kept but flagged inactive; the browser is then
    redirected to the accounts admin page. ``template_name`` and
    ``extra_context`` are accepted for signature compatibility.
    """
    account = get_object_or_404(get_user_model(),
                                username__iexact=username)
    account.is_active = False
    account.save()
    return redirect(reverse('accounts_admin'))
def create_mv_rule(tensorprod_rule, dim):
    """Convert tensor product rule into a multivariate quadrature generator."""
    def mv_rule(order, sparse=False, part=None):
        """Generate quadrature for the given order, optionally sparse."""
        if not sparse:
            return tensorprod_rule(order, part=part)
        # Smolyak sparse grid: expand the scalar order to one per dimension.
        order = numpy.ones(dim, dtype=int) * order
        def tensorprod_rule_(order, part=part):
            return tensorprod_rule(order, part=part)
        return chaospy.quad.sparse_grid(tensorprod_rule_, order)
    return mv_rule
def signal_committed_filefields(sender, instance, **kwargs):
    """post_save handler that emits ``saved_file`` for every ``FileField``
    committed during this save."""
    committed = getattr(instance, '_uncommitted_filefields', ())
    for field_name in committed:
        fieldfile = getattr(instance, field_name)
        # Skip fields that hold no actual file.
        if fieldfile:
            signals.saved_file.send_robust(sender=sender, fieldfile=fieldfile)
def find_transition(self, gene: Gene, multiplexes: Tuple[Multiplex, ...]) -> Transition:
    """Find and return the model transition for *gene* and *multiplexes*.

    Only multiplexes containing the gene are considered. Raise an
    AttributeError if no matching transition exists.
    """
    relevant = tuple(m for m in multiplexes if gene in m.genes)
    wanted = set(relevant)
    for transition in self.transitions:
        if transition.gene == gene and set(transition.multiplexes) == wanted:
            return transition
    raise AttributeError(f'transition K_{gene.name}' + ''.join(f"+{multiplex!r}" for multiplex in relevant) + ' does not exist')
def emit(event, *args, **kwargs):
    """Emit a SocketIO event to one or more connected clients.

    A JSON blob can be attached to the event as payload. Must be called from
    within a SocketIO event handler, since it pulls defaults (namespace,
    sender sid) from the current client context. Example::

        @socketio.on('my event')
        def handle_my_custom_event(json):
            emit('my response', {'data': 42})

    :param event: The name of the user event to emit.
    :param args: A dictionary with the JSON data to send as payload.
    :param namespace: Namespace to send under; defaults to the namespace of
                      the originating event. ``'/'`` selects the global one.
    :param callback: Callback invoked with the client's acknowledgement.
    :param broadcast: ``True`` to send to all clients, ``False`` to reply
                      only to the sender of the originating event.
    :param room: Send to all users in the given room (implies broadcast).
    :param include_self: Include the sender when broadcasting or addressing
                         a room.
    :param ignore_queue: Bypass a configured message queue and emit directly;
                         only safe with a single server process or a single
                         addressee. Leave at the default of ``False``.
    """
    namespace = kwargs['namespace'] if 'namespace' in kwargs else flask.request.namespace
    room = kwargs.get('room')
    if room is None and not kwargs.get('broadcast'):
        # Default to replying only to the originating client.
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.emit(event, *args,
                         namespace=namespace,
                         room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=kwargs.get('callback'),
                         ignore_queue=kwargs.get('ignore_queue', False))
def sequence_charge(seq, pH=7.4):
    """Calculate the total charge of the input polypeptide sequence.

    Parameters
    ----------
    seq : str
        Sequence of amino acids.
    pH : float
        pH of interest.
    """
    # Warn about noncanonical residues, which cannot be charged properly.
    if 'X' in seq:
        warnings.warn(_nc_warning_str, NoncanonicalWarning)
    total = sum(
        [partial_charge(aa, pH) * residue_charge[aa] * n
         for aa, n in Counter(seq).items()])
    # Both termini contribute their own partial charges.
    total += partial_charge('N-term', pH) * residue_charge['N-term']
    total += partial_charge('C-term', pH) * residue_charge['C-term']
    return total
def allowed_transitions():
    """Get target states allowed for the current state.

    Returns the allowed-target entry for the current SDP state, or an error
    descriptor dict when the current state has no entry.
    """
    try:
        sdp_state = SDPState()
        current = sdp_state.current_state
        return sdp_state.allowed_target_states[current]
    except KeyError:
        LOG.error("Key Error")
        return dict(state="KeyError", reason="KeyError")
def _DecodeUrlSafe(urlsafe):
    """Decode a url-safe base64-encoded string and return the decoded value.

    Accepts str or unicode (Python 2 API); unicode input is UTF-8 encoded
    first, and the '=' padding stripped by url-safe encoders is restored
    before decoding.
    """
    if not isinstance(urlsafe, basestring):
        raise TypeError('urlsafe must be a string; received %r' % urlsafe)
    if isinstance(urlsafe, unicode):
        urlsafe = urlsafe.encode('utf8')
    # Restore padding so the length is a multiple of four.
    padding_needed = -len(urlsafe) % 4
    urlsafe += '=' * padding_needed
    return base64.b64decode(urlsafe.replace('-', '+').replace('_', '/'))
def with_params(self, params):
    """Create a new request with added query parameters.

    Parameters
    ----------
    params : Mapping
        The query parameters to add.
    """
    merged = _merge_maps(self.params, params)
    return self.replace(params=merged)
def validate_accounting_equation(cls):
    """Check that the balances of all root accounts sum to zero.

    Raises
    ------
    exceptions.AccountingEquationViolationError
        If the accounting equation does not hold.
    """
    balances = [account.balance(raw=True) for account in Account.objects.root_nodes()]
    # Compute the total once; the original recomputed the sum for the
    # error message.
    total = sum(balances, Balance())
    if total != 0:
        raise exceptions.AccountingEquationViolationError(
            "Account balances do not sum to zero. They sum to {}".format(total)
        )
def get_participants_for_section(section, person=None):
    """Return the gradebook participants for the passed section, optionally
    acting as *person* (sets the X-UW-Act-as header)."""
    section_label = encode_section_label(section.section_label())
    url = "/rest/gradebook/v1/section/{}/participants".format(section_label)
    headers = {} if person is None else {"X-UW-Act-as": person.uwnetid}
    data = get_resource(url, headers)
    return [_participant_from_json(pt) for pt in data["participants"]]
def _ask_questionnaire():
    """Interactively ask the template questions and return the answers used
    to fill out a HFOS plugin template."""
    print(info_header)
    pprint(questions.items())
    answers = {}
    for question, default in questions.items():
        reply = _ask(question, default, str(type(default)), show_hint=True)
        # Decode byte replies when the default is unicode (Python 2 behaviour).
        if type(default) == unicode and type(reply) != str:
            reply = reply.decode('utf-8')
        answers[question] = reply
    return answers
def trigger_script(self):
    """Actually process a streamed update script.

    Returns [1] when the bridge has not received a script; otherwise parses
    it, resets the bridge to IDLE (or records the parse error) and
    returns [0].
    """
    if self.remote_bridge.status not in (BRIDGE_STATUS.RECEIVED,):
        return [1]
    try:
        parsed = UpdateScript.FromBinary(self._device.script)
        self.remote_bridge.parsed_script = parsed
        self.remote_bridge.status = BRIDGE_STATUS.IDLE
    except Exception as exc:
        # Keep the error on the bridge so callers can inspect it.
        self._logger.exception("Error parsing script streamed to device")
        self.remote_bridge.script_error = exc
        self.remote_bridge.error = 1
    return [0]
def reprkwargs(kwargs, sep=', ', fmt="{0!s}={1!r}"):
    """Format a kwargs mapping for display, e.g. ``a=1, b='x'``.

    Uses ``items()`` instead of the Python 2-only ``iteritems()`` so the
    helper works on both Python 2 and 3.
    """
    return sep.join(fmt.format(k, v) for k, v in kwargs.items())
def set_log_format(log_format, server=_DEFAULT_SERVER):
    """Set the active log format for the SMTP virtual server.

    :param str log_format: The log format name.
    :param str server: The SMTP server name.
    :return: A boolean representing whether the change succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_smtp_server.set_log_format 'Microsoft IIS Log File Format'
    """
    setting = 'LogPluginClsid'
    log_format_types = get_log_format_types()
    format_id = log_format_types.get(log_format, None)

    # Reject unknown format names with a helpful list of valid ones.
    if not format_id:
        message = ("Invalid log format '{0}' specified. Valid formats:"
                   ' {1}').format(log_format, log_format_types.keys())
        raise SaltInvocationError(message)
    _LOG.debug("Id for '%s' found: %s", log_format, format_id)

    # Short-circuit when the requested format is already active.
    if log_format == get_log_format(server):
        _LOG.debug('%s already contains the provided format.', setting)
        return True

    _set_wmi_setting('IIsSmtpServerSetting', setting, format_id, server)
    ret = log_format == get_log_format(server)
    if ret:
        _LOG.debug("Setting %s configured successfully: %s", setting, log_format)
    else:
        _LOG.error("Unable to configure %s with value: %s", setting, log_format)
    return ret
def _serve_runs(self, request):
    """Serve a JSON array of run names, ordered by run started time.

    Sort order is by started time (aka first event time) with empty times
    sorted last, and then ties are broken by sorting on the run name.
    """
    if self._db_connection_provider:
        # DB-backed mode: fetch run names via SQL.
        # NOTE(review): the SQL query string is missing from this execute()
        # call; as written the call would fail at runtime. The original
        # query (ordered run names from the Runs table) needs to be
        # restored -- TODO confirm against upstream.
        db = self._db_connection_provider()
        cursor = db.execute(
        )
        run_names = [row[0] for row in cursor]
    else:
        # Event-file mode: name-sort first so that runs with equal
        # timestamps keep alphabetical order after the stable sort below.
        run_names = sorted(self._multiplexer.Runs())
        def get_first_event_timestamp(run_name):
            try:
                return self._multiplexer.FirstEventTimestamp(run_name)
            except ValueError as e:
                logger.warn(
                    'Unable to get first event timestamp for run %s: %s', run_name, e)
                # Runs with no start time sort to the end.
                return float('inf')
        run_names.sort(key=get_first_event_timestamp)
    return http_util.Respond(request, run_names, 'application/json')
def chmod(f):
    """Best-effort: make *f* writable.

    Tries the Windows-style write bit first, then full 0o777 permissions;
    failures (missing file, insufficient rights) are silently ignored.
    """
    # Narrowed from ``except Exception as e: pass`` (unused binding, overly
    # broad) to OSError, which covers all chmod failures.
    try:
        os.chmod(f, S_IWRITE)
    except OSError:
        pass
    try:
        os.chmod(f, 0o777)
    except OSError:
        pass
def enable_glut(self, app=None):
    """Enable event loop integration with GLUT.

    Parameters
    ----------
    app : ignored
        Ignored, it's only a placeholder to keep the call signature of all
        gui activation methods consistent, which simplifies the logic of
        supporting magics.

    Notes
    -----
    This methods sets the PyOS_InputHook for GLUT, which allows the GLUT to
    integrate with terminal based applications like IPython. Due to GLUT
    limitations, it is currently not possible to start the event loop
    without first creating a window. You should thus not create another
    window but use instead the created one. See 'gui-glut.py' in the
    docs/examples/lib directory.

    The default screen mode is set to:
    glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
    """
    import OpenGL.GLUT as glut
    from pydev_ipython.inputhookglut import glut_display_mode, \
        glut_close, glut_display, \
        glut_idle, inputhook_glut
    if GUI_GLUT not in self._apps:
        # First activation: initialize GLUT and create the hidden window
        # that GLUT requires before its event loop can run.
        glut.glutInit(sys.argv)
        glut.glutInitDisplayMode(glut_display_mode)
        # glutSetOption is freeglut-specific; when available, make the main
        # loop return on window close instead of exiting the process.
        if bool(glut.glutSetOption):
            glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
                               glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
        glut.glutCreateWindow(sys.argv[0])
        glut.glutReshapeWindow(1, 1)
        glut.glutHideWindow()
        glut.glutWMCloseFunc(glut_close)
        glut.glutDisplayFunc(glut_display)
        glut.glutIdleFunc(glut_idle)
    else:
        # Re-activation: only re-register the callbacks.
        glut.glutWMCloseFunc(glut_close)
        glut.glutDisplayFunc(glut_display)
        glut.glutIdleFunc(glut_idle)
    self.set_inputhook(inputhook_glut)
    self._current_gui = GUI_GLUT
    self._apps[GUI_GLUT] = True
def download_and_parse_mnist_file(fname, target_dir=None, force=False):
    """Download the IDX file named *fname* from the configured dataset URL
    and return it as a numpy array.

    Parameters
    ----------
    fname : str
        File name to download and parse.
    target_dir : str
        Directory where to store the file.
    force : bool
        Force downloading the file, even if it already exists.

    Returns
    -------
    data : numpy.ndarray
        Numpy array with the dimensions and the data in the IDX file.
    """
    fname = download_file(fname, target_dir=target_dir, force=force)
    # Transparently handle gzip-compressed IDX files.
    opener = gzip.open if os.path.splitext(fname)[1] == '.gz' else open
    with opener(fname, 'rb') as fd:
        return parse_idx(fd)
def form(**kwargs: Question):
    """Create a form with multiple questions.

    The parameter name of a question becomes the key for the answer in the
    returned dict.
    """
    fields = [FormField(key, question) for key, question in kwargs.items()]
    return Form(*fields)
def __get_ac_tree(self, ac: model.AssetClass, with_stocks: bool):
    """Format the asset-class tree: the entity row followed by all child
    subtrees and, optionally, its stock/cash rows."""
    rows = [self.__get_ac_row(ac)]
    for child in ac.classes:
        rows.extend(self.__get_ac_tree(child, with_stocks))
    if with_stocks:
        for holding in ac.stocks:
            if isinstance(holding, Stock):
                rows.append(self.__get_stock_row(holding, ac.depth + 1))
            elif isinstance(holding, CashBalance):
                rows.append(self.__get_cash_row(holding, ac.depth + 1))
            else:
                # Preserve original behaviour: unrecognized holding types
                # still produce a (None) placeholder entry.
                rows.append(None)
    return rows
def find_largest_contig(self):
    """Determine the largest contig for each strain.

    NOTE(review): this copies ``contig_lengths`` wholesale into
    ``longest_contig`` rather than taking a maximum -- presumably the
    lengths are pre-reduced upstream; confirm before changing.
    """
    for sample in self.metadata:
        analysis = sample[self.analysistype]
        analysis.longest_contig = analysis.contig_lengths
def _create_aural_content_element(self, content, data_property_value):
content_element = self._create_content_element(
content,
data_property_value
)
content_element.set_attribute('unselectable', 'on')
content_element.set_attribute('class', 'screen-reader-only')
return content_element | Create a element to show the content, only to aural displays.
:param content: The text content of element.
:type content: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:return: The element to show the content.
:rtype: hatemile.util.html.htmldomelement.HTMLDOMElement |
def _add_baseline_to_exclude_files(args):
baseline_name_regex = r'^{}$'.format(args.import_filename[0])
if not args.exclude_files:
args.exclude_files = baseline_name_regex
elif baseline_name_regex not in args.exclude_files:
args.exclude_files += r'|{}'.format(baseline_name_regex) | Modifies args.exclude_files in-place. |
def focus_changed(self):
    """Handle an editor focus change: refresh when the focused widget is one
    of our editors, then re-emit the focus-changed signal."""
    focused = QApplication.focusWidget()
    for finfo in self.data:
        if finfo.editor is focused:
            self.refresh()
    self.editor_focus_changed.emit()
def set_measurements(test):
    """Test phase that sets a measurement.

    Sets three levels with a pause between each, then verifies the last
    value can be read back.
    """
    for attr_name, value in (('level_none', 0), ('level_some', 8), ('level_all', 9)):
        setattr(test.measurements, attr_name, value)
        time.sleep(1)
    level_all = test.get_measurement('level_all')
    assert level_all.value == 9
def prepend_urls(self):
    """Add the tileset action endpoints (generate/download/status/stop) to
    the Tileset base urls."""
    pattern = r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/%s%s$"
    return [
        url(pattern % (self._meta.resource_name, action, trailing_slash()),
            self.wrap_view(action), name="api_tileset_%s" % action)
        for action in ('generate', 'download', 'status', 'stop')
    ]
def _abort_batches(self):
    """Go through incomplete batches and abort them."""
    error = Errors.IllegalStateError("Producer is closed forcefully.")
    for batch in self._incomplete.all():
        tp = batch.topic_partition
        # Close the record set under the partition lock so no further
        # appends can sneak in, then fail the batch and free its buffer.
        with self._tp_locks[tp]:
            batch.records.close()
        batch.done(exception=error)
        self.deallocate(batch)
def _read_protocol_line(self):
    """Read the next line of instrumentation output relevant to snippets.

    Lines that don't start with 'SNIPPET ' or 'INSTRUMENTATION_RESULT:'
    are skipped.

    Returns:
        (str) Next line of snippet-related instrumentation output, stripped.

    Raises:
        jsonrpc_client_base.AppStartError: If EOF is reached without any
            protocol lines being read.
    """
    while True:
        raw = self._proc.stdout.readline().decode('utf-8')
        if not raw:
            # EOF: the app died before speaking the protocol.
            raise jsonrpc_client_base.AppStartError(
                self._ad, 'Unexpected EOF waiting for app to start')
        line = raw.strip()
        if line.startswith(('INSTRUMENTATION_RESULT:', 'SNIPPET ')):
            self.log.debug(
                'Accepted line from instrumentation output: "%s"', line)
            return line
        self.log.debug('Discarded line from instrumentation output: "%s"',
                       line)
def log_detail(job_id=None, task_name=None, log_id=None):
    """Show a detailed description of a specific log."""
    matching_jobs = [j for j in get_jobs() if str(j['job_id']) == job_id]
    job = matching_jobs[0]
    task = [t for t in job['tasks'] if t['name'] == task_name][0]
    return render_template('log_detail.html',
                           job=job,
                           task_name=task_name,
                           task=task,
                           log_id=log_id)
def element_href_use_filter(name, _filter):
    """Get element href using filter.

    Filter should be a valid entry point value, i.e. host, router, network,
    single_fw, etc.

    :param name: name of element
    :param _filter: filter type; an unknown filter results in no matches
    :return: element href (if found), else None
    """
    if not (name and _filter):
        return None
    element = fetch_meta_by_name(name, filter_context=_filter)
    if element.json:
        return element.json.pop().get('href')
    return None
def add_note(path, filename="note.txt"):
    """Open a txt file at the given path in $EDITOR so the user can add and
    save notes.

    Args:
        path (str): Directory where note will be saved.
        filename (str): Name of note. Defaults to "note.txt".
    """
    path = os.path.expanduser(path)
    assert os.path.isdir(path), "{} is not a valid directory.".format(path)
    filepath = os.path.join(path, filename)
    existed_before = os.path.isfile(filepath)
    try:
        subprocess.call([EDITOR, filepath])
    except Exception as exc:
        logger.error("Editing note failed!")
        raise exc
    # Report whether the note was freshly created or updated.
    if existed_before:
        print("Note updated at:", filepath)
    else:
        print("Note created at:", filepath)
def _rgbtomask(self, obj):
dat = obj.get_image().get_data()
return dat.sum(axis=2).astype(np.bool) | Convert RGB arrays from mask canvas object back to boolean mask. |
def preformat_call(self, api_call):
    """Return a properly formatted QualysGuard API call (leading '/' and
    trailing '?' removed)."""
    formatted = api_call.lstrip('/').rstrip('?')
    if formatted != api_call:
        logger.debug('api_call post strip =\n%s' % formatted)
    return formatted
def get_changed_files(self) -> List[str]:
    """Get the files changed between the compare and actual commits.

    Returns:
        List[str]: File paths of changed files, relative to the git repo
            root.
    """
    diff_output = shell_tools.output_of(
        'git',
        'diff',
        '--name-only',
        self.compare_commit_id,
        self.actual_commit_id,
        '--',
        cwd=self.destination_directory)
    # Drop blank lines from the name-only diff listing.
    return [path for path in diff_output.split('\n') if path.strip()]
def _get_next(request):
next = request.POST.get('next', request.GET.get('next',
request.META.get('HTTP_REFERER', None)))
if not next:
next = request.path
return next | The part that's the least straightforward about views in this module is
how they determine their redirects after they have finished computation.
In short, they will try and determine the next place to go in the
following order:
1. If there is a variable named ``next`` in the *POST* parameters, the
view will redirect to that variable's value.
2. If there is a variable named ``next`` in the *GET* parameters,
the view will redirect to that variable's value.
3. If Django can determine the previous page from the HTTP headers,
the view will redirect to that previous page. |
def updates_selection(update_selection):
    """Decorator indicating that the decorated method could change the
    selection.

    After the wrapped method runs, model observation is moved from
    deselected to newly selected models, internal lists are refreshed, a
    stale focus is dropped, and change signals are emitted.
    """
    def handle_update(selection, *args, **kwargs):
        old_selection = selection.get_all()
        update_selection(selection, *args, **kwargs)
        new_selection = selection.get_all()
        affected_models = old_selection ^ new_selection
        if len(affected_models) != 0:
            deselected_models = old_selection - new_selection
            selected_models = new_selection - old_selection
            # BUG FIX: the original used map(), which is lazy on Python 3
            # and therefore never actually called relieve_model or
            # observe_model.
            for model in deselected_models:
                selection.relieve_model(model)
            for model in selected_models:
                selection.observe_model(model)
            selection.update_core_element_lists()
            # Drop the focus if it is no longer part of the selection.
            if selection.focus and selection.focus not in new_selection:
                del selection.focus
            affected_classes = set(model.core_element.__class__ for model in affected_models)
            msg_namedtuple = SelectionChangedSignalMsg(update_selection.__name__, new_selection, old_selection,
                                                       affected_classes)
            selection.selection_changed_signal.emit(msg_namedtuple)
            if selection.parent_signal is not None:
                selection.parent_signal.emit(msg_namedtuple)
    return handle_update
def compute_date_range_chunks(sessions, start_date, end_date, chunksize):
    """Compute the start and end dates to run a pipeline for.

    Parameters
    ----------
    sessions : DatetimeIndex
        The available dates.
    start_date : pd.Timestamp
        The first date in the pipeline.
    end_date : pd.Timestamp
        The last date in the pipeline.
    chunksize : int or None
        The size of the chunks to run. Setting this to None returns one
        chunk.

    Returns
    -------
    ranges : iterable[(np.datetime64, np.datetime64)]
        A sequence of start and end dates to run the pipeline for.
    """
    date_fmt = "%Y-%m-%d"
    if start_date not in sessions:
        raise KeyError("Start date %s is not found in calendar." %
                       (start_date.strftime(date_fmt),))
    if end_date not in sessions:
        raise KeyError("End date %s is not found in calendar." %
                       (end_date.strftime(date_fmt),))
    if end_date < start_date:
        raise ValueError("End date %s cannot precede start date %s." %
                         (end_date.strftime(date_fmt),
                          start_date.strftime(date_fmt)))
    if chunksize is None:
        # Single chunk spanning the whole range.
        return [(start_date, end_date)]
    start_ix, end_ix = sessions.slice_locs(start_date, end_date)
    return (
        (chunk[0], chunk[-1])
        for chunk in partition_all(chunksize, sessions[start_ix:end_ix])
    )
def GetZipInfoByPathSpec(self, path_spec):
    """Retrieves the ZIP info for a path specification.

    Args:
        path_spec (PathSpec): a path specification.

    Returns:
        zipfile.ZipInfo: a ZIP info object or None if not available.

    Raises:
        PathSpecError: if the path specification is incorrect.
    """
    location = getattr(path_spec, 'location', None)
    if location is None:
        raise errors.PathSpecError('Path specification missing location.')
    if not location.startswith(self.LOCATION_ROOT):
        raise errors.PathSpecError('Invalid location in path specification.')
    # The root location itself has no corresponding ZipInfo entry.
    if len(location) <= 1:
        return None
    return self._zip_file.getinfo(location[1:])
def _find_combo_data(widget, value):
for idx in range(widget.count()):
if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True:
return idx
else:
raise ValueError("%s not found in combo box" % (value,)) | Returns the index in a combo box where itemData == value
Raises a ValueError if data is not found |
def get_scalar_product(self, other):
    """Return the scalar (dot) product of this vector with the given other
    vector."""
    return self.x * other.x + self.y * other.y
def compute_mga_entropy_stat(mga_vec, codon_pos,
                             stat_func=np.mean,
                             default_val=0.0):
    """Compute MGA entropy conservation statistic.

    Parameters
    ----------
    mga_vec : np.array
        numpy vector containing MGA Entropy conservation scores for
        residues (may be None).
    codon_pos : list of int
        position of codon in protein sequence
    stat_func : function, default=np.mean
        function that calculates a statistic
    default_val : float
        default value to return if there are no mutations

    Returns
    -------
    score_stat : float
        MGA entropy score statistic for provided mutation list
    """
    if mga_vec is None:
        return default_val
    scores = fetch_mga_scores(mga_vec, codon_pos)
    # Fall back to the default when no scores could be fetched.
    if scores is None or not len(scores):
        return default_val
    return stat_func(scores)
def pdf(self, mu):
    """PDF for Poisson prior.

    Parameters
    ----------
    mu : float
        Latent variable for which the prior is being formed over.

    Returns
    ----------
    - p(mu) under a Poisson(lmd0) prior (after any transform of mu).
    """
    value = mu if self.transform is None else self.transform(mu)
    return ss.poisson.pmf(value, self.lmd0)
def get_alternative(self, experiment_name):
    """Get the alternative this user is enrolled in.

    Falls back to the experiment's default alternative when no enrollment
    exists, and to the control group when the experiment is unknown or not
    displaying alternatives.
    """
    try:
        experiment = experiment_manager[experiment_name]
    except KeyError:
        experiment = None
    if experiment and experiment.is_displaying_alternatives():
        alternative = self._get_enrollment(experiment)
        if alternative is not None:
            return alternative
        return experiment.default_alternative
    return conf.CONTROL_GROUP
def _mpl_marker2pgfp_marker(data, mpl_marker, marker_face_color):
    """Translates a marker style of matplotlib to the corresponding style
    in PGFPlots.

    Returns a ``(data, pgfplots_marker, marker_options)`` triple; marker and
    options are None when the matplotlib marker has no PGFPlots counterpart.
    """
    # First try the plain PGF marker table.
    try:
        pgfplots_marker = _MP_MARKER2PGF_MARKER[mpl_marker]
    except KeyError:
        pass
    else:
        # A filled circle becomes the '*' marker, which needs the plotmarks
        # TikZ library.
        if (marker_face_color is not None) and pgfplots_marker == "o":
            pgfplots_marker = "*"
            data["tikz libs"].add("plotmarks")
        marker_options = None
        return (data, pgfplots_marker, marker_options)
    # Fall back to the plotmarks-library marker table.
    # NOTE: plotmarks is added before the lookup, so the library gets
    # registered even when the marker turns out to be unknown (preserved
    # as-is from the original).
    try:
        data["tikz libs"].add("plotmarks")
        pgfplots_marker, marker_options = _MP_MARKER2PLOTMARKS[mpl_marker]
    except KeyError:
        pass
    else:
        # Append '*' for filled variants, except for markers with no filled
        # form.
        if (
            marker_face_color is not None
            and (
                not isinstance(marker_face_color, str)
                or marker_face_color.lower() != "none"
            )
            and pgfplots_marker not in ["|", "-", "asterisk", "star"]
        ):
            pgfplots_marker += "*"
        return (data, pgfplots_marker, marker_options)
    # Unknown marker: no translation available.
    return data, None, None
def bartlett(timeseries, segmentlength, **kwargs):
    """Calculate a PSD using Bartlett's method.

    Bartlett's method is Welch's method with non-overlapping segments, so
    any caller-supplied ``noverlap`` is discarded and forced to zero.
    """
    kwargs.pop('noverlap', None)
    return welch(timeseries, segmentlength, noverlap=0, **kwargs)
def stop_service(name):
    """Stop the service given by name.

    @warn: This method requires UAC elevation in Windows Vista and above.
    @see: L{get_services}, L{get_active_services},
        L{start_service}, L{pause_service}, L{resume_service}
    """
    # Open the SCM and service with the minimum access rights needed.
    with win32.OpenSCManager(dwDesiredAccess=win32.SC_MANAGER_CONNECT) as hSCManager:
        with win32.OpenService(hSCManager, name,
                               dwDesiredAccess=win32.SERVICE_STOP) as hService:
            win32.ControlService(hService, win32.SERVICE_CONTROL_STOP)
def item_selection_changed(self):
    """Enable or disable the expand/collapse actions depending on whether
    any items are currently selected."""
    has_selection = bool(self.selectedItems())
    self.expand_selection_action.setEnabled(has_selection)
    self.collapse_selection_action.setEnabled(has_selection)
def lasio_get(l,
section,
item,
attrib='value',
default=None,
remap=None,
funcs=None):
remap = remap or {}
item_to_fetch = remap.get(item, item)
if item_to_fetch is None:
return None
try:
obj = getattr(l, section)
result = getattr(obj, item_to_fetch)[attrib]
except:
return default
if funcs is not None:
f = funcs.get(item, null)
result = f(result)
return result | Grabs, renames and transforms stuff from a lasio object.
Args:
l (lasio): a lasio instance.
section (str): The LAS section to grab from, eg ``well``
item (str): The item in the LAS section to grab from, eg ``name``
attrib (str): The attribute of the item to grab, eg ``value``
default (str): What to return instead.
remap (dict): Optional. A dict of 'old': 'new' LAS field names.
funcs (dict): Optional. A dict of 'las field': function() for
implementing a transform before loading. Can be a lambda.
Returns:
The transformed item. |
def get_system_by_name(self, name):
    """Return the system with the given name, or None when absent."""
    return next((system for system in self.systems if system.name == name),
                None)
def find_nn_triangles_point(tri, cur_tri, point):
    r"""Return the natural neighbors of a triangle containing a point.

    This is based on the provided Delaunay Triangulation.

    Parameters
    ----------
    tri: Object
        A Delaunay Triangulation
    cur_tri: int
        Simplex code for Delaunay Triangulation lookup of
        a given triangle that contains 'position'.
    point: (x, y)
        Coordinates used to calculate distances to
        simplexes in 'tri'.

    Returns
    -------
    nn: (N, ) array
        List of simplex codes for natural neighbor
        triangles in 'tri'.
    """
    # Candidate simplices: direct neighbors plus neighbors-of-neighbors.
    candidates = set(tri.neighbors[cur_tri])
    candidates.update(tri.neighbors[tri.neighbors[cur_tri]].flat)
    candidates.discard(-1)  # -1 marks "no neighbor" on the hull

    px, py = point[0], point[1]
    nn = []
    for simplex in candidates:
        verts = tri.points[tri.simplices[simplex]]
        cc_x, cc_y = circumcenter(verts[0], verts[1], verts[2])
        radius_sq = circumcircle_radius_2(verts[0], verts[1], verts[2])
        # A triangle is a natural neighbor when the point falls inside
        # its circumcircle (squared-distance comparison).
        if dist_2(px, py, cc_x, cc_y) < radius_sq:
            nn.append(simplex)
    return nn
def gramian(self):
    """Compute gramian of a distributed matrix.

    The gramian is defined as the product of the matrix
    with its transpose, i.e. A^T * A.
    """
    if self.mode == 'spark':
        rdd = self.values.tordd()
        from pyspark.accumulators import AccumulatorParam
        # Accumulator that sums equally-shaped numpy matrices in place,
        # so per-row outer products can be reduced across the cluster.
        class MatrixAccumulator(AccumulatorParam):
            def zero(self, value):
                # Identity element: a zero matrix with the same shape.
                return zeros(shape(value))
            def addInPlace(self, val1, val2):
                val1 += val2
                return val1
        # NOTE(review): exposed via a module-level global so the closure
        # shipped to workers references the same accumulator object.
        global mat
        init = zeros((self.shape[1], self.shape[1]))
        mat = rdd.context.accumulator(init, MatrixAccumulator())
        def outer_sum(x):
            global mat
            # Each row contributes x * x^T; the sum over rows is A^T A.
            mat += outer(x, x)
        rdd.values().foreach(outer_sum)
        return self._constructor(mat.value, index=self.index)
    if self.mode == 'local':
        return self._constructor(dot(self.values.T, self.values), index=self.index)
def transform(self, jam, query=None):
    """Transform a jam object to make data for this task.

    Parameters
    ----------
    jam : jams.JAMS
        The jams container object
    query : string, dict, or callable [optional]
        An optional query to narrow the elements of `jam.annotations`
        to be considered.  If not provided, all annotations are
        considered.

    Returns
    -------
    data : dict
        A dictionary of transformed annotations.  All annotations which
        can be converted to the target namespace will be converted.
    """
    matches = jam.search(**query) if query else jam.annotations

    # Keep only the annotations convertible to our target namespace.
    converted = []
    for ann in matches:
        try:
            converted.append(jams.nsconvert.convert(ann, self.namespace))
        except jams.NamespaceError:
            continue

    duration = jam.file_metadata.duration
    if not converted:
        # No usable annotation: fall back to an empty one over the track.
        converted = [self.empty(duration)]

    results = []
    for ann in converted:
        data = self.transform_annotation(ann, duration)
        if ann.time is None or ann.duration is None:
            valid = [0, duration]
        else:
            valid = [ann.time, ann.time + ann.duration]
        data['_valid'] = time_to_frames(valid, sr=self.sr,
                                        hop_length=self.hop_length)
        results.append(data)

    return self.merge(results)
def algebra_simplify(alphabet_size=26,
                     min_depth=0,
                     max_depth=2,
                     nbr_cases=10000):
    """Generate the algebra simplify dataset.

    Each sample is a symbolic math expression involving unknown variables.
    The task is to simplify the expression; the target is the result.

    Args:
        alphabet_size: How many possible variables there are. Max 52.
        min_depth: Minimum depth of the expression trees.
        max_depth: Maximum depth of the expression trees.
        nbr_cases: The number of cases to generate.

    Yields:
        A dictionary {"inputs": input-list, "targets": target-list} of
        integer-encoded tokens for the expression and its simplification.

    Raises:
        ValueError: If `max_depth` < `min_depth`.
    """
    if max_depth < min_depth:
        raise ValueError("max_depth must be greater than or equal to min_depth. "
                         "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
    alg_cfg = math_dataset_init(alphabet_size, digits=5)
    for _ in range(nbr_cases):
        sample, target = generate_algebra_simplify_sample(
            alg_cfg.vlist, list(alg_cfg.ops.values()), min_depth, max_depth)
        yield {"inputs": alg_cfg.int_encoder(sample),
               "targets": alg_cfg.int_encoder(target)}
def get_channelstate_settled(
        chain_state: ChainState,
        payment_network_id: PaymentNetworkID,
        token_address: TokenAddress,
) -> List[NettingChannelState]:
    """Return the state of settled channels in a token network."""
    def _is_settled(channel_state):
        # Keep only channels whose lifecycle status is SETTLED.
        return channel.get_status(channel_state) == CHANNEL_STATE_SETTLED

    return get_channelstate_filter(
        chain_state,
        payment_network_id,
        token_address,
        _is_settled,
    )
def get(self, do_process_raw_report = True):
    """Read an input or feature report from the device.

    Returns a ReadOnlyList with the raw report bytes (report id first),
    or an empty ReadOnlyList if the driver call failed. When
    ``do_process_raw_report`` is True the raw bytes are also parsed via
    ``set_raw_data`` and forwarded to the owning HID object.
    """
    assert(self.__hid_object.is_opened())
    # Only input and feature reports can be read back; output reports
    # are write-only.
    if self.__report_kind != HidP_Input and \
        self.__report_kind != HidP_Feature:
        raise HIDError("Only for input or feature reports")
    # (Re)allocate the ctypes buffer sized for this report.
    self.__alloc_raw_data()
    raw_data = self.__raw_data
    # The Windows HID API expects the report id in the first byte.
    raw_data[0] = self.__report_id
    read_function = None
    if self.__report_kind == HidP_Feature:
        read_function = hid_dll.HidD_GetFeature
    elif self.__report_kind == HidP_Input:
        read_function = hid_dll.HidD_GetInputReport
    # A non-zero return from the driver means the buffer was filled.
    if read_function and read_function(int(self.__hid_object.hid_handle),
        byref(raw_data), len(raw_data)):
        if do_process_raw_report:
            # Decode the raw bytes into usages and notify the owner.
            self.set_raw_data(raw_data)
            self.__hid_object._process_raw_report(raw_data)
        return helpers.ReadOnlyList(raw_data)
    return helpers.ReadOnlyList([])
def add_data_file(data_files, target, source):
    """Add ``source`` under ``target`` in a distutils-style data_files list.

    ``data_files`` is a list of ``(target, [sources])`` pairs. The pair
    for ``target`` is created on demand, and ``source`` is appended only
    if it is not already present for that target.
    """
    for entry_target, sources in data_files:
        if entry_target == target:
            break
    else:
        sources = []
        data_files.append((target, sources))
    if source not in sources:
        sources.append(source)
def output_lines(output, encoding='utf-8', error_exc=None):
    """Convert bytestring container output or the result of a container
    exec command into a sequence of unicode lines.

    :param output:
        Container output bytes or an
        :class:`docker.models.containers.ExecResult` instance.
    :param encoding:
        The encoding to use when converting bytes to unicode
        (default ``utf-8``).
    :param error_exc:
        Optional exception to raise if ``output`` is an ``ExecResult``
        with a nonzero exit code.
    :returns: list[str]
    """
    if isinstance(output, ExecResult):
        exit_code, output = output
        if exit_code != 0 and error_exc is not None:
            raise error_exc(output.decode(encoding))
    text = output.decode(encoding)
    return text.splitlines()
def infect(cls, graph, key, default_scope=None):
    """Forcibly convert an entry-point based factory to a ScopedFactory.

    Must be invoked before resolving the entry point.

    :raises AlreadyBoundError: for non entry-points; these should be
        declared with @scoped_binding
    """
    existing = graph.factory_for(key)
    # Unwrap if the key is already bound to one of us, so we never nest.
    target = existing.func if isinstance(existing, cls) else existing
    factory = cls(key, target, default_scope)
    graph._registry.factories[key] = factory
    return factory
def create_issue(self, title, body, labels=None):
    """Create a new issue in Github.

    :params title: title of the issue to be created
    :params body: body of the issue to be created
    :params labels: (optional) list of labels for the issue
    :returns: newly created issue
    :rtype: :class:`exreporter.stores.github.GithubIssue`
    """
    issue_attrs = self.github_request.create(title=title, body=body, labels=labels)
    return GithubIssue(github_request=self.github_request, **issue_attrs)
def run_matrix_in_parallel(self, process_data):
    """Run pipelines for all processable matrix entries in parallel.

    One worker payload is built per matrix entry that passes the tag
    filter; the payloads are dispatched to a process pool sized to the
    CPU count. Returns ``{'success': bool, 'output': [lines...]}``,
    where success is False if any worker reported failure.
    """
    # Build picklable dicts for the worker processes; only entries that
    # match the requested matrix tags are processed.
    worker_data = [{'matrix': entry, 'pipeline': process_data.pipeline,
                    'model': process_data.model, 'options': process_data.options,
                    'hooks': process_data.hooks} for entry in self.matrix
                   if Matrix.can_process_matrix(entry, process_data.options.matrix_tags)]
    output = []
    success = True
    # closing() guarantees the pool is terminated even if map() raises.
    with closing(multiprocessing.Pool(multiprocessing.cpu_count())) as pool:
        for result in pool.map(matrix_worker, worker_data):
            output += result['output']
            if not result['success']:
                success = False
    return {'success': success, 'output': output}
def get_namespace_entry(self, url: str, name: str) -> Optional[NamespaceEntry]:
    """Get a given NamespaceEntry object.

    :param url: The url of the namespace source
    :param name: The value of the namespace from the given url's document
    """
    matches = (
        self.session
        .query(NamespaceEntry)
        .join(Namespace)
        .filter(and_(Namespace.url == url, NamespaceEntry.name == name))
        .all()
    )
    if not matches:
        return None
    if len(matches) > 1:
        # Multiple rows should not happen; keep the first and warn.
        log.warning('result for get_namespace_entry is too long. Returning first of %s',
                    [str(r) for r in matches])
    return matches[0]
def load_mmd():
    """Load libMultiMarkdown for usage.

    Sets the module globals ``_MMD_LIB`` (the loaded ctypes library, or
    None on failure) and ``_LIB_LOCATION`` (the path that was tried).
    """
    global _MMD_LIB
    global _LIB_LOCATION
    try:
        lib_file = 'libMultiMarkdown' + SHLIB_EXT[platform.system()]
        _LIB_LOCATION = os.path.abspath(os.path.join(DEFAULT_LIBRARY_DIR, lib_file))
        if not os.path.isfile(_LIB_LOCATION):
            # Fall back to whatever the system linker can locate.
            _LIB_LOCATION = ctypes.util.find_library('MultiMarkdown')
        _MMD_LIB = ctypes.cdll.LoadLibrary(_LIB_LOCATION)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed so those propagate. Any failure
        # to locate/load the library leaves the binding disabled.
        _MMD_LIB = None
def inference(images, num_classes, for_training=False, restore_logits=True,
              scope=None):
    """Build Inception v3 model architecture.

    See here for reference: http://arxiv.org/abs/1512.00567

    Args:
        images: Images returned from inputs() or distorted_inputs().
        num_classes: number of classes
        for_training: If set to `True`, build the inference model for training.
            Kernels that operate differently for inference during training
            e.g. dropout, are appropriately configured.
        restore_logits: whether or not the logits layers should be restored.
            Useful for fine-tuning a model with different num_classes.
        scope: optional prefix string identifying the ImageNet tower.

    Returns:
        Logits. 2-D float Tensor.
        Auxiliary Logits. 2-D float Tensor of side-head. Used for training only.
    """
    # Parameters applied to every batch-norm layer in the tower.
    batch_norm_params = {
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
        'epsilon': 0.001,
    }
    # Weight decay applies to all conv2d and fully-connected weights.
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        # Conv layers additionally get init stddev, ReLU and batch norm.
        with slim.arg_scope([slim.ops.conv2d],
                stddev=0.1,
                activation=tf.nn.relu,
                batch_norm_params=batch_norm_params):
            logits, endpoints = slim.inception.inception_v3(
                images,
                dropout_keep_prob=0.8,
                num_classes=num_classes,
                is_training=for_training,
                restore_logits=restore_logits,
                scope=scope)
    # Add summaries for viewing model statistics on TensorBoard.
    _activation_summaries(endpoints)
    # Grab the logits associated with the side head. Employed during training.
    auxiliary_logits = endpoints['aux_logits']
    return logits, auxiliary_logits
def _prepPointsForSegments(points):
while 1:
point = points[-1]
if point.segmentType:
break
else:
point = points.pop()
points.insert(0, point)
continue
break | Move any off curves at the end of the contour
to the beginning of the contour. This makes
segmentation easier. |
async def publish_changes(self, zone, changes):
    """Post changes to a zone.

    Args:
        zone (str): DNS zone of the change.
        changes (dict): JSON compatible dict of a `Change
            <https://cloud.google.com/dns/api/v1/changes>`_.
    Returns:
        string identifier of the change.
    """
    zone_id = self.get_managed_zone(zone)
    change_url = f'{self._base_url}/managedZones/{zone_id}/changes'
    raw_response = await self.request('post', change_url, json=changes)
    return json.loads(raw_response)['id']
def _get_main_and_json(directory):
directory = os.path.normpath(os.path.abspath(directory))
checker_main = os.path.normpath(os.path.join(directory, os.path.pardir, "checker-workflow-wrapping-tool.cwl"))
if checker_main and os.path.exists(checker_main):
main_cwl = [checker_main]
else:
main_cwl = glob.glob(os.path.join(directory, "main-*.cwl"))
main_cwl = [x for x in main_cwl if not x.find("-pack") >= 0]
assert len(main_cwl) == 1, "Did not find main CWL in %s" % directory
main_json = glob.glob(os.path.join(directory, "main-*-samples.json"))
assert len(main_json) == 1, "Did not find main json in %s" % directory
project_name = os.path.basename(directory).split("-workflow")[0]
return main_cwl[0], main_json[0], project_name | Retrieve the main CWL and sample JSON files from a bcbio generated directory. |
def save(name, data, rc_file='~/.odoorpcrc'):
    """Save the `data` session configuration under the name `name`
    in the `rc_file` file.

    Existing sections in the file are preserved; the named section is
    created if missing and its keys are overwritten from `data`.
    """
    rc_path = os.path.expanduser(rc_file)
    conf = ConfigParser()
    conf.read([rc_path])
    if not conf.has_section(name):
        conf.add_section(name)
    for key, value in data.items():
        conf.set(name, key, str(value))
    with open(rc_path, 'w') as file_:
        # Restrict the file to owner read/write: it stores credentials.
        os.chmod(rc_path, stat.S_IREAD | stat.S_IWRITE)
        conf.write(file_)
def servers(self, server='api.telldus.com', port=http.HTTPS_PORT):
    """Fetch list of servers that can be connected to.

    :return: list of (address, port) tuples
    """
    logging.debug("Fetching server list from %s:%d", server, port)
    conn = http.HTTPSConnection(server, port, context=self.ssl_context())
    conn.request('GET', "/server/assign?protocolVersion=2")
    response = conn.getresponse()
    if response.status != http.OK:
        raise RuntimeError("Could not connect to {}:{}: {} {}".format(
            server, port, response.status, response.reason))

    found = []

    def collect(tag, attributes):
        # The assignment document is XML; each <server> element carries
        # an address/port attribute pair.
        if tag == "server":
            found.append((attributes['address'], int(attributes['port'])))

    parser = expat.ParserCreate()
    parser.StartElementHandler = collect
    parser.ParseFile(response)
    logging.debug("Found %d available servers", len(found))
    return found
def transform_bbox(bbox, target_crs):
    """Map ``bbox`` from its current CRS to ``target_crs``.

    .. deprecated:: Use :meth:`BBox.transform` instead.

    :param bbox: bounding box
    :type bbox: geometry.BBox
    :param target_crs: target CRS
    :type target_crs: constants.CRS
    :return: bounding box in target CRS
    :rtype: geometry.BBox
    """
    warnings.warn("This function is deprecated, use BBox.transform method instead",
                  DeprecationWarning, stacklevel=2)
    return bbox.transform(target_crs)
def validate(self):
    """Confirm the current token is still valid.

    Returns True if it is valid, False otherwise.
    """
    try:
        response = self.request().get(self.validate_url, verify=self.verifySSL)
        payload = response.json()
    except (TokenExpiredError, AttributeError):
        # Expired token, or a response shape we cannot interrogate.
        return False
    return 'error' not in payload
def reconstruct_files(input_dir):
    """Sort ``input_dir`` and try to reconstruct datasets from the
    subdirectories found.

    Walks the tree renaming hidden files so the organizer picks them up,
    runs the DICOM organizer, then creates a dataset from each directory
    in the resulting ``<input_dir>-sorted`` tree. Emits a warning if the
    sorted directory was not produced.
    """
    input_dir = input_dir.rstrip('/')
    with nl.notify('Attempting to organize/reconstruct directory'):
        for r,ds,fs in os.walk(input_dir):
            for f in fs:
                # Hidden files would otherwise be skipped; prefix them
                # with 'i' so the organizer treats them as regular files.
                if f[0]=='.':
                    shutil.move(os.path.join(r,f),os.path.join(r,'i'+f))
        nl.dicom.organize_dir(input_dir)
        # organize_dir is expected to emit its output next to the input.
        output_dir = '%s-sorted' % input_dir
        if os.path.exists(output_dir):
            with nl.run_in(output_dir):
                for dset_dir in os.listdir('.'):
                    with nl.notify('creating dataset from %s' % dset_dir):
                        nl.dicom.create_dset(dset_dir)
        else:
            nl.notify('Warning: failed to auto-organize directory %s' % input_dir,level=nl.level.warning)
def _pwl1_to_poly(self, generators):
for g in generators:
if (g.pcost_model == PW_LINEAR) and (len(g.p_cost) == 2):
g.pwl_to_poly()
return generators | Converts single-block piecewise-linear costs into linear
polynomial. |
def unpack_classical_reg(c):
    """Get the address for a classical register.

    :param c: A list of length 2, a pair, a string (to be interpreted as
        name[0]), or a MemoryReference.
    :return: The address as a MemoryReference.
    """
    if isinstance(c, (list, tuple)):
        if not 1 <= len(c) <= 2:
            raise ValueError("if c is a list/tuple, it should be of length <= 2")
        if len(c) == 1:
            # A lone name addresses offset 0.
            c = (c[0], 0)
        name, offset = c
        if not isinstance(name, str):
            raise ValueError("if c is a list/tuple, its first member should be a string")
        if not isinstance(offset, int):
            raise ValueError("if c is a list/tuple, its second member should be an int")
        return MemoryReference(name, offset)
    if isinstance(c, MemoryReference):
        return c
    if isinstance(c, str):
        return MemoryReference(c, 0)
    raise TypeError("c should be a list of length 2, a pair, a string, or a MemoryReference")
def configfile_from_path(path, strict=True):
    """Get a ConfigFile object based on a file path.

    This method will inspect the file extension and return the
    appropriate ConfigFile subclass initialized with the given path.

    Args:
        path (str): The file path which represents the configuration file.
        strict (bool): Whether or not to parse the file in strict mode.

    Returns:
        confpy.loaders.base.ConfigurationFile: The subclass which is
            specialized for the given file path.

    Raises:
        UnrecognizedFileExtension: If there is no loader for the path.
    """
    extension = path.split('.')[-1]
    conf_type = FILE_TYPES.get(extension)
    if not conf_type:
        raise exc.UnrecognizedFileExtension(
            "Cannot parse file of type {0}. Choices are {1}.".format(
                extension,
                FILE_TYPES.keys(),
            )
        )
    return conf_type(path=path, strict=strict)
def getComponentName(self, pchRenderModelName, unComponentIndex, pchComponentName, unComponentNameLen):
    """Use this to get the names of available components. Index does not
    correlate to a tracked device index, but is only used for iterating
    over all available components. If the index is out of range, this
    function will return 0. Otherwise, it will return the size of the
    buffer required for the name.
    """
    native_call = self.function_table.getComponentName
    return native_call(pchRenderModelName, unComponentIndex,
                       pchComponentName, unComponentNameLen)
def parse_args(self, args=None, namespace=None):
    """Parse the command-line arguments and call the associated handler.

    The signature is the same as `argparse.ArgumentParser.parse_args
    <https://docs.python.org/2/library/argparse.html#argparse.ArgumentParser.parse_args>`_.

    Args
    ----
    args : list
        A list of argument strings. If ``None`` the list is taken from
        ``sys.argv``.
    namespace : argparse.Namespace
        A Namespace instance. Defaults to a new empty Namespace.

    Returns
    -------
    The return value of the handler called with the populated Namespace
    as kwargs, or None if no handler is registered.
    """
    assert self.initialized, '`init` must be called before `parse_args`.'
    parsed = self.parser.parse_args(args, namespace)
    handler = self._get_handler(parsed, remove_handler=True)
    if not handler:
        return None
    return handler(**vars(parsed))
def ground_height(self):
    """Return height above ground in feet."""
    lat = self.pkt['I105']['Lat']['val']
    lon = self.pkt['I105']['Lon']['val']
    # ElevationMap and gen_settings are module-level globals set up
    # elsewhere; the original read-only ``global`` declaration was a
    # no-op and has been dropped.
    elevation_m = ElevationMap.GetElevation(lat, lon) - gen_settings.wgs84_to_AMSL
    # metres -> feet. NOTE(review): 3.2807 differs slightly from the
    # exact 3.28084 factor; preserved as-is.
    return elevation_m * 3.2807
def filter_line(line: str, context: RunContext) -> typing.Optional[str]:
    """Filter out lines that match any of the context's regexes.

    :param line: line to filter
    :type line: str
    :param context: run context providing an optional list of regex
        patterns in ``context.filters``
    :type context: _RunContext
    :return: ``line`` if it matches no filter, otherwise ``None``
    :rtype: optional str
    """
    patterns = context.filters
    if patterns is None:
        return line
    if any(re.match(pattern, line) for pattern in patterns):
        return None
    return line
def _update_pi_vars(self):
    """Update variables that depend on `pi`.

    These are `pi_codon`, `ln_pi_codon`, `piAx_piAy`, `piAx_piAy_beta`,
    `ln_piAx_piAy_beta`.

    Update using current `pi` and `beta`.
    """
    # Raise on any floating-point anomaly instead of silently
    # propagating NaN/inf through downstream computations.
    with scipy.errstate(divide='raise', under='raise', over='raise',
            invalid='raise'):
        for r in range(self.nsites):
            # Expand amino-acid preferences to codon space.
            self.pi_codon[r] = self.pi[r][CODON_TO_AA]
            pim = scipy.tile(self.pi_codon[r], (N_CODON, 1))
            # piAx_piAy[r][x][y] = pi_codon[r][x] / pi_codon[r][y]
            self.piAx_piAy[r] = pim.transpose() / pim
        self.ln_pi_codon = scipy.log(self.pi_codon)
        # Exponentiate the preference ratios by the stringency parameter.
        self.piAx_piAy_beta = self.piAx_piAy**self.beta
        self.ln_piAx_piAy_beta = scipy.log(self.piAx_piAy_beta)
def gopro_get_request_send(self, target_system, target_component, cmd_id, force_mavlink1=False):
    """Request a GOPRO_COMMAND response from the GoPro.

    target_system    : System ID (uint8_t)
    target_component : Component ID (uint8_t)
    cmd_id           : Command ID (uint8_t)
    """
    msg = self.gopro_get_request_encode(target_system, target_component, cmd_id)
    return self.send(msg, force_mavlink1=force_mavlink1)
def count_nonzero(data, mapper=None, blen=None, storage=None,
                  create='array', **kwargs):
    """Count the number of non-zero elements, reducing block-wise."""
    return reduce_axis(
        data,
        reducer=np.count_nonzero,
        block_reducer=np.add,
        mapper=mapper,
        blen=blen,
        storage=storage,
        create=create,
        **kwargs
    )
async def _load_all_nodes(self):
    """Load all nodes via API, replacing the current collection."""
    request = GetAllNodesInformation(pyvlx=self.pyvlx)
    await request.do_api_call()
    if not request.success:
        raise PyVLXException("Unable to retrieve node information")
    self.clear()
    for frame in request.notification_frames:
        node = convert_frame_to_node(self.pyvlx, frame)
        # Frames that do not describe a usable node convert to None.
        if node is not None:
            self.add(node)
def cylindrical(cls, mag, theta, z=0):
    """Return a Vector instance built from cylindrical coordinates.

    ``mag`` and ``theta`` give the in-plane magnitude and angle in
    radians; ``z`` is passed through unchanged.
    """
    x = mag * math.cos(theta)
    y = mag * math.sin(theta)
    return cls(x, y, z)
async def delete_pairwise(self, their_did: str) -> None:
    """Remove a pairwise DID record by its remote DID. Silently return
    if no such record is present.

    Raise WalletState for closed wallet, or BadIdentifier for invalid
    pairwise DID.

    :param their_did: remote DID marking pairwise DID to remove
    """
    LOGGER.debug('Wallet.delete_pairwise >>> their_did: %s', their_did)
    # Validate the DID format before touching storage.
    if not ok_did(their_did):
        LOGGER.debug('Wallet.delete_pairwise <!< Bad DID %s', their_did)
        raise BadIdentifier('Bad DID {}'.format(their_did))
    # Pairwise relations are stored as non-secret records keyed by DID.
    await self.delete_non_secret(TYPE_PAIRWISE, their_did)
    LOGGER.debug('Wallet.delete_pairwise <<<')
def send_message(self, message):
    """Send a wamp message to the server. We don't wait for a response
    here; just fire out the message.
    """
    if self._state == STATE_DISCONNECTED:
        raise Exception("WAMP is currently disconnected!")
    payload = message.as_str()
    logger.debug("SND>: {}".format(payload))
    # The socket may have dropped since the state check.
    if not self.ws:
        raise Exception("WAMP is currently disconnected!")
    self.ws.send(payload)
def freeze(self):
    """Method called by the parser when updates to this record finish.

    Parses ``self.value`` into a name tuple according to the dialect.

    :return: self
    """
    if self.value is None:
        self.value = ""
    # Each GEDCOM producer encodes names slightly differently; pick the
    # parser that matches the detected dialect.
    if self.dialect in [DIALECT_ALTREE]:
        parsed = parse_name_altree(self)
    elif self.dialect in [DIALECT_MYHERITAGE]:
        parsed = parse_name_myher(self)
    elif self.dialect in [DIALECT_ANCESTRIS]:
        parsed = parse_name_ancestris(self)
    else:
        parsed = split_name(self.value)
    self.value = parsed
    return self
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.