def set_colors(self, buf):
    """
    DEPRECATED: use self.color_list

    Use with extreme caution! Directly sets the internal buffer and
    bypasses all brightness and rotation control. buf must also be in
    the exact format required by the display type.
    """
    deprecated.deprecated('layout.set_colors')
    if len(self._colors) != len(buf):
        raise IOError("Data buffer size incorrect! "
                      "Expected: {} bytes / Received: {} bytes"
                      .format(len(self._colors), len(buf)))
    self._colors[:] = buf
def f0Morph(fromWavFN, pitchPath, stepList,
            outputName, doPlotPitchSteps, fromPitchData,
            toPitchData, outputMinPitch, outputMaxPitch, praatEXE,
            keepPitchRange=False, keepAveragePitch=False,
            sourcePitchDataList=None, minIntervalLength=0.3):
    '''
    Resynthesizes the pitch track from a source to a target wav file.

    fromPitchData and toPitchData should be segmented according to the
    portions that you want to morph.  The two lists must have the same
    number of sublists.

    Occurs over a three-step process.

    This function can act as a template for how to use the function
    morph_sequence.morphChunkedDataLists to morph pitch contours or
    other data.

    By default, everything is morphed, but it is possible to maintain
    elements of the original speaker's pitch (average pitch and pitch
    range) by setting the appropriate flag.

    sourcePitchDataList: if passed in, any regions unspecified by
    fromPitchData will be sampled from this list.  In essence, this
    allows one to leave segments of the original pitch contour
    untouched by the morph process.
    '''
    fromDuration = audio_scripts.getSoundFileDuration(fromWavFN)

    # Find source pitch samples that will be mixed in with the target
    # pitch samples later
    nonMorphPitchData = []
    if sourcePitchDataList is not None:
        timeList = sorted(fromPitchData)
        timeList = [(row[0][0], row[-1][0]) for row in timeList]
        endTime = sourcePitchDataList[-1][0]
        invertedTimeList = praatio_utils.invertIntervalList(timeList, endTime)
        invertedTimeList = [(start, stop) for start, stop in invertedTimeList
                            if stop - start > minIntervalLength]
        for start, stop in invertedTimeList:
            pitchList = praatio_utils.getValuesInInterval(sourcePitchDataList,
                                                          start, stop)
            nonMorphPitchData.extend(pitchList)

    # Iterative pitch tier data path
    pitchTierPath = join(pitchPath, "pitchTiers")
    resynthesizedPath = join(pitchPath, "f0_resynthesized_wavs")
    for tmpPath in [pitchTierPath, resynthesizedPath]:
        utils.makeDir(tmpPath)

    # 1. Prepare the data for morphing - acquire the segments to merge
    # (Done elsewhere, with the input fed into this function)

    # 2. Morph the fromData to the toData
    try:
        finalOutputList = morph_sequence.morphChunkedDataLists(fromPitchData,
                                                               toPitchData,
                                                               stepList)
    except IndexError:
        raise MissingPitchDataException()

    fromPitchData = [row for subList in fromPitchData for row in subList]
    toPitchData = [row for subList in toPitchData for row in subList]

    # 3. Save the pitch data and resynthesize the pitch
    mergedDataList = []
    for i in range(0, len(finalOutputList)):
        outputDataList = finalOutputList[i]

        if keepPitchRange is True:
            outputDataList = morph_sequence.morphRange(outputDataList,
                                                       fromPitchData)

        if keepAveragePitch is True:
            outputDataList = morph_sequence.morphAveragePitch(outputDataList,
                                                              fromPitchData)

        if sourcePitchDataList is not None:
            outputDataList.extend(nonMorphPitchData)
            outputDataList.sort()

        stepOutputName = "%s_%0.3g" % (outputName, stepList[i])
        pitchFNFullPath = join(pitchTierPath, "%s.PitchTier" % stepOutputName)
        outputFN = join(resynthesizedPath, "%s.wav" % stepOutputName)
        pointObj = dataio.PointObject2D(outputDataList, dataio.PITCH,
                                        0, fromDuration)
        pointObj.save(pitchFNFullPath)

        outputTime, outputVals = zip(*outputDataList)
        mergedDataList.append((outputTime, outputVals))

        praat_scripts.resynthesizePitch(praatEXE, fromWavFN, pitchFNFullPath,
                                        outputFN, outputMinPitch,
                                        outputMaxPitch)

    # 4. (Optional) Plot the generated contours
    if doPlotPitchSteps:
        fromTime, fromVals = zip(*fromPitchData)
        toTime, toVals = zip(*toPitchData)
        plot_morphed_data.plotF0((fromTime, fromVals),
                                 (toTime, toVals),
                                 mergedDataList,
                                 join(pitchTierPath, "%s.png" % outputName))
def is_reconstructed(self, xy_cutoff=0.3, z_cutoff=0.4):
    """Compare initial and final slab configuration to determine if the
    slab reconstructs during relaxation.

    xy_cutoff: Allowed xy-movement is determined from the covalent radii
               as: xy_cutoff * np.mean(cradii)
    z_cutoff:  Allowed z-movement is determined as z_cutoff * cradii_i
    """
    assert self.A, \
        'Initial slab geometry needed to classify reconstruction'

    # remove adsorbate
    A = self.A[:-1].copy()
    B = self.B[:-1].copy()

    # Order wrt x-positions
    x_indices = np.argsort(A.positions[:, 0])
    A = A[x_indices]
    B = B[x_indices]

    a = A.positions
    b = B.positions

    allowed_z_movement = z_cutoff * cradii[A.get_atomic_numbers()]
    allowed_xy_movement = \
        xy_cutoff * np.mean(cradii[A.get_atomic_numbers()])

    D, D_len = get_distances(p1=a, p2=b, cell=A.cell, pbc=True)
    d_xy = np.linalg.norm(np.diagonal(D)[:2], axis=0)
    d_z = np.diagonal(D)[2:][0]

    cond1 = np.all(d_xy < allowed_xy_movement)
    cond2 = np.all([d_z[i] < allowed_z_movement[i] for i in range(len(a))])

    if cond1 and cond2:  # not reconstructed
        return False
    else:
        return True
def to_args(self):
    """Return command arguments for this object.

    Return a list of the non-default options of this ``SoSOptions``
    object in ``sosreport`` command line argument notation:
    ``["--all-logs", "-vvv"]``
    """
    def has_value(name, value):
        """ Test for non-null option values. """
        null_values = ("False", "None", "[]", '""', "''", "0")
        if not value or value in null_values:
            return False
        if name in _arg_defaults:
            if str(value) == str(_arg_defaults[name]):
                return False
        return True

    def filter_opt(name, value):
        """ Filter out preset and null-valued options. """
        if name in ("add_preset", "del_preset", "desc", "note"):
            return False
        return has_value(name, value)

    def argify(name, value):
        """ Convert sos option notation to command line arguments. """
        # Handle --verbosity specially
        if name.startswith("verbosity"):
            arg = "-" + int(value) * "v"
            return arg
        name = name.replace("_", "-")
        value = ",".join(value) if _is_seq(value) else value
        if value is not True:
            opt = "%s %s" % (name, value)
        else:
            opt = name
        arg = "--" + opt if len(opt) > 1 else "-" + opt
        return arg

    opt_items = sorted(self.dict().items(), key=lambda x: x[0])
    return [argify(n, v) for (n, v) in opt_items if filter_opt(n, v)]
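An illustrative sketch of what `argify` produces (the option values here are hypothetical, and we assume `_is_seq` recognizes lists):

# name             value            -> argument
# "all_logs"       True             -> "--all-logs"
# "verbosity"      3                -> "-vvv"
# "skip_plugins"   ["kvm", "xen"]   -> "--skip-plugins kvm,xen"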
def _update_tcs_helper_catalogue_tables_info_with_new_tables(self):
    """update tcs helper catalogue tables info with new tables

    .. todo ::

        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
    """
    self.log.debug(
        'starting the ``_update_tcs_helper_catalogue_tables_info_with_new_tables`` method')

    sqlQuery = u"""
        SELECT max(id) as thisId FROM tcs_helper_catalogue_tables_info;
    """ % locals()
    thisId = readquery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.cataloguesDbConn,
        quiet=False
    )
    try:
        highestId = thisId[0]["thisId"] + 1
    except (IndexError, TypeError):
        highestId = 1

    sqlQuery = u"""
        SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES
        WHERE TABLE_TYPE='BASE TABLE'
        AND TABLE_SCHEMA like '%%catalogues%%'
        and TABLE_NAME like "tcs_cat%%";
    """ % locals()
    tablesInDatabase = readquery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.cataloguesDbConn,
        quiet=False
    )

    sqlQuery = u"""
        SELECT table_name, old_table_name FROM tcs_helper_catalogue_tables_info;
    """ % locals()
    tableList = readquery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.cataloguesDbConn,
        quiet=False
    )

    tbList = []
    oldList = []
    for tb in tableList:
        oldList.append(tb["old_table_name"])
    for tb in tableList:
        if tb["old_table_name"] not in tbList:
            tbList.append(tb["old_table_name"])
        if tb["table_name"] not in tbList:
            tbList.append(tb["table_name"])

    for tb in tablesInDatabase:
        if tb["TABLE_NAME"] not in tbList:
            thisTableName = tb["TABLE_NAME"]
            print("`%(thisTableName)s` added to `tcs_helper_catalogue_tables_info` database table" % locals())
            sqlQuery = u"""
                INSERT INTO tcs_helper_catalogue_tables_info (
                    id, table_name
                ) VALUES (
                    %(highestId)s, "%(thisTableName)s"
                )""" % locals()
            writequery(
                log=self.log,
                sqlQuery=sqlQuery,
                dbConn=self.cataloguesDbConn,
            )
            highestId += 1

    self.log.debug(
        'completed the ``_update_tcs_helper_catalogue_tables_info_with_new_tables`` method')
    return None
def convert_hpdist(self, node):
    """
    Convert the given node into a probability mass function for the
    hypo depth distribution.

    :param node: a hypoDepthDist node
    :returns: a :class:`openquake.hazardlib.pmf.PMF` instance
    """
    with context(self.fname, node):
        hcdist = [(hd['probability'], hd['depth'])
                  for hd in node.hypoDepthDist]
    if not self.spinning_floating:  # consider the first hypocenter
        hcdist = [(1, hcdist[0][1])]
    return pmf.PMF(hcdist)
def info(self, text):
    """
    Add an INFO-level log message.
    """
    self.logger.info("{}{}".format(self.message_prefix, text))
def _set_metric(self, metric_name, metric_type, value, tags=None):
    """Set a metric"""
    if metric_type == self.GAUGE:
        self.gauge(metric_name, value, tags=tags)
    else:
        self.log.error('Metric type "{}" unknown'.format(metric_type))
def import_file(self, taskfileinfo):
    """Import the file for the given taskfileinfo

    This will also update the status to :data:`Reftrack.IMPORTED`.
    This will also call :meth:`fetch_new_children`, because after the
    import we might have new children.

    :param taskfileinfo: the taskfileinfo to import. If None is given,
                         try to import the current reference
    :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo` | None
    :returns: None
    :rtype: None
    :raises: :class:`ReftrackIntegrityError`
    """
    assert self.status() is None, \
        "Entity is already in scene. Use replace instead."
    refobjinter = self.get_refobjinter()
    refobj = self.create_refobject()
    with self.set_parent_on_new(refobj):
        refobjinter.import_taskfile(refobj, taskfileinfo)
    self.set_refobj(refobj)
    self.set_status(self.IMPORTED)
    self.fetch_new_children()
    self.update_restrictions()
    self.emit_data_changed()
def submit(self, fn, *args, **kwargs):
    """Submit an operation"""
    if not self.asynchronous:
        return fn(*args, **kwargs)
    raise NotImplementedError
def unkown_field(self, value=None):
    """Corresponds to IDD Field `unkown_field`
    Empty field in data.

    Args:
        value (str): value for IDD Field `unkown_field`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} needs to be of type str '
                             'for field `unkown_field`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `unkown_field`')
    self._unkown_field = value
def PublishableBox(publishable, box_type, nodelist, model=None):
    "add some content type info of self.target"
    if not model:
        model = publishable.content_type.model_class()

    box_class = model.box_class
    if box_class == PublishableBox:
        box_class = Box

    return box_class(publishable, box_type, nodelist, model=model)
def draw_line(self, x1, y1, x2, y2):
    """Draw a line on the current rendering target.

    Args:
        x1 (int): The x coordinate of the start point.
        y1 (int): The y coordinate of the start point.
        x2 (int): The x coordinate of the end point.
        y2 (int): The y coordinate of the end point.

    Raises:
        SDLError: If an error is encountered.
    """
    check_int_err(lib.SDL_RenderDrawLine(self._ptr, x1, y1, x2, y2))
def _combine_ranges(ranges):
    """
    This function takes a list of row-ranges (as returned by `_parse_row`)
    ordered by rows, and produces a list of distinct rectangular ranges
    within this grid.

    Within this function we define a 2d-range as a rectangular set of cells
    such that:
      - there are no empty rows / columns within this rectangle;
      - the rectangle is surrounded by empty rows / columns on all sides;
      - no subset of this rectangle comprises a valid 2d-range;
      - separate 2d-ranges are allowed to touch at a corner.
    """
    ranges2d = []
    for irow, rowranges in enumerate(ranges):
        ja = 0
        jb = 0
        while jb < len(rowranges):
            bcol0, bcol1 = rowranges[jb]
            if ja < len(ranges2d):
                _, arow1, acol0, acol1 = ranges2d[ja]
                if arow1 < irow:
                    ja += 1
                    continue
                assert arow1 == irow or arow1 == irow + 1
            else:
                acol0 = acol1 = 1000000000
            if bcol0 == acol0 and bcol1 == acol1:
                ranges2d[ja][1] = irow + 1
                ja += 1
                jb += 1
            elif bcol1 <= acol0:
                ranges2d.insert(ja, [irow, irow + 1, bcol0, bcol1])
                ja += 1
                jb += 1
            elif bcol0 >= acol1:
                ja += 1
            else:
                assert ja < len(ranges2d)
                ranges2d[ja][1] = irow + 1
                if bcol0 < acol0:
                    ranges2d[ja][2] = bcol0
                if bcol1 > acol1:
                    ranges2d[ja][3] = acol1 = bcol1
                    ja = _collapse_ranges(ranges2d, ja)
                jb += 1
    return ranges2d
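A small worked example (hypothetical input; each inner list holds the `(col_start, col_end)` ranges of one row, and each output rectangle is `[row_start, row_end, col_start, col_end]`). The second call stays within branches that never reach the module's `_collapse_ranges` helper:

# Two rows that each span columns 0-2 merge into one rectangle:
print(_combine_ranges([[(0, 2)], [(0, 2)]]))   # [[0, 2, 0, 2]]
# A gap between column ranges keeps the rectangles separate:
print(_combine_ranges([[(0, 1), (3, 4)]]))     # [[0, 1, 0, 1], [0, 1, 3, 4]]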
def load_lc_data(filename, indep, dep, indweight=None, mzero=None, dir='./'):
    """
    load dictionary with lc data
    """
    if '/' in filename:
        path, filename = os.path.split(filename)
    else:
        # TODO: this needs to change to be directory of the .phoebe file
        path = dir
    load_file = os.path.join(path, filename)
    lcdata = np.loadtxt(load_file)
    ncol = len(lcdata[0])
    if dep == 'Magnitude':
        mag = lcdata[:, 1]
        flux = 10**(-0.4*(mag - mzero))
        lcdata[:, 1] = flux

    d = {}
    d['phoebe_lc_time'] = lcdata[:, 0]
    d['phoebe_lc_flux'] = lcdata[:, 1]

    if indweight == "Standard deviation":
        if ncol >= 3:
            d['phoebe_lc_sigmalc'] = lcdata[:, 2]
        else:
            logger.warning('A sigma column was mentioned in the .phoebe file '
                           'but is not present in the lc data file')
    elif indweight == "Standard weight":
        if ncol >= 3:
            sigma = np.sqrt(1/lcdata[:, 2])
            d['phoebe_lc_sigmalc'] = sigma
            logger.warning('Standard weight has been converted to '
                           'Standard deviation.')
        else:
            logger.warning('A sigma column was mentioned in the .phoebe file '
                           'but is not present in the lc data file')
    else:
        logger.warning('Phoebe 2 currently only supports standard deviation')

    # dataset.set_value(check_visible=False, **d)
    return d
def diff_stats(self, ids):
    """Compute diff stats for a set of results.

    :param ids: Result IDs as int list.
    :return: :class:`results.DiffStats <results.DiffStats>` object
    :rtype: results.DiffStats
    """
    schema = DiffStatsSchema()
    resp = self.service.post(self.base,
                             params={'stats': 'diff'},
                             json=[{'id': str(x)} for x in ids])
    return self.service.decode(schema, resp)
def _get_setter_fun(object_type,            # type: Type
                    parameter,              # type: Parameter
                    private_property_name   # type: str
                    ):
    """
    Utility method to find the overridden setter function for a given
    property, or generate a new one.

    :param object_type:
    :param parameter:
    :param private_property_name:
    :return:
    """
    # the property will have the same name as the constructor argument
    property_name = parameter.name

    overridden_setters = getmembers(
        object_type, _has_annotation(__SETTER_OVERRIDE_ANNOTATION, property_name))

    if len(overridden_setters) > 0:
        # -- check that we only have one
        if len(overridden_setters) > 1:
            raise DuplicateOverrideError('Setter is overridden more than once '
                                         'for attribute name : %s' % property_name)

        # -- use the overridden setter
        setter_fun = overridden_setters[0][1]
        try:  # python 2
            setter_fun = setter_fun.im_func
        except AttributeError:
            pass

        # -- find the parameter name and check the signature
        s = signature(setter_fun)
        p = [attribute_name for attribute_name, param in s.parameters.items()
             if attribute_name != 'self']
        if len(p) != 1:
            try:
                qname = setter_fun.__qualname__
            except AttributeError:
                qname = setter_fun.__name__
            raise IllegalSetterSignatureException(
                'overridden setter must have only 1 non-self argument, found '
                '%s for function %s' % (len(s.parameters.items()) - 1, qname))
        var_name = p[0]
    else:
        # -- create the setter:
        # ** Dynamically compile a wrapper with correct argument name **
        sig = Signature(parameters=[Parameter('self', kind=Parameter.POSITIONAL_OR_KEYWORD),
                                    parameter])

        @with_signature(sig)
        def autoprops_generated_setter(self, **kwargs):
            setattr(self, private_property_name, kwargs.popitem()[1])

        setter_fun = autoprops_generated_setter
        var_name = property_name

    return setter_fun, var_name
def encodeDeltas(self, dx, dy):
    """Return the SDR for dx,dy"""
    dxe = self.dxEncoder.encode(dx)
    dye = self.dyEncoder.encode(dy)
    ex = numpy.outer(dxe, dye)
    return ex.flatten().nonzero()[0]
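A standalone sketch of the outer-product trick this relies on, using dummy one-hot vectors in place of the real encoders:

import numpy
dxe = numpy.array([0, 1, 0])      # active bit for dx
dye = numpy.array([0, 0, 1])      # active bit for dy
ex = numpy.outer(dxe, dye)        # 3x3 grid; one cell active per (dx, dy) pair
print(ex.flatten().nonzero()[0])  # [5] -- the flat index encodes both deltas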
async def close(self, timeout=5) -> None:
    """Stop a ffmpeg instance."""
    if not self.is_running:
        _LOGGER.warning("FFmpeg isn't running!")
        return

    # Can't use communicate because we attach the output to a streamreader
    def _close():
        """Close ffmpeg."""
        self._proc.stdin.write(b"q")
        self._proc.wait(timeout=timeout)

    # send stop to ffmpeg
    try:
        await self._loop.run_in_executor(None, _close)
        _LOGGER.debug("Close FFmpeg process")
    except (subprocess.TimeoutExpired, ValueError):
        _LOGGER.warning("Timeout while waiting for FFmpeg")
        self.kill()
    finally:
        self._clear()
def main(raw_args=None):
    """Run the iotile-emulate script.

    Args:
        raw_args (list): Optional list of command line arguments.  If not
            passed these are pulled from sys.argv.
    """
    if raw_args is None:
        raw_args = sys.argv[1:]

    parser = build_parser()
    args = parser.parse_args(raw_args)

    if args.firmware_image is None and args.gdb is None:
        print("You must specify either a firmware image or attach a debugger with --gdb <PORT>")
        return 1

    test_args = ['qemu-system-gnuarmeclipse', '-verbose', '-verbose', '-board',
                 'STM32F0-Discovery', '-nographic', '-monitor', 'null',
                 '-serial', 'null', '--semihosting-config',
                 'enable=on,target=native', '-d', 'unimp,guest_errors']

    if args.firmware_image:
        test_args += ['-image', args.firmware_image]

    if args.gdb:
        test_args += ['--gdb', 'tcp::%d' % args.gdb]

    proc = subprocess.Popen(test_args, stdout=sys.stdout, stderr=sys.stderr)

    try:
        proc.communicate()
    except KeyboardInterrupt:
        proc.terminate()

    return 0
def run(self):
    """Threading callback"""
    self.viewing = True
    while self.viewing and self._lock.acquire():
        try:
            line = self._readline()
        except:
            pass
        else:
            logger.info(line)
        self._lock.release()
        time.sleep(0)
def write_yaml(data, out_path, Dumper=yaml.Dumper, **kwds):
    '''Write python dictionary to YAML'''
    import errno
    import os

    class OrderedDumper(Dumper):
        pass

    def _dict_representer(dumper, data):
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
            data.items())

    OrderedDumper.add_representer(OrderedDict, _dict_representer)

    # Make directory for calibration file if it does not exist
    base_path, file_name = os.path.split(out_path)
    try:
        os.makedirs(base_path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(base_path):
            pass
        else:
            raise

    # Write dictionary to YAML
    with open(out_path, 'w') as f:
        return yaml.dump(data, f, OrderedDumper,
                         default_flow_style=False, **kwds)
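A minimal usage sketch (the output path is hypothetical; the parent directory is created on demand):

from collections import OrderedDict

settings = OrderedDict([('camera', 'cam0'), ('exposure_ms', 12.5)])
write_yaml(settings, '/tmp/calibration/settings.yml')  # keys stay in insertion order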
def __set_style_sheet(self):
    """
    Sets the Widget stylesheet.
    """
    colors = map(
        lambda x: "rgb({0}, {1}, {2}, {3})".format(x.red(),
                                                   x.green(),
                                                   x.blue(),
                                                   int(self.__opacity * 255)),
        (self.__color, self.__background_color, self.__border_color))
    self.setStyleSheet(self.__style.format(*colors))
def GetFileEntryByPathSpec(self, path_spec):
    """Retrieves a file entry for a path specification.

    Args:
        path_spec (PathSpec): a path specification.

    Returns:
        TSKPartitionFileEntry: a file entry or None if not available.
    """
    tsk_vs_part, partition_index = tsk_partition.GetTSKVsPartByPathSpec(
        self._tsk_volume, path_spec)

    location = getattr(path_spec, 'location', None)

    # The virtual root file has no corresponding TSK volume system part
    # object but should have a location.
    if tsk_vs_part is None:
        if location is None or location != self.LOCATION_ROOT:
            return None

        return tsk_partition_file_entry.TSKPartitionFileEntry(
            self._resolver_context, self, path_spec, is_root=True,
            is_virtual=True)

    if location is None and partition_index is not None:
        path_spec.location = '/p{0:d}'.format(partition_index)

    return tsk_partition_file_entry.TSKPartitionFileEntry(
        self._resolver_context, self, path_spec)
def singleton(cls):
    """
    Singleton decorator: add @singleton above the definition of any class
    that should be a singleton.
    """
    INSTANCES = {}

    def _singleton(*args, **kwargs):
        if cls not in INSTANCES:
            INSTANCES[cls] = cls(*args, **kwargs)
        return INSTANCES[cls]

    return _singleton
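A quick usage sketch (the decorated class is hypothetical):

@singleton
class Config(object):
    def __init__(self):
        self.values = {}

a = Config()
b = Config()
assert a is b  # both names point at the same instance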
def _write_triggers(self, fh, triggers, indent=""):
    """Write triggers to a file handle.

    Parameters:
        fh (file): file object.
        triggers (list): list of triggers to write.
        indent (str): indentation for each line.
    """
    for trig in triggers:
        fh.write(indent + "+ " + self._write_wrapped(trig["trigger"],
                                                     indent=indent) + "\n")
        d = trig

        if d.get("previous"):
            fh.write(indent + "% " + self._write_wrapped(d["previous"],
                                                         indent=indent) + "\n")

        for cond in d["condition"]:
            fh.write(indent + "* " + self._write_wrapped(cond,
                                                         indent=indent) + "\n")

        if d.get("redirect"):
            fh.write(indent + "@ " + self._write_wrapped(d["redirect"],
                                                         indent=indent) + "\n")

        for reply in d["reply"]:
            fh.write(indent + "- " + self._write_wrapped(reply,
                                                         indent=indent) + "\n")

        fh.write("\n")
def canvasPressEvent(self, e):
    """
    Handle canvas press events so we know when user is capturing the rect.

    :param e: A Qt event object.
    :type e: QEvent
    """
    self.start_point = self.toMapCoordinates(e.pos())
    self.end_point = self.start_point
    self.is_emitting_point = True
    self.show_rectangle(self.start_point, self.end_point)
def push(self, message, device=None, title=None, url=None, url_title=None,
         priority=None, timestamp=None, sound=None):
    """Pushes the notification, returns the Requests response.

    Arguments:
    message -- your message

    Keyword arguments:
    device -- your user's device name to send the message directly to that
              device, rather than all of the user's devices
    title -- your message's title, otherwise your app's name is used
    url -- a supplementary URL to show with your message
    url_title -- a title for your supplementary URL, otherwise just the URL
                 is shown
    priority -- send as -1 to always send as a quiet notification, 1 to
                display as high-priority and bypass the user's quiet hours,
                or 2 to also require confirmation from the user
    timestamp -- a Unix timestamp of your message's date and time to
                 display to the user, rather than the time your message is
                 received by our API
    sound -- the name of one of the sounds supported by device clients to
             override the user's default sound choice
    """
    api_url = 'https://api.pushover.net/1/messages.json'
    payload = {
        'token': self.api_token,
        'user': self.user,
        'message': message,
        'device': device,
        'title': title,
        'url': url,
        'url_title': url_title,
        'priority': priority,
        'timestamp': timestamp,
        'sound': sound
    }
    return requests.post(api_url, params=payload)
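A usage sketch (the wrapper class name and token values are hypothetical; `push` simply POSTs the payload and returns the `requests` response):

client = PushoverClient(api_token='<app token>', user='<user key>')  # hypothetical wrapper
resp = client.push("Backup finished", title="nightly job", priority=1)
print(resp.status_code)  # 200 on success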
def fetch_max(self, cluster, metric, topology, component, instance,
              timerange, environ=None):
    '''
    :param cluster:
    :param metric:
    :param topology:
    :param component:
    :param instance:
    :param timerange:
    :param environ:
    :return:
    '''
    components = [component] if component != "*" \
        else (yield get_comps(cluster, environ, topology))

    result = {}
    futures = []
    for comp in components:
        query = self.get_query(metric, comp, instance)
        max_query = "MAX(%s)" % query
        future = get_metrics(cluster, environ, topology, timerange, max_query)
        futures.append(future)

    results = yield futures
    data = self.compute_max(results)
    result = self.get_metric_response(timerange, data, True)

    raise tornado.gen.Return(result)
def _clear(self):
    """Resets all assigned data for the current message."""
    self._finished = False
    self._measurement = None
    self._message = None
    self._message_body = None
def get_resultsets(self, routine, *args):
    """Return a list of lists of dictionaries, for when a query returns
    more than one resultset.
    """
    (query, replacements) = self.__build_raw_query(routine, args)

    # Grab a raw connection from the connection-pool.
    connection = mm.db.ENGINE.raw_connection()

    sets = []
    try:
        cursor = connection.cursor()
        cursor.execute(query, replacements)

        while 1:
            # (column_name, type_, ignore_, ignore_, ignore_, null_ok, column_flags)
            names = [c[0] for c in cursor.description]

            set_ = []
            while 1:
                row_raw = cursor.fetchone()
                if row_raw is None:
                    break

                row = dict(zip(names, row_raw))
                set_.append(row)

            sets.append(list(set_))

            if cursor.nextset() is None:
                break

            # TODO(dustin): nextset() doesn't seem to be sufficient to tell the end.
            if cursor.description is None:
                break
    finally:
        # Return the connection to the pool (won't actually close).
        connection.close()

    return sets
def com_google_fonts_check_monospace_max_advancewidth(ttFont, glyph_metrics_stats):
    """Monospace font has hhea.advanceWidthMax equal to each glyph's
    advanceWidth?"""
    from fontbakery.utils import pretty_print_list

    seems_monospaced = glyph_metrics_stats["seems_monospaced"]
    if not seems_monospaced:
        yield SKIP, ("Font is not monospaced.")
        return

    # hhea:advanceWidthMax is treated as source of truth here.
    max_advw = ttFont['hhea'].advanceWidthMax
    outliers = []
    zero_or_double_width_outliers = []
    # TODO: remove .keys() when fonttools is updated to 3.27
    glyphSet = ttFont.getGlyphSet().keys()
    glyphs = [g for g in glyphSet if g not in ['.notdef', '.null', 'NULL']]
    for glyph_id in glyphs:
        width = ttFont['hmtx'].metrics[glyph_id][0]
        if width != max_advw:
            outliers.append(glyph_id)
        if width == 0 or width == 2 * max_advw:
            zero_or_double_width_outliers.append(glyph_id)

    if outliers:
        outliers_percentage = float(len(outliers)) / len(glyphSet)
        yield WARN, Message("should-be-monospaced",
                            "This seems to be a monospaced font,"
                            " so advanceWidth value should be the same"
                            " across all glyphs, but {}% of them"
                            " have a different value: {}"
                            "".format(round(100 * outliers_percentage, 2),
                                      pretty_print_list(outliers)))

    if zero_or_double_width_outliers:
        yield WARN, Message("variable-monospaced",
                            "Double-width and/or zero-width glyphs"
                            " were detected. These glyphs should be set"
                            " to the same width as all others"
                            " and then add GPOS single pos lookups"
                            " that zeros/doubles the widths as needed:"
                            " {}".format(pretty_print_list(
                                zero_or_double_width_outliers)))
    else:
        yield PASS, ("hhea.advanceWidthMax is equal"
                     " to all glyphs' advanceWidth in this monospaced font.")
def init_queue_for(
        self,
        queue_identifier: QueueIdentifier,
        items: List[QueueItem_T],
) -> NotifyingQueue:
    """ Create the queue identified by the queue_identifier
    and initialize it with `items`.
    """
    recipient = queue_identifier.recipient
    queue = self.queueids_to_queues.get(queue_identifier)
    assert queue is None

    queue = NotifyingQueue(items=items)
    self.queueids_to_queues[queue_identifier] = queue

    events = self.get_health_events(recipient)

    greenlet_queue = gevent.spawn(
        single_queue_send,
        self,
        recipient,
        queue,
        queue_identifier,
        self.event_stop,
        events.event_healthy,
        events.event_unhealthy,
        self.retries_before_backoff,
        self.retry_interval,
        self.retry_interval * 10,
    )

    if queue_identifier.channel_identifier == CHANNEL_IDENTIFIER_GLOBAL_QUEUE:
        greenlet_queue.name = f'Queue for {pex(recipient)} - global'
    else:
        greenlet_queue.name = (
            f'Queue for {pex(recipient)} - {queue_identifier.channel_identifier}'
        )

    greenlet_queue.link_exception(self.on_error)
    self.greenlets.append(greenlet_queue)

    self.log.debug(
        'new queue created for',
        queue_identifier=queue_identifier,
        items_qty=len(items),
    )

    return queue
def Davis_David(m, x, D, rhol, rhog, Cpl, kl, mul):
    r'''Calculates the two-phase non-boiling heat transfer coefficient of a
    liquid and gas flowing inside a tube of any inclination, as in [1]_ and
    reviewed in [2]_.

    .. math::
        \frac{h_{TP} D}{k_l} = 0.060\left(\frac{\rho_L}{\rho_G}\right)^{0.28}
        \left(\frac{DG_{TP} x}{\mu_L}\right)^{0.87}
        \left(\frac{C_{p,L} \mu_L}{k_L}\right)^{0.4}

    Parameters
    ----------
    m : float
        Mass flow rate [kg/s]
    x : float
        Quality at the specific tube interval [-]
    D : float
        Diameter of the tube [m]
    rhol : float
        Density of the liquid [kg/m^3]
    rhog : float
        Density of the gas [kg/m^3]
    Cpl : float
        Constant-pressure heat capacity of liquid [J/kg/K]
    kl : float
        Thermal conductivity of liquid [W/m/K]
    mul : float
        Viscosity of liquid [Pa*s]

    Returns
    -------
    h : float
        Heat transfer coefficient [W/m^2/K]

    Notes
    -----
    Developed for both vertical and horizontal flow, and flow patterns of
    annular or mist annular flow. Steam-water and air-water were the only
    considered fluid combinations. Quality ranged from 0.1 to 1 in their
    data. [1]_ claimed an AAE of 17%.

    Examples
    --------
    >>> Davis_David(m=1, x=.9, D=.3, rhol=1000, rhog=2.5, Cpl=2300, kl=.6,
    ... mul=1E-3)
    1437.3282869955121

    References
    ----------
    .. [1] Davis, E. J., and M. M. David. "Two-Phase Gas-Liquid Convection
       Heat Transfer. A Correlation." Industrial & Engineering Chemistry
       Fundamentals 3, no. 2 (May 1, 1964): 111-18. doi:10.1021/i160010a005.
    .. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
       Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations
       with Seven Sets of Experimental Data, Including Flow Pattern and
       Tube Inclination Effects." Heat Transfer Engineering 20, no. 1
       (February 1, 1999): 15-40. doi:10.1080/014576399271691.
    '''
    G = m/(pi/4*D**2)
    Prl = Prandtl(Cp=Cpl, mu=mul, k=kl)
    Nu_TP = 0.060*(rhol/rhog)**0.28*(D*G*x/mul)**0.87*Prl**0.4
    return Nu_TP*kl/D
def _set_map_(self, v, load=False):
    """
    Setter method for map_, mapped from YANG variable
    /overlay_gateway/map (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_map_ is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_map_()
    directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=map_.map_,
            is_container='container',
            presence=False,
            yang_name="map",
            rest_name="map",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': u'Specify the vlan to vni mappings for the Overlay Gateway.', u'cli-incomplete-no': None}},
            namespace='urn:brocade.com:mgmt:brocade-tunnels',
            defining_module='brocade-tunnels',
            yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """map_ must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=map_.map_, is_container='container', presence=False, yang_name="map", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify the vlan to vni mappings for the Overlay Gateway.', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)""",
        })

    self.__map_ = t
    if hasattr(self, '_set'):
        self._set()
def _check(user, topic):
    """If the topic has its export_control set to True then all the teams
    under the product team can access the topic's resources.

    :param user:
    :param topic:
    :return: True if check is ok, False otherwise
    """
    # if export_control then check the team is associated to the product, ie.:
    # - the current user belongs to the product's team
    # OR
    # - the product's team belongs to the user's parents teams
    if topic['export_control']:
        product = v1_utils.verify_existence_and_get(topic['product_id'],
                                                    models.PRODUCTS)
        return (user.is_in_team(product['team_id']) or
                product['team_id'] in user.parent_teams_ids)
    return False
def CMOVS(cpu, dest, src):
    """
    Conditional move - Sign (negative).

    Tests the status flags in the EFLAGS register and moves the source
    operand (second operand) to the destination operand (first operand)
    if the given test condition is true.

    :param cpu: current CPU.
    :param dest: destination operand.
    :param src: source operand.
    """
    dest.write(Operators.ITEBV(dest.size, cpu.SF, src.read(), dest.read()))
def _cleanup_markers(context_id, task_ids):
    """Delete the FuriousAsyncMarker entities corresponding to ids."""
    logging.debug("Cleanup %d markers for Context %s",
                  len(task_ids), context_id)

    # TODO: Handle exceptions and retries here.
    delete_entities = [ndb.Key(FuriousAsyncMarker, id) for id in task_ids]
    delete_entities.append(ndb.Key(FuriousCompletionMarker, context_id))

    ndb.delete_multi(delete_entities)

    logging.debug("Markers cleaned.")
def open(self):
    """
    Obtains the lvm handle. Usually you would never need to use this
    method unless you are trying to do operations using the ctypes
    function wrappers in conversion.py

    *Raises:*

    * HandleError
    """
    if not self.handle:
        try:
            path = self.system_dir
        except AttributeError:
            path = ''
        self.__handle = lvm_init(path)
        if not bool(self.__handle):
            raise HandleError("Failed to initialize LVM handle.")
def get_or_create(self, qualifier, new_parameter, **kwargs):
    """ Get a :class:`Parameter` from the ParameterSet, if it does not
    exist, create and attach it.

    Note: running this on a ParameterSet that is NOT a
    :class:`phoebe.frontend.bundle.Bundle` will NOT add the Parameter
    to the bundle, but only the temporary ParameterSet.

    :parameter str qualifier: the qualifier of the :class:`Parameter`
        (note, not the twig)
    :parameter new_parameter: the parameter to attach if no result is found
    :type new_parameter: :class:`Parameter`
    :parameter **kwargs: meta-tags to search - will also be applied to
        new_parameter if it is attached.
    :return: Parameter, created
    :rtype: :class:`Parameter`, bool
    :raises ValueError: if more than 1 result was found using the search
        criteria.
    """
    ps = self.filter_or_get(qualifier=qualifier, **kwargs)
    if isinstance(ps, Parameter):
        return ps, False
    elif len(ps):
        # TODO: custom exception?
        raise ValueError("more than 1 result was found")
    else:
        self._attach_params(ParameterSet([new_parameter]), **kwargs)
        logger.debug("creating and attaching new parameter: {}"
                     .format(new_parameter.qualifier))
        return self.filter_or_get(qualifier=qualifier, **kwargs), True
def getSoname(filename):
    """
    Return the soname of a library.
    """
    cmd = ["objdump", "-p", "-j", ".dynamic", filename]
    m = re.search(r'\s+SONAME\s+([^\s]+)', compat.exec_command(*cmd))
    if m:
        return m.group(1)
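A usage sketch (the library path is hypothetical; the function returns None when the .dynamic section has no SONAME entry):

print(getSoname('/usr/lib/x86_64-linux-gnu/libcrypto.so'))  # e.g. 'libcrypto.so.3'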
def google_news_search(self, query, category_label, num=50):
    '''
    Searches Google News.
    NOTE: Official Google News API is deprecated
    https://developers.google.com/news-search/?hl=en
    NOTE: Google limits the maximum number of documents per query to 100.
    Use multiple related queries to get a bigger corpus.

    Args:
        query (str): The search term.
        category_label (str): The category to assign to the articles.
            These categories are the labels in the generated corpus
        num (Optional[int]): The number of results to return.

    Returns:
        articles: Array of tuples that contains article link & category
            ex. [('IPO','www.cs.columbia.edu')]
    '''
    url = 'https://news.google.com/news?hl=en&q=' + self._encode_query(query) \
          + '&num=' + str(num) + '&output=rss'
    rss = feedparser.parse(url)
    entries = rss['entries']
    articles = []

    for entry in entries:
        link = entry['link']
        articles.append((category_label, link))
    return articles
def openSourceFile(self, fileToOpen):
    """Finds and opens the source file for link target fileToOpen.

    When links like [test](test) are clicked, the file test.md is opened.
    It has to be located next to the currently opened file.
    Relative paths like [test](../test) or [test](folder/test) are also
    possible.
    """
    if self.fileName:
        currentExt = splitext(self.fileName)[1]
        basename, ext = splitext(fileToOpen)
        if ext in ('.html', '') and exists(basename + currentExt):
            self.p.openFileWrapper(basename + currentExt)
            return basename + currentExt
    if exists(fileToOpen) and get_markup_for_file_name(fileToOpen, return_class=True):
        self.p.openFileWrapper(fileToOpen)
        return fileToOpen
def _set_local_preference(self, v, load=False):
    """
    Setter method for local_preference, mapped from YANG variable
    /routing_system/route_map/content/set/local_preference (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_local_preference is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_local_preference() directly.

    YANG Description: BGP local preference path attribute
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=local_preference.local_preference,
            is_container='container',
            presence=False,
            yang_name="local-preference",
            rest_name="local-preference",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': u'BGP local preference path attribute'}},
            namespace='urn:brocade.com:mgmt:brocade-ip-policy',
            defining_module='brocade-ip-policy',
            yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """local_preference must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=local_preference.local_preference, is_container='container', presence=False, yang_name="local-preference", rest_name="local-preference", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP local preference path attribute'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)""",
        })

    self.__local_preference = t
    if hasattr(self, '_set'):
        self._set()
def pymmh3_hash128(key: Union[bytes, bytearray],
                   seed: int = 0,
                   x64arch: bool = True) -> int:
    """
    Implements 128bit murmur3 hash, as per ``pymmh3``.

    Args:
        key: data to hash
        seed: seed
        x64arch: is a 64-bit architecture available?

    Returns:
        integer hash
    """
    if x64arch:
        return pymmh3_hash128_x64(key, seed)
    else:
        return pymmh3_hash128_x86(key, seed)
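A usage sketch, assuming the `pymmh3_hash128_x64`/`pymmh3_hash128_x86` helpers it dispatches to are in scope:

digest = pymmh3_hash128(b"hello world", seed=42)
print(hex(digest))  # a 128-bit integer, so up to 32 hex digits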
def first_visible_line(self, after_scroll_offset=False):
    """
    Return the line number (0 based) of the input document that corresponds
    with the first visible line.
    """
    if after_scroll_offset:
        return self.displayed_lines[self.applied_scroll_offsets.top]
    else:
        return self.displayed_lines[0]
def show(self, view: View, request: Request):
    """Show the welcome page.

    Arguments:
        view {masonite.view.View} -- The Masonite view class.
        request {masonite.request.Request} -- The Masonite request class.

    Returns:
        masonite.view.View -- The Masonite view class.
    """
    return view.render('welcome', {
        'app': request.app().make('Application')
    })
def get(self, user_id):
    """Returns a specific user"""
    user = db.User.find_one(User.user_id == user_id)
    roles = db.Role.all()

    if not user:
        return self.make_response('Unable to find the user requested, '
                                  'might have been removed', HTTP.NOT_FOUND)

    return self.make_response({
        'user': user.to_json(),
        'roles': roles
    }, HTTP.OK)
def get(self, layout, default=None):
    """
    Returns given layout value.

    :param layout: Layout name.
    :type layout: unicode
    :param default: Default value if layout is not found.
    :type default: object
    :return: Action.
    :rtype: QAction
    """
    try:
        return self.__getitem__(layout)
    except KeyError:
        return default
def build_kalman_mean_step(get_transition_matrix_for_timestep,
                           get_transition_noise_for_timestep,
                           get_observation_matrix_for_timestep,
                           get_observation_noise_for_timestep):
    """Build a callable that performs one step of Kalman mean recursion.

    Args:
        get_transition_matrix_for_timestep: callable taking a timestep as an
            integer `Tensor` argument, and returning a `LinearOperator` of
            shape `[latent_size, latent_size]`.
        get_transition_noise_for_timestep: callable taking a timestep as an
            integer `Tensor` argument, and returning a
            `MultivariateNormalLinearOperator` of event shape `[latent_size]`.
        get_observation_matrix_for_timestep: callable taking a timestep as an
            integer `Tensor` argument, and returning a `LinearOperator` of
            shape `[observation_size, observation_size]`.
        get_observation_noise_for_timestep: callable taking a timestep as an
            integer `Tensor` argument, and returning a
            `MultivariateNormalLinearOperator` of event shape
            `[observation_size]`.

    Returns:
        kalman_mean_step: a callable that computes latent state and
            observation means at time `t`, given latent mean at time `t-1`.
    """
    def mean_step(previous_means, t):
        """Single step of prior mean recursion."""
        previous_latent_mean, _ = previous_means
        latent_mean = _propagate_mean(previous_latent_mean,
                                      get_transition_matrix_for_timestep(t - 1),
                                      get_transition_noise_for_timestep(t - 1))
        observation_mean = _propagate_mean(latent_mean,
                                           get_observation_matrix_for_timestep(t),
                                           get_observation_noise_for_timestep(t))
        return (latent_mean, observation_mean)

    return mean_step
def locked_put(self, credentials):
    """Write a Credentials to the Django datastore.

    Args:
        credentials: Credentials, the credentials to store.
    """
    entity, _ = self.model_class.objects.get_or_create(
        **{self.key_name: self.key_value})

    setattr(entity, self.property_name, credentials)
    entity.save()
def lattice_from_abivars(cls=None, *args, **kwargs):
    """
    Returns a `Lattice` object from a dictionary with the Abinit variables
    `acell` and either `rprim` in Bohr or `angdeg`.
    If acell is not given, the Abinit default is used i.e. [1,1,1] Bohr

    Args:
        cls: Lattice class to be instantiated.
            pymatgen.core.lattice.Lattice if `cls` is None

    Example:
        lattice_from_abivars(acell=3*[10], rprim=np.eye(3))
    """
    cls = Lattice if cls is None else cls
    kwargs.update(dict(*args))
    d = kwargs

    rprim = d.get("rprim", None)
    angdeg = d.get("angdeg", None)
    acell = d["acell"]

    if rprim is not None:
        if angdeg is not None:
            raise ValueError("angdeg and rprim are mutually exclusive")
        rprim = np.reshape(rprim, (3, 3))
        rprimd = [float(acell[i]) * rprim[i] for i in range(3)]
        # Call pymatgen constructors (note that pymatgen uses Angstrom instead of Bohr).
        return cls(ArrayWithUnit(rprimd, "bohr").to("ang"))

    elif angdeg is not None:
        angdeg = np.reshape(angdeg, 3)

        if np.any(angdeg <= 0.):
            raise ValueError("Angles must be > 0 but got %s" % str(angdeg))
        if angdeg.sum() >= 360.:
            raise ValueError("The sum of angdeg must be lower than 360, "
                             "angdeg %s" % str(angdeg))

        # This code follows the implementation in ingeo.F90
        # See also http://www.abinit.org/doc/helpfiles/for-v7.8/input_variables/varbas.html#angdeg
        tol12 = 1e-12
        pi, sin, cos, sqrt = np.pi, np.sin, np.cos, np.sqrt
        rprim = np.zeros((3, 3))
        if (abs(angdeg[0] - angdeg[1]) < tol12 and
                abs(angdeg[1] - angdeg[2]) < tol12 and
                abs(angdeg[0] - 90.) + abs(angdeg[1] - 90.) + abs(angdeg[2] - 90) > tol12):
            # Treat the case of equal angles (except all right angles):
            # generates trigonal symmetry wrt third axis
            cosang = cos(pi * angdeg[0] / 180.0)
            a2 = 2.0 / 3.0 * (1.0 - cosang)
            aa = sqrt(a2)
            cc = sqrt(1.0 - a2)
            rprim[0, 0] = aa; rprim[0, 1] = 0.0; rprim[0, 2] = cc
            rprim[1, 0] = -0.5 * aa; rprim[1, 1] = sqrt(3.0) * 0.5 * aa; rprim[1, 2] = cc
            rprim[2, 0] = -0.5 * aa; rprim[2, 1] = -sqrt(3.0) * 0.5 * aa; rprim[2, 2] = cc
        else:
            # Treat all the other cases
            rprim[0, 0] = 1.0
            rprim[1, 0] = cos(pi * angdeg[2] / 180.)
            rprim[1, 1] = sin(pi * angdeg[2] / 180.)
            rprim[2, 0] = cos(pi * angdeg[1] / 180.)
            rprim[2, 1] = (cos(pi * angdeg[0] / 180.0) - rprim[1, 0] * rprim[2, 0]) / rprim[1, 1]
            rprim[2, 2] = sqrt(1.0 - rprim[2, 0]**2 - rprim[2, 1]**2)

        # Call pymatgen constructors (note that pymatgen uses Angstrom instead of Bohr).
        rprimd = [float(acell[i]) * rprim[i] for i in range(3)]
        return cls(ArrayWithUnit(rprimd, "bohr").to("ang"))

    raise ValueError("Don't know how to construct a Lattice from dict:\n%s" % pformat(d))
def build(self):
    """Build straight helix along z-axis, starting with CA1 on x-axis"""
    ang_per_res = (2 * numpy.pi) / self.residues_per_turn
    atom_offsets = _atom_offsets[self.helix_type]

    if self.handedness == 'l':
        handedness = -1
    else:
        handedness = 1

    atom_labels = ['N', 'CA', 'C', 'O']
    if all([x in atom_offsets.keys() for x in atom_labels]):
        res_label = 'GLY'
    else:
        res_label = 'UNK'

    monomers = []
    for i in range(self.num_monomers):
        residue = Residue(mol_code=res_label, ampal_parent=self)
        atoms_dict = OrderedDict()
        for atom_label in atom_labels:
            r, zeta, z_shift = atom_offsets[atom_label]
            rot_ang = ((i * ang_per_res) + zeta) * handedness
            z = (self.rise_per_residue * i) + z_shift
            coords = cylindrical_to_cartesian(
                radius=r, azimuth=rot_ang, z=z, radians=True)
            atom = Atom(
                coordinates=coords, element=atom_label[0],
                ampal_parent=residue, res_label=atom_label)
            atoms_dict[atom_label] = atom
        residue.atoms = atoms_dict
        monomers.append(residue)

    self._monomers = monomers
    self.relabel_monomers()
    self.relabel_atoms()
    return
def run_epilogue(self):
    """Run the epilogue script in the current working directory.

    raises:
        subprocess.CalledProcessError: if the script does not finish with
            exit code zero.
    """
    logger = logging.getLogger(__name__)
    if self.epilogue is not None:
        with tempfile.NamedTemporaryFile('w', delete=False) as epilogue_fh:
            epilogue_fh.write(self.epilogue)
            tempfilename = epilogue_fh.name
        set_executable(tempfilename)
        try:
            sp.check_output([tempfilename, ], stderr=sp.STDOUT)
        except sp.CalledProcessError as e:
            logger.error(dedent(r'''
                Epilogue script did not exit cleanly.
                CWD: {cwd}
                epilogue:
                ---
                {epilogue}
                ---
                response:
                ---
                {response}
                ---
                ''').format(cwd=os.getcwd(),
                            epilogue=self.epilogue,
                            response=e.output))
            raise
        finally:
            os.unlink(tempfilename)
Run the epilogue script in the current working directory. raises: subprocess.CalledProcessError: if the script does not finish with exit code zero.
def get_order(self, order_id): """ See more: http://developer.oanda.com/rest-live/orders/#getInformationForAnOrder """ url = "{0}/{1}/accounts/{2}/orders/{3}".format( self.domain, self.API_VERSION, self.account_id, order_id ) try: return self._Client__call(uri=url, method="get") except RequestException: return False except AssertionError: return False
See more: http://developer.oanda.com/rest-live/orders/#getInformationForAnOrder
def Process(self, parser_mediator, root_item=None, **kwargs): """Parses a document summary information OLECF item. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. root_item (Optional[pyolecf.item]): root item of the OLECF file. Raises: ValueError: If the root item is not set. """ # This will raise if unhandled keyword arguments are passed. super(DocumentSummaryInformationOLECFPlugin, self).Process( parser_mediator, **kwargs) if not root_item: raise ValueError('Root item not set.') root_creation_time, root_modification_time = self._GetTimestamps(root_item) for item_name in self.REQUIRED_ITEMS: item = root_item.get_sub_item_by_name(item_name) if not item: continue summary_information = OLECFDocumentSummaryInformation(item) event_data = summary_information.GetEventData( data_type='olecf:document_summary_info') event_data.name = 'Document Summary Information' if root_creation_time: date_time = dfdatetime_filetime.Filetime( timestamp=root_creation_time) event = OLECFDocumentSummaryInformationEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) if root_modification_time: date_time = dfdatetime_filetime.Filetime( timestamp=root_modification_time) event = OLECFDocumentSummaryInformationEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a document summary information OLECF item. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. root_item (Optional[pyolecf.item]): root item of the OLECF file. Raises: ValueError: If the root item is not set.
def _parse_xml(self): """Extracts the XML settings into class instances that can operate on the settings to perform the testing functions. """ import xml.etree.ElementTree as ET from os import path #This dict has the keys of XML tags that are required in order for the #CI server to run the repo. When each one is parsed, we change its value #to True and then check that they are all true at the end. required = {"testing": False, "wiki": False} #Make sure the file exists and then import it as XML and read the values out. if path.isfile(self.filepath): tree = ET.parse(self.filepath) vms("Parsing XML tree from {}.".format(self.filepath), 2) root = tree.getroot() if root.tag != "cirepo": raise ValueError("The root tag in a continuous integration settings XML " "file should be a <cirepo> tag.") self._parse_repo(root) for child in root: if child.tag == "cron": if self.server is not None: self.server.cron.settings[self.name] = CronSettings(child) if child.tag == "testing": self.testing = TestingSettings(child) if child.tag == "static": self.static = StaticSettings(child) if child.tag == "wiki": self.wiki["user"] = get_attrib(child, "user", "wiki") self.wiki["password"] = get_attrib(child, "password", "wiki") self.wiki["basepage"] = get_attrib(child, "basepage", "wiki") if child.tag in required: required[child.tag] = True if not all(required.values()): tags = ', '.join(["<{}>".format(t) for t in required]) raise ValueError("{} are required tags in the repo's XML settings file.".format(tags))
Extracts the XML settings into class instances that can operate on the settings to perform the testing functions.
def buy(self, account_id, **params): """https://developers.coinbase.com/api/v2#buy-bitcoin""" if 'amount' not in params and 'total' not in params: raise ValueError("Missing required parameter: 'amount' or 'total'") for required in ['currency', 'payment_method']: if required not in params: raise ValueError("Missing required parameter: %s" % required) response = self._post('v2', 'accounts', account_id, 'buys', data=params) return self._make_api_object(response, Buy)
https://developers.coinbase.com/api/v2#buy-bitcoin
def decode_array(values): """ Decode the values which are bytestrings. """ out = [] for val in values: try: out.append(val.decode('utf8')) except AttributeError: out.append(val) return out
Decode the values which are bytestrings.
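A quick sanity check of the mixed-input behavior (a sketch, assuming decode_array is in scope): bytestrings are decoded, anything without a .decode method is passed through unchanged.

assert decode_array([b'abc', 'def']) == ['abc', 'def']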
def to_pickle(obj, path, compression='infer', protocol=pickle.HIGHEST_PROTOCOL): """ Pickle (serialize) object to file. Parameters ---------- obj : any object Any python object. path : str File path where the pickled object will be stored. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' A string representing the compression to use in the output file. By default, infers from the file extension in specified path. .. versionadded:: 0.20.0 protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible values for this parameter depend on the version of Python. For Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value. For Python >= 3.4, 4 is a valid value. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html .. versionadded:: 0.21.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) >>> original_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") >>> unpickled_df = pd.read_pickle("./dummy.pkl") >>> unpickled_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> import os >>> os.remove("./dummy.pkl") """ path = _stringify_path(path) f, fh = _get_handle(path, 'wb', compression=compression, is_text=False) if protocol < 0: protocol = pickle.HIGHEST_PROTOCOL try: f.write(pickle.dumps(obj, protocol=protocol)) finally: f.close() for _f in fh: _f.close()
Pickle (serialize) object to file. Parameters ---------- obj : any object Any python object. path : str File path where the pickled object will be stored. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' A string representing the compression to use in the output file. By default, infers from the file extension in specified path. .. versionadded:: 0.20.0 protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible values for this parameter depend on the version of Python. For Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value. For Python >= 3.4, 4 is a valid value. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html .. versionadded:: 0.21.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) >>> original_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") >>> unpickled_df = pd.read_pickle("./dummy.pkl") >>> unpickled_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> import os >>> os.remove("./dummy.pkl")
def dihed_iter(self, g_nums, ats_1, ats_2, ats_3, ats_4, \ invalid_error=False): """ Iterator over selected dihedral angles. Angles are in degrees as with :meth:`dihed_single`. See `above <toc-generators_>`_ for more information on calling options. Parameters ---------- g_nums |int| or iterable |int| or |None| -- Indices of the desired geometry ats_1 |int| or iterable |int| or |None| -- Indices of the first atoms ats_2 |int| or iterable |int| or |None| -- Indices of the second atoms ats_3 |int| or iterable |int| or |None| -- Indices of the third atoms ats_4 |int| or iterable |int| or |None| -- Indices of the fourth atoms invalid_error |bool|, optional -- If |False| (the default), |None| values are returned for results corresponding to invalid indices. If |True|, exceptions are raised per normal. Yields ------ dihed |npfloat_| -- Out-of-plane/dihedral angles in degrees for the indicated atom sets `ats_1`-`ats_2`-`ats_3`-`ats_4`, drawn from the respective `g_nums`. Raises ------ ~exceptions.IndexError If an invalid (out-of-range) `g_num` or `at_#` is provided. ~exceptions.ValueError If all iterable objects are not the same length. ~exceptions.ValueError If any corresponding `ats_#` indices are equal. ~opan.error.XYZError (typecode :data:`~opan.error.XYZError.DIHED`) If either of the atom trios (1-2-3 or 2-3-4) is too close to linearity for any group of `ats_#` """ # Suitability of ats_n indices will be checked within the # self.dihed_single() calls and thus no check is needed here. # Import the tuple-generating function from .utils import pack_tups # Print the function inputs if debug mode is on if _DEBUG: # pragma: no cover print("g_nums = {0}".format(g_nums)) print("ats_1 = {0}".format(ats_1)) print("ats_2 = {0}".format(ats_2)) print("ats_3 = {0}".format(ats_3)) print("ats_4 = {0}".format(ats_4)) ## end if # Perform the None substitution arglist = self._none_subst(g_nums, ats_1, ats_2, ats_3, ats_4) # Expand/pack the tuples from the inputs tups = pack_tups(*arglist) # Dump the results if debug mode is on if _DEBUG: # pragma: no cover print(tups) ## end if # Construct the generator using the packed tuples. for tup in tups: yield self._iter_return(tup, self.dihed_single, invalid_error)
Iterator over selected dihedral angles. Angles are in degrees as with :meth:`dihed_single`. See `above <toc-generators_>`_ for more information on calling options. Parameters ---------- g_nums |int| or iterable |int| or |None| -- Indices of the desired geometry ats_1 |int| or iterable |int| or |None| -- Indices of the first atoms ats_2 |int| or iterable |int| or |None| -- Indices of the second atoms ats_3 |int| or iterable |int| or |None| -- Indices of the third atoms ats_4 |int| or iterable |int| or |None| -- Indices of the fourth atoms invalid_error |bool|, optional -- If |False| (the default), |None| values are returned for results corresponding to invalid indices. If |True|, exceptions are raised per normal. Yields ------ dihed |npfloat_| -- Out-of-plane/dihedral angles in degrees for the indicated atom sets `ats_1`-`ats_2`-`ats_3`-`ats_4`, drawn from the respective `g_nums`. Raises ------ ~exceptions.IndexError If an invalid (out-of-range) `g_num` or `at_#` is provided. ~exceptions.ValueError If all iterable objects are not the same length. ~exceptions.ValueError If any corresponding `ats_#` indices are equal. ~opan.error.XYZError (typecode :data:`~opan.error.XYZError.DIHED`) If either of the atom trios (1-2-3 or 2-3-4) is too close to linearity for any group of `ats_#`
def confdate(self): """Date range of the conference the abstract belongs to represented by two tuples in the form (YYYY, MM, DD). """ date = self._confevent.get('confdate', {}) if len(date) > 0: start = {k: int(v) for k, v in date['startdate'].items()} end = {k: int(v) for k, v in date['enddate'].items()} return ((start['@year'], start['@month'], start['@day']), (end['@year'], end['@month'], end['@day'])) else: return ((None, None, None), (None, None, None))
Date range of the conference the abstract belongs to represented by two tuples in the form (YYYY, MM, DD).
def create(self, data, **kwargs):
    """Create a new object.

    Args:
        data (dict): Parameters to send to the server to create the resource
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabCreateError: If the server cannot perform the request

    Returns:
        RESTObject: A new instance of the managed object class built with the data sent by the server
    """
    CreateMixin._check_missing_create_attrs(self, data)
    path = '%s/%s' % (self.path, data.pop('issue_id'))
    server_data = self.gitlab.http_post(path, **kwargs)
    # The epic_issue_id attribute doesn't exist when creating the resource,
    # but is used everywhere else. Let's create it to be consistent client side
    server_data['epic_issue_id'] = server_data['id']
    return self._obj_cls(self, server_data)
Create a new object. Args: data (dict): Parameters to send to the server to create the resource **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server cannot perform the request Returns: RESTObject: A new instance of the managed object class built with the data sent by the server
def load(self, id): #pylint:disable=redefined-builtin """ Retrieves one object from the pickler with the provided id. :param id: an ID to use """ l.debug("LOAD: %s", id) try: l.debug("... trying cached") return self._object_cache[id] except KeyError: l.debug("... cached failed") with self._read_context(id) as u: return VaultUnpickler(self, u).load()
Retrieves one object from the pickler with the provided id. :param id: an ID to use
def collect(cls, sources): """ :param sources: dictionaries with a key 'tectonicRegion' :returns: an ordered list of SourceGroup instances """ source_stats_dict = {} for src in sources: trt = src['tectonicRegion'] if trt not in source_stats_dict: source_stats_dict[trt] = SourceGroup(trt) sg = source_stats_dict[trt] if not sg.sources: # we append just one source per SourceGroup, so that # the memory occupation is insignificant sg.sources.append(src) # return SourceGroups, ordered by TRT string return sorted(source_stats_dict.values())
:param sources: dictionaries with a key 'tectonicRegion' :returns: an ordered list of SourceGroup instances
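A usage sketch (this assumes `collect` is exposed as a classmethod of SourceGroup, as the `cls` parameter suggests, and that SourceGroup instances sort by their TRT string):

sources = [{'tectonicRegion': 'Active Shallow Crust'},
           {'tectonicRegion': 'Stable Continental'},
           {'tectonicRegion': 'Active Shallow Crust'}]
groups = SourceGroup.collect(sources)
# -> two SourceGroup instances, ordered by TRT string; each keeps only a
#    single representative source so memory occupation stays insignificant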
def to_protobuf(self) -> SaveStateProto: """ Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto """ result = SaveStateProto() result.version = str(self.version) result.last_update.CopyFrom(datetime_to_timestamp(self.last_update)) result.plugin_info.CopyFrom(self.plugin_info.to_protobuf()) for key, link_item in self.link_item_dict.items(): result.data[key].CopyFrom(link_item.to_protobuf()) return result
Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto
def discover_resources():
    """
    Searches for translation files matching [catalog].[lang].[format]

    Traverses TRANZ_LOCALE_PATHS:
    -
    | TRANZ_LOCALE_PATHS
    +- messages.fr.yml
    |  messages.en.yml

    And apps paths if TRANZ_SEARCH_LOCALE_IN_APPS is set to True:
    -
    | app_path
    +- TRANZ_DIR_NAME
       +- messages.fr.yml
       |  messages.en.yml

    @rtype: list
    @return: A list of all found translation files
    """
    locale_discovery_paths = list(settings.TRANZ_LOCALE_PATHS)
    if settings.TRANZ_SEARCH_LOCALE_IN_APPS:
        locale_discovery_paths += [os.path.join(app.path, settings.TRANZ_DIR_NAME)
                                   for app in list(apps.app_configs.values())]

    APP_LANGUAGES = [l[0] for l in settings.TRANZ_LANGUAGES]

    resources = []
    for path in locale_discovery_paths:
        if not os.path.isdir(path):
            continue

        # Try to match direct children of discovery paths
        for file in os.listdir(path):
            if os.path.isfile(os.path.join(path, file)):
                try:
                    domain, lang, format = file.split('.')
                except ValueError:
                    continue
                resources.append((format, os.path.join(path, file), lang, domain))

        # Try to match django's LC_MESSAGES directories
        if settings.TRANZ_REPLACE_DJANGO_TRANSLATIONS:
            for lang in APP_LANGUAGES:
                if os.path.isdir(os.path.join(path, lang)):
                    LC_MESSAGES_PATH = os.path.join(path, lang, 'LC_MESSAGES')
                    if os.path.isdir(LC_MESSAGES_PATH):
                        for file in os.listdir(LC_MESSAGES_PATH):
                            try:
                                domain, format = file.split('.')
                            except ValueError:
                                continue
                            resources.append((format, os.path.join(LC_MESSAGES_PATH, file), lang, domain))
    return resources
Searches for translation files matching [catalog].[lang].[format] Traverses TRANZ_LOCALE_PATHS: - | TRANZ_LOCALE_PATHS +- messages.fr.yml | messages.en.yml And apps paths if TRANZ_SEARCH_LOCALE_IN_APPS is set to True: - | app_path +- TRANZ_DIR_NAME +- messages.fr.yml | messages.en.yml @rtype: list @return: A list of all found translation files
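The [catalog].[lang].[format] filename convention the discovery relies on is just a two-dot split; a sketch of that parsing step:

domain, lang, fmt = 'messages.fr.yml'.split('.')
assert (domain, lang, fmt) == ('messages', 'fr', 'yml')
# A name with the wrong number of dots (e.g. 'messages.yml') raises
# ValueError and is skipped by the discovery loop.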
def stop(self): """Stop the daemon.""" pid = None if os.path.exists(self.pidfile): with open(self.pidfile, 'r') as fp: pid = int(fp.read().strip()) if not pid: msg = 'pidfile (%s) does not exist. Daemon not running?\n' sys.stderr.write(msg % self.pidfile) return try: while 1: os.kill(pid, SIGTERM) time.sleep(0.1) except OSError as e: e = str(e) if e.find('No such process') > 0: if os.path.exists(self.pidfile): os.remove(self.pidfile) else: print(e) sys.exit(1)
Stop the daemon.
def _get_conda_version(stdout, stderr): """Callback for get_conda_version.""" # argparse outputs version to stderr in Python < 3.4. # http://bugs.python.org/issue18920 pat = re.compile(r'conda:?\s+(\d+\.\d\S+|unknown)') m = pat.match(stderr.decode().strip()) if m is None: m = pat.match(stdout.decode().strip()) if m is None: raise Exception('output did not match: {0}'.format(stderr)) return m.group(1)
Callback for get_conda_version.
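A quick check of the pattern's behavior; it tolerates both 'conda X.Y.Z' and 'conda: X.Y.Z' style banners:

import re

pat = re.compile(r'conda:?\s+(\d+\.\d\S+|unknown)')
assert pat.match('conda 4.8.3').group(1) == '4.8.3'
assert pat.match('conda: unknown').group(1) == 'unknown'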
def unpack_post(environ, content_length):
    """
    Unpacks a post request query string.
    :param environ: WSGI application environment.
    :param content_length: length of the request body in bytes.
    :return: A dictionary with parameters.
    """
    post_body = environ['wsgi.input'].read(content_length).decode("utf-8")
    data = None
    if "application/x-www-form-urlencoded" in environ["CONTENT_TYPE"]:
        data = dict(parse_qsl(post_body))
    elif "application/json" in environ["CONTENT_TYPE"]:
        data = json.loads(post_body)

    logger.debug("unpack_post:: %s", data)
    return data
Unpacks a post request query string. :param environ: WSGI application environment. :param content_length: length of the request body in bytes. :return: A dictionary with parameters.
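A minimal sketch exercising the JSON branch with a hand-built environ (assumes unpack_post is in scope; in a real WSGI app the server supplies environ):

import io
import json

body = json.dumps({'user': 'alice'}).encode('utf-8')
environ = {'wsgi.input': io.BytesIO(body), 'CONTENT_TYPE': 'application/json'}
assert unpack_post(environ, len(body)) == {'user': 'alice'}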
def _get_localhost_ssh_port(): """Something in the VM chain, either VirtualBox or Machine, helpfully sets up localhost-to-VM forwarding on port 22. We can inspect this rule to determine the port on localhost which gets forwarded to 22 in the VM.""" for line in _get_vm_config(): if line.startswith('Forwarding'): spec = line.split('=')[1].strip('"') name, protocol, host, host_port, target, target_port = spec.split(',') if name == 'ssh' and protocol == 'tcp' and target_port == '22': return host_port raise ValueError('Could not determine localhost port for SSH forwarding')
Something in the VM chain, either VirtualBox or Machine, helpfully sets up localhost-to-VM forwarding on port 22. We can inspect this rule to determine the port on localhost which gets forwarded to 22 in the VM.
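What a matching VirtualBox config line looks like and how its fields unpack (the example line is illustrative, not captured output):

line = 'Forwarding(0)="ssh,tcp,127.0.0.1,2022,,22"'
spec = line.split('=')[1].strip('"')
name, protocol, host, host_port, target, target_port = spec.split(',')
assert (name, protocol, target_port) == ('ssh', 'tcp', '22')
assert host_port == '2022'   # the localhost port the function would return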
def moderators(self, limit=None): """GETs moderators for this subreddit. Calls :meth:`narwal.Reddit.moderators`. :param limit: max number of items to return """ return self._reddit.moderators(self.display_name, limit=limit)
GETs moderators for this subreddit. Calls :meth:`narwal.Reddit.moderators`. :param limit: max number of items to return
def _validate_obj_by_schema(self, obj, obj_nex_id, vc):
    """Creates:
      errors if `obj` does not contain keys in the schema.ALLOWED_KEY_SET,
      warnings if `obj` lacks keys listed in schema.EXPECTED_KEY_SET,
        or if `obj` contains keys not listed in schema.ALLOWED_KEY_SET.
    """
    return self._validate_id_obj_list_by_schema([(obj_nex_id, obj)], vc, group_by_warning=False)
Creates: errors if `obj` does not contain keys in the schema.ALLOWED_KEY_SET, warnings if `obj` lacks keys listed in schema.EXPECTED_KEY_SET, or if `obj` contains keys not listed in schema.ALLOWED_KEY_SET.
def basic_retinotopy_data(hemi, retino_type):
    '''
    basic_retinotopy_data(hemi, t) yields a numpy array of data for the given cortex object hemi
    and retinotopy type t; it does this by looking at the properties in hemi and picking out any
    combination that is commonly used to denote empirical retinotopy data. These common names are
    stored in _predicted_retinotopy_names, in order of preference, which may be modified.
    The argument t should be one of 'polar_angle', 'eccentricity', 'visual_area', or 'weight'.
    Unlike the related functions empirical_retinotopy_data and predicted_retinotopy_data, this
    function calls both of these (predicted first then empirical) in the case that it does not
    find a valid property.
    '''
    dat = _retinotopy_names[retino_type.lower()]
    val = next((hemi.prop(s) for s in six.iterkeys(hemi.properties) if s.lower() in dat), None)
    if val is None and retino_type.lower() != 'weight':
        val = predicted_retinotopy_data(hemi, retino_type)
    if val is None and retino_type.lower() != 'visual_area':
        val = empirical_retinotopy_data(hemi, retino_type)
    return val
basic_retinotopy_data(hemi, t) yields a numpy array of data for the given cortex object hemi and retinotopy type t; it does this by looking at the properties in hemi and picking out any combination that is commonly used to denote empirical retinotopy data. These common names are stored in _predicted_retinotopy_names, in order of preference, which may be modified. The argument t should be one of 'polar_angle', 'eccentricity', 'visual_area', or 'weight'. Unlike the related functions empirical_retinotopy_data and predicted_retinotopy_data, this function calls both of these (predicted first then empirical) in the case that it does not find a valid property.
def _create_struct(data, session): """Create a struct from session data. """ out = Struct() for name in data.dtype.names: item = data[name] # Extract values that are cells (they are doubly wrapped). if isinstance(item, np.ndarray) and item.dtype.kind == 'O': item = item.squeeze().tolist() out[name] = _extract(item, session) return out
Create a struct from session data.
def uploader(func):
    """This method is only used for CKEditor under version 4.5; in newer
    versions, you should use ``upload_success()`` and ``upload_fail()`` instead.

    Decorates the view function that handles the file upload. The upload
    view must return the uploaded image's url. For example::

        from flask import send_from_directory

        app.config['CKEDITOR_FILE_UPLOADER'] = 'upload'  # this value can be endpoint or url

        @app.route('/files/<filename>')
        def uploaded_files(filename):
            path = '/the/uploaded/directory'
            return send_from_directory(path, filename)

        @app.route('/upload', methods=['POST'])
        @ckeditor.uploader
        def upload():
            f = request.files.get('upload')
            f.save(os.path.join('/the/uploaded/directory', f.filename))
            url = url_for('uploaded_files', filename=f.filename)
            return url

    .. versionadded:: 0.3
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        func_num = request.args.get('CKEditorFuncNum')
        # ckeditor = request.args.get('CKEditor')
        # language code used for error message, not used yet.
        # lang_code = request.args.get('langCode')
        # the error message to display when upload failed.
        message = current_app.config['CKEDITOR_UPLOAD_ERROR_MESSAGE']
        url = func(*args, **kwargs)
        return Markup('''<script type="text/javascript">
    window.parent.CKEDITOR.tools.callFunction(%s, "%s", "%s");</script>''' % (func_num, url, message))
    return wrapper
This method is only used for CKEditor under version 4.5; in newer versions, you should use ``upload_success()`` and ``upload_fail()`` instead. Decorates the view function that handles the file upload. The upload view must return the uploaded image's url. For example:: from flask import send_from_directory app.config['CKEDITOR_FILE_UPLOADER'] = 'upload' # this value can be endpoint or url @app.route('/files/<filename>') def uploaded_files(filename): path = '/the/uploaded/directory' return send_from_directory(path, filename) @app.route('/upload', methods=['POST']) @ckeditor.uploader def upload(): f = request.files.get('upload') f.save(os.path.join('/the/uploaded/directory', f.filename)) url = url_for('uploaded_files', filename=f.filename) return url .. versionadded:: 0.3
def on_lock(self, widget, data=None):
    """Locks the selected core element"""
    path_list = None
    if self.view is not None:
        model, path_list = self.tree_view.get_selection().get_selected_rows()
    models = [self.list_store[path][self.MODEL_STORAGE_ID] for path in path_list] if path_list else []
    if models:
        if len(models) > 1:
            self._logger.warning("Please select only one element to be locked.")
        try:
            self.model.global_variable_manager.lock_variable(models[0])
        except AttributeError as e:
            self._logger.warning("The respective core element of {1}.list_store couldn't be locked. -> {0}"
                                 "".format(e, self.__class__.__name__))
        return True
    else:
        self._logger.warning("Please select an element to be locked.")
Locks the selected core element
def prepare_samples(job, patient_dict, univ_options): """ Obtain the input files for the patient and write them to the file store. :param dict patient_dict: The input fastq dict patient_dict: |- 'tumor_dna_fastq_[12]' OR 'tumor_dna_bam': str |- 'tumor_rna_fastq_[12]' OR 'tumor_rna_bam': str |- 'normal_dna_fastq_[12]' OR 'normal_dna_bam': str |- 'mutation_vcf': str |- 'hla_haplotype_files': str +- 'patient_id': str :param dict univ_options: Dict of universal options used by almost all tools :return: Updated fastq dict output_dict: |- 'tumor_dna_fastq_[12]' OR 'tumor_dna_bam': fsID |- 'tumor_rna_fastq_[12]' OR 'tumor_rna_bam': fsID |- 'normal_dna_fastq_[12]' OR 'normal_dna_bam': fsID |- 'mutation_vcf': fsID |- 'hla_haplotype_files': fsId +- 'patient_id': str :rtype: dict """ job.fileStore.logToMaster('Downloading Inputs for %s' % univ_options['patient']) # For each sample type, check if the prefix is an S3 link or a regular file # Download S3 files. output_dict = {} for input_file in patient_dict: if not input_file.endswith(('bam', 'bai', '_1', '_2', 'files', 'vcf', 'bedpe')): output_dict[input_file] = patient_dict[input_file] continue output_dict[input_file] = get_pipeline_inputs( job, ':'.join([univ_options['patient'], input_file]), patient_dict[input_file], encryption_key=(univ_options['sse_key'] if patient_dict['ssec_encrypted'] else None), per_file_encryption=univ_options['sse_key_is_master'], gdc_download_token=univ_options['gdc_download_token']) return output_dict
Obtain the input files for the patient and write them to the file store. :param dict patient_dict: The input fastq dict patient_dict: |- 'tumor_dna_fastq_[12]' OR 'tumor_dna_bam': str |- 'tumor_rna_fastq_[12]' OR 'tumor_rna_bam': str |- 'normal_dna_fastq_[12]' OR 'normal_dna_bam': str |- 'mutation_vcf': str |- 'hla_haplotype_files': str +- 'patient_id': str :param dict univ_options: Dict of universal options used by almost all tools :return: Updated fastq dict output_dict: |- 'tumor_dna_fastq_[12]' OR 'tumor_dna_bam': fsID |- 'tumor_rna_fastq_[12]' OR 'tumor_rna_bam': fsID |- 'normal_dna_fastq_[12]' OR 'normal_dna_bam': fsID |- 'mutation_vcf': fsID |- 'hla_haplotype_files': fsId +- 'patient_id': str :rtype: dict
def relatedness_title(tt_pairs, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_REL_API):
    '''
    Get the semantic relatedness among pairs of entities. Entities are
    indicated by their Wikipedia titles.
    :param tt_pairs: either one pair or a list of pairs of entity titles.
    :param gcube_token: the authentication token provided by the D4Science infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    '''
    return _relatedness("tt", tt_pairs, gcube_token, lang, api)
Get the semantic relatedness among pairs of entities. Entities are indicated by their Wikipedia titles. :param tt_pairs: either one pair or a list of pairs of entity titles. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint.
def attachment_md5(self): ''' Calculate the checksum of the file upload. For binary files (e.g. PDFs), the MD5 of the file itself is used. Archives are unpacked and the MD5 is generated from the sanitized textfiles in the archive. This is done with some smartness: - Whitespace and tabs are removed before comparison. - For MD5, ordering is important, so we compute it on the sorted list of file hashes. ''' MAX_MD5_FILE_SIZE = 10000 md5_set = [] def md5_add_text(text): try: text = str(text, errors='ignore') text = text.replace(' ', '').replace( '\n', '').replace('\t', '') hexvalues = hashlib.md5(text.encode('utf-8')).hexdigest() md5_set.append(hexvalues) except Exception as e: # not unicode decodable pass def md5_add_file(f): try: md5 = hashlib.md5() for chunk in f.chunks(): md5.update(chunk) md5_set.append(md5.hexdigest()) except Exception: pass try: if zipfile.is_zipfile(self.attachment.path): zf = zipfile.ZipFile(self.attachment.path, 'r') for zipinfo in zf.infolist(): if zipinfo.file_size < MAX_MD5_FILE_SIZE: md5_add_text(zf.read(zipinfo)) elif tarfile.is_tarfile(self.attachment.path): tf = tarfile.open(self.attachment.path, 'r') for tarinfo in tf.getmembers(): if tarinfo.isfile(): if tarinfo.size < MAX_MD5_FILE_SIZE: md5_add_text(tf.extractfile(tarinfo).read()) else: md5_add_file(self.attachment) except Exception as e: logger.warning( "Exception on archive MD5 computation, using file checksum: " + str(e)) result = hashlib.md5( ''.join(sorted(md5_set)).encode('utf-8')).hexdigest() return result
Calculate the checksum of the file upload. For binary files (e.g. PDFs), the MD5 of the file itself is used. Archives are unpacked and the MD5 is generated from the sanitized textfiles in the archive. This is done with some smartness: - Whitespace and tabs are removed before comparison. - For MD5, ordering is important, so we compute it on the sorted list of file hashes.
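The final digest is order-independent because the per-file hashes are sorted before joining; a minimal sketch (the hex digests are illustrative values):

import hashlib

md5_set = ['f1d2d2f924e986ac86fdf7b36c94bcdf', '0a9b110d5e553bd98e9965c70a601c15']
one = hashlib.md5(''.join(sorted(md5_set)).encode('utf-8')).hexdigest()
two = hashlib.md5(''.join(sorted(md5_set[::-1])).encode('utf-8')).hexdigest()
assert one == two  # archive member order cannot change the result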
def print_tables(xmldoc, output, output_format, tableList = [], columnList = [],
        round_floats = True, decimal_places = 2, format_links = True,
        title = None, print_table_names = True, unique_rows = False,
        row_span_columns = [], rspan_break_columns = []):
    """
    Method to print tables in an xml file in other formats.
    Input is an xmldoc, output is a file object containing the tables.

    @xmldoc: document to convert
    @output: file object to write output to; if None, will write to stdout
    @output_format: format to convert to
    @tableList: only convert the listed tables. Default is to convert all the
        tables found in the xmldoc. Tables not converted will not be included in
        the returned file object.
    @columnList: only print the columns listed, in the order given. This applies
        to all tables (if a table doesn't have a listed column, it's just skipped).
        To specify a column in a specific table, use table_name:column_name.
        Default is to print all columns.
    @round_floats: If turned on, will smart_round floats to specified number of places.
    @format_links: If turned on, will convert any html hyperlinks to specified output_format.
    @decimal_places: If round_floats turned on, will smart_round to this number
        of decimal places.
    @title: Add a title to this set of tables.
    @unique_rows: If two consecutive rows are exactly the same, will condense into one row.
    @print_table_names: If set to True, will print the name of each table in the caption section.
    @row_span_columns: For the columns listed, will concatenate consecutive cells
        with the same values into one cell that spans those rows. Default is to span no rows.
    @rspan_break_columns: Columns listed will prevent all cells from rowspanning
        across two rows in which values in the columns are different. Default is
        to have no break columns.
    """
    # get the tables to convert
    if tableList == []:
        tableList = [tb.getAttribute("Name") for tb in xmldoc.childNodes[0].getElementsByTagName(u'Table')]

    # set the output
    if output is None:
        output = sys.stdout

    # get table bits
    ttx, xtt, tx, xt, capx, xcap, rx, xr, cx, xc, rspx, xrsp, hlx, hxl, xhl = set_output_format( output_format )

    # set the title if desired
    if title is not None:
        print >> output, "%s%s%s" %(ttx,str(title),xtt)

    # cycle over the tables in the xmldoc
    for table_name in tableList:
        this_table = table.get_table(xmldoc, table_name)
        if columnList == []:
            col_names = [ col.getAttribute("Name").split(":")[-1]
                for col in this_table.getElementsByTagName(u'Column') ]
        else:
            requested_columns = [col.split(':')[-1] for col in columnList
                if not (':' in col and col.split(':')[0] != table_name) ]
            requested_columns = sorted(set(requested_columns), key=requested_columns.index)
            actual_columns = [actual_column.getAttribute("Name").split(":")[-1]
                for actual_column in this_table.getElementsByTagName(u'Column') ]
            col_names = [col for col in requested_columns if col in actual_columns]

        # get the relevant row_span/break column indices
        rspan_indices = [ n for n,col in enumerate(col_names)
            if col in row_span_columns or ':'.join([table_name,col]) in row_span_columns ]
        break_indices = [ n for n,col in enumerate(col_names)
            if col in rspan_break_columns or ':'.join([table_name,col]) in rspan_break_columns ]

        # start the table and print table name
        print >> output, tx
        if print_table_names:
            print >> output, "%s%s%s" %(capx, table_name, xcap)
        print >> output, "%s%s%s%s%s" %(rx, cx, (xc+cx).join(format_header_cell(val) for val in col_names), xc, xr)

        # format the data in the table
        out_table = []
        last_row = ''
        for row in this_table:
            out_row = [ str(format_cell( get_row_data(row, col_name),
                round_floats = round_floats, decimal_places = decimal_places,
                format_links = format_links, hlx = hlx, hxl = hxl, xhl = xhl ))
                for col_name in col_names ]
            if unique_rows and out_row == last_row:
                continue
            out_table.append(out_row)
            last_row = out_row

        rspan_count = {}
        for mm, row in enumerate(out_table[::-1]):
            this_row_idx = len(out_table) - (mm+1)
            next_row_idx = this_row_idx - 1
            # check if it's ok to do row-span
            rspan_ok = rspan_indices != [] and this_row_idx != 0
            if rspan_ok:
                for jj in break_indices:
                    rspan_ok = out_table[this_row_idx][jj] == out_table[next_row_idx][jj]
                    if not rspan_ok:
                        break
            # cycle over columns in the row setting row span values
            for nn, val in enumerate(row):
                # check if this cell should be spanned;
                # if so, delete it, update rspan_count and go on to next cell
                if rspan_ok and nn in rspan_indices:
                    if val == out_table[next_row_idx][nn]:
                        out_table[this_row_idx][nn] = ''
                        if (this_row_idx, nn) in rspan_count:
                            rspan_count[(next_row_idx,nn)] = rspan_count[(this_row_idx,nn)] + 1
                            del rspan_count[(this_row_idx,nn)]
                        else:
                            rspan_count[(next_row_idx,nn)] = 2
                    elif (this_row_idx, nn) in rspan_count:
                        out_table[this_row_idx][nn] = ''.join([rspx, str(rspan_count[(this_row_idx,nn)]), xrsp, str(val), xc])
                    else:
                        out_table[this_row_idx][nn] = ''.join([cx, str(val), xc])
                    continue
                # format cell appropriately
                if (this_row_idx, nn) in rspan_count:
                    out_table[this_row_idx][nn] = ''.join([rspx, str(rspan_count[(this_row_idx,nn)]), xrsp, str(val), xc])
                else:
                    out_table[this_row_idx][nn] = ''.join([cx, str(val), xc])

        # print the table to output
        for row in out_table:
            print >> output, "%s%s%s" % (rx, ''.join(row), xr)

        # close the table and go on to the next
        print >> output, xt
Method to print tables in an xml file in other formats. Input is an xmldoc, output is a file object containing the tables. @xmldoc: document to convert @output: file object to write output to; if None, will write to stdout @output_format: format to convert to @tableList: only convert the listed tables. Default is to convert all the tables found in the xmldoc. Tables not converted will not be included in the returned file object. @columnList: only print the columns listed, in the order given. This applies to all tables (if a table doesn't have a listed column, it's just skipped). To specify a column in a specific table, use table_name:column_name. Default is to print all columns. @round_floats: If turned on, will smart_round floats to specified number of places. @format_links: If turned on, will convert any html hyperlinks to specified output_format. @decimal_places: If round_floats turned on, will smart_round to this number of decimal places. @title: Add a title to this set of tables. @unique_rows: If two consecutive rows are exactly the same, will condense into one row. @print_table_names: If set to True, will print the name of each table in the caption section. @row_span_columns: For the columns listed, will concatenate consecutive cells with the same values into one cell that spans those rows. Default is to span no rows. @rspan_break_columns: Columns listed will prevent all cells from rowspanning across two rows in which values in the columns are different. Default is to have no break columns.
def param_particle_rad(self, ind): """ Get radius of one or more particles """ ind = self._vps(listify(ind)) return [self._i2p(i, 'a') for i in ind]
Get radius of one or more particles
def parse(self, raw_content, find_message_cb):
    """Function parses the RAW AS2 MDN, verifies it and extracts the
    processing status of the original AS2 message.

    :param raw_content:
        A byte string of the received HTTP headers followed by the body.

    :param find_message_cb:
        A callback that must return the original Message Object. The
        original message-id and original recipient AS2 ID are passed
        as arguments to it.

    :returns:
        A two element tuple containing (status, detailed_status). The
        status is a string indicating the status of the transaction. The
        optional detailed_status gives additional information about the
        processing status.
    """
    status, detailed_status = None, None
    self.payload = parse_mime(raw_content)
    self.orig_message_id, orig_recipient = self.detect_mdn()

    # Call the find message callback which should return a Message instance
    orig_message = find_message_cb(self.orig_message_id, orig_recipient)

    # Extract the headers and save it
    mdn_headers = {}
    for k, v in self.payload.items():
        k = k.lower()
        if k == 'message-id':
            self.message_id = v.lstrip('<').rstrip('>')
        mdn_headers[k] = v

    if orig_message.receiver.mdn_digest_alg \
            and self.payload.get_content_type() != 'multipart/signed':
        status = 'failed/Failure'
        detailed_status = 'Expected signed MDN but unsigned MDN returned'
        return status, detailed_status

    if self.payload.get_content_type() == 'multipart/signed':
        signature = None
        message_boundary = ('--' + self.payload.get_boundary()).encode('utf-8')
        for part in self.payload.walk():
            if part.get_content_type() == 'application/pkcs7-signature':
                signature = part.get_payload(decode=True)
            elif part.get_content_type() == 'multipart/report':
                self.payload = part

        # Verify the message, first using raw message and if it fails
        # then convert to canonical form and try again
        mic_content = extract_first_part(raw_content, message_boundary)
        verify_cert = orig_message.receiver.load_verify_cert()
        try:
            self.digest_alg = verify_message(mic_content, signature, verify_cert)
        except IntegrityError:
            mic_content = canonicalize(self.payload)
            self.digest_alg = verify_message(mic_content, signature, verify_cert)

    for part in self.payload.walk():
        if part.get_content_type() == 'message/disposition-notification':
            # logger.debug('Found MDN report for message %s:\n%s' % (
            #     orig_message.message_id, part.as_string()))
            mdn = part.get_payload()[-1]
            mdn_status = mdn['Disposition'].split(';').pop().strip().split(':')
            status = mdn_status[0]
            if status == 'processed':
                mdn_mic = mdn.get('Received-Content-MIC', '').split(',')[0]
                # TODO: Check MIC for all cases
                if mdn_mic and orig_message.mic \
                        and mdn_mic != orig_message.mic.decode():
                    status = 'processed/warning'
                    detailed_status = 'Message Integrity check failed.'
            else:
                detailed_status = ' '.join(mdn_status[1:]).strip()

    return status, detailed_status
Function parses the RAW AS2 MDN, verifies it and extracts the processing status of the original AS2 message. :param raw_content: A byte string of the received HTTP headers followed by the body. :param find_message_cb: A callback that must return the original Message Object. The original message-id and original recipient AS2 ID are passed as arguments to it. :returns: A two element tuple containing (status, detailed_status). The status is a string indicating the status of the transaction. The optional detailed_status gives additional information about the processing status.
def primers(self):
    """
    Read in the primer file, and create a properly formatted output file that takes any
    degenerate bases into account
    """
    with open(self.formattedprimers, 'w') as formatted:
        for record in SeqIO.parse(self.primerfile, 'fasta'):
            # from https://stackoverflow.com/a/27552377 - find any degenerate bases in the
            # primer sequence, and create all possibilities as a list
            degenerates = Seq.IUPAC.IUPACData.ambiguous_dna_values
            try:
                primerlist = list(map("".join, product(*map(degenerates.get, str(record.seq)))))
            except TypeError:
                print("Invalid Primer Sequence: {seq}".format(seq=str(record.seq)))
                sys.exit()
            # As the record.id is being updated in the loop below, set the name of the primer
            # here so that it can be recalled when setting the new record.ids
            primername = record.id
            # Iterate through all the possible primers created from any degenerate bases
            for index, primer in enumerate(primerlist):
                # Update the primer name with the position in the list to keep the name unique
                record.id = primername + '_{index}'.format(index=index)
                # Clear the description, as, otherwise, it will be added, and there will be
                # duplicate information
                record.description = ''
                # Create a seqrecord from the primer sequence
                record.seq = Seq.Seq(primer)
                # Write the properly-formatted records to file
                SeqIO.write(record, formatted, 'fasta')
                # Populate a dictionary to store the length of the primers - will be used in
                # determining whether BLAST hits are full-length
                self.faidict[record.id] = len(str(record.seq))
                # Ensure that the kmer length used in the initial baiting is no larger than
                # the shortest primer
                if len(str(record.seq)) < self.klength:
                    self.klength = len(str(record.seq))
Read in the primer file, and create a properly formatted output file that takes any degenerate bases into account
def get_field_by_showname(self, showname): """ Gets a field by its "showname" (the name that appears in Wireshark's detailed display i.e. in 'User-Agent: Mozilla...', 'User-Agent' is the showname) Returns None if not found. """ for field in self._get_all_fields_with_alternates(): if field.showname_key == showname: # Return it if "XXX: whatever == XXX" return field
Gets a field by its "showname" (the name that appears in Wireshark's detailed display i.e. in 'User-Agent: Mozilla...', 'User-Agent' is the showname) Returns None if not found.
def change_svc_check_timeperiod(self, service, check_timeperiod): """Modify service check timeperiod Format of the line that triggers function call:: CHANGE_SVC_CHECK_TIMEPERIOD;<host_name>;<service_description>;<check_timeperiod> :param service: service to modify check timeperiod :type service: alignak.objects.service.Service :param check_timeperiod: timeperiod object :type check_timeperiod: alignak.objects.timeperiod.Timeperiod :return: None """ service.modified_attributes |= DICT_MODATTR["MODATTR_CHECK_TIMEPERIOD"].value service.check_period = check_timeperiod self.send_an_element(service.get_update_status_brok())
Modify service check timeperiod Format of the line that triggers function call:: CHANGE_SVC_CHECK_TIMEPERIOD;<host_name>;<service_description>;<check_timeperiod> :param service: service to modify check timeperiod :type service: alignak.objects.service.Service :param check_timeperiod: timeperiod object :type check_timeperiod: alignak.objects.timeperiod.Timeperiod :return: None
def put_records(self, records, partition_key=None):
    """Add a list of data records to the record queue in the proper format.
    Convenience method that calls self.put_record for each element.

    Parameters
    ----------
    records : list
        List of records to send.
    partition_key: str
        Hash that determines which shard a given data record belongs to.
    """
    for record in records:
        self.put_record(record, partition_key)
Add a list of data records to the record queue in the proper format. Convenience method that calls self.put_record for each element. Parameters ---------- records : list List of records to send. partition_key: str Hash that determines which shard a given data record belongs to.
def calculate_size(name, entry_processor, keys): """ Calculates the request payload size""" data_size = 0 data_size += calculate_size_str(name) data_size += calculate_size_data(entry_processor) data_size += INT_SIZE_IN_BYTES for keys_item in keys: data_size += calculate_size_data(keys_item) return data_size
Calculates the request payload size
def get_central_wave(wav, resp, weight=1.0):
    """Calculate the central wavelength or the central wavenumber, depending on
    which parameter is input. By default the weighting function is
    f(lambda) = 1.0, but it is possible to add a custom weight, e.g.
    f(lambda) = 1./lambda**4 for Rayleigh scattering calculations
    """
    # info: {'unit': unit, 'si_scale': si_scale}
    # To get the wavelength/wavenumber in SI units (m or m-1):
    # wav = wav * info['si_scale']

    # res = np.trapz(resp*wav, wav) / np.trapz(resp, wav)
    # Check if it is a wavelength or a wavenumber and convert to microns or cm-1:
    # This should perhaps be user defined!?
    # if info['unit'].find('-1') > 0:
    #     Wavenumber:
    #     res *=

    return np.trapz(resp * wav * weight, wav) / np.trapz(resp * weight, wav)
Calculate the central wavelength or the central wavenumber, depending on which parameter is input. By default the weighting function is f(lambda)=1.0, but it is possible to add a custom weight, e.g. f(lambda) = 1./lambda**4 for Rayleigh scattering calculations
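A numerical sketch with a synthetic Gaussian response (values are illustrative; assumes get_central_wave and numpy are in scope). A decreasing weight such as the Rayleigh 1/lambda**4 pulls the centroid toward shorter wavelengths:

import numpy as np

wav = np.linspace(0.6, 0.7, 201)             # wavelengths in microns
resp = np.exp(-((wav - 0.65) / 0.01) ** 2)   # synthetic Gaussian response
cw = get_central_wave(wav, resp)             # ~0.65 for a symmetric response
cw_rayleigh = get_central_wave(wav, resp, weight=1. / wav ** 4)
assert cw_rayleigh < cw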
def _set_mpls_config(self, v, load=False): """ Setter method for mpls_config, mapped from YANG variable /mpls_config (container) If this variable is read-only (config: false) in the source YANG file, then _set_mpls_config is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mpls_config() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=mpls_config.mpls_config, is_container='container', presence=False, yang_name="mpls-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'151'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """mpls_config must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=mpls_config.mpls_config, is_container='container', presence=False, yang_name="mpls-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'151'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""", }) self.__mpls_config = t if hasattr(self, '_set'): self._set()
Setter method for mpls_config, mapped from YANG variable /mpls_config (container) If this variable is read-only (config: false) in the source YANG file, then _set_mpls_config is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mpls_config() directly.
def _create_add_petabencana_layer_action(self):
    """Create action for adding the PetaBencana flood layer."""
    icon = resources_path('img', 'icons', 'add-petabencana-layer.svg')
    self.action_add_petabencana_layer = QAction(
        QIcon(icon),
        self.tr('Add PetaBencana Flood Layer'),
        self.iface.mainWindow())
    self.action_add_petabencana_layer.setStatusTip(self.tr(
        'Add PetaBencana Flood Layer'))
    self.action_add_petabencana_layer.setWhatsThis(self.tr(
        'Use this to add a PetaBencana layer to your map. '
        'It needs internet access to function.'))
    self.action_add_petabencana_layer.triggered.connect(
        self.add_petabencana_layer)
    self.add_action(
        self.action_add_petabencana_layer,
        add_to_toolbar=self.full_toolbar)
Create action for adding the PetaBencana flood layer.
def execute(self, output_options=None, sampling=None, context=None, query_params=None): """ Initiate the query and return a QueryJob. Args: output_options: a QueryOutput object describing how to execute the query sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling context: an optional Context object providing project_id and credentials. If a specific project id or credentials are unspecified, the default ones configured at the global level are used. Returns: A Job object that can be used to get the query results, or export to a file or dataframe Raises: Exception if query could not be executed. """ return self.execute_async(output_options, sampling=sampling, context=context, query_params=query_params).wait()
Initiate the query and return a QueryJob. Args: output_options: a QueryOutput object describing how to execute the query sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling context: an optional Context object providing project_id and credentials. If a specific project id or credentials are unspecified, the default ones configured at the global level are used. Returns: A Job object that can be used to get the query results, or export to a file or dataframe Raises: Exception if query could not be executed.
def predict(self, X, lengths=None): """Find most likely state sequence corresponding to ``X``. Parameters ---------- X : array-like, shape (n_samples, n_features) Feature matrix of individual samples. lengths : array-like of integers, shape (n_sequences, ), optional Lengths of the individual sequences in ``X``. The sum of these should be ``n_samples``. Returns ------- state_sequence : array, shape (n_samples, ) Labels for each sample from ``X``. """ _, state_sequence = self.decode(X, lengths) return state_sequence
Find most likely state sequence corresponding to ``X``. Parameters ---------- X : array-like, shape (n_samples, n_features) Feature matrix of individual samples. lengths : array-like of integers, shape (n_sequences, ), optional Lengths of the individual sequences in ``X``. The sum of these should be ``n_samples``. Returns ------- state_sequence : array, shape (n_samples, ) Labels for each sample from ``X``.
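This looks like the base-class predict of hmmlearn; assuming so, a minimal end-to-end sketch with the GaussianHMM subclass on synthetic data (parameter values illustrative):

import numpy as np
from hmmlearn.hmm import GaussianHMM

# Two concatenated sequences of 2-D observations drawn from separated clusters.
X = np.vstack([np.random.randn(60, 2), np.random.randn(40, 2) + 5.0])
model = GaussianHMM(n_components=2, n_iter=20).fit(X, lengths=[60, 40])
states = model.predict(X, lengths=[60, 40])  # shape (100,), one label per sample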
def shift_ordering_up(self, parent_id, position, db_session=None, *args, **kwargs): """ Shifts ordering to "open a gap" for node insertion, begins the shift from given position :param parent_id: :param position: :param db_session: :return: """ return self.service.shift_ordering_up( parent_id=parent_id, position=position, db_session=db_session, *args, **kwargs )
Shifts ordering to "open a gap" for node insertion, begins the shift from given position :param parent_id: :param position: :param db_session: :return:
def set_variable(section, value, create): """ Set value of a variable in an environment file for the given section. If the variable is already defined, its value is replaced, otherwise, it is added to the end of the file. The value is given as "ENV_VAR_NAME=env_var_value", e.g.: s3conf set test ENV_VAR_NAME=env_var_value """ if not value: value = section section = None try: logger.debug('Running env command') settings = config.Settings(section=section) conf = s3conf.S3Conf(settings=settings) env_vars = conf.get_envfile() env_vars.set(value, create=create) except exceptions.EnvfilePathNotDefinedError: raise exceptions.EnvfilePathNotDefinedUsageError()
Set value of a variable in an environment file for the given section. If the variable is already defined, its value is replaced, otherwise, it is added to the end of the file. The value is given as "ENV_VAR_NAME=env_var_value", e.g.: s3conf set test ENV_VAR_NAME=env_var_value
def get_ssl(self, host_and_port=None): """ Get SSL params for the given host. :param (str,int) host_and_port: the host/port pair we want SSL params for, default current_host_and_port """ if not host_and_port: host_and_port = self.current_host_and_port return self.__ssl_params.get(host_and_port)
Get SSL params for the given host. :param (str,int) host_and_port: the host/port pair we want SSL params for, default current_host_and_port
def contribution_maps_1d_from_hyper_images_and_galaxies(hyper_model_image_1d, hyper_galaxy_images_1d, hyper_galaxies, hyper_minimum_values): """For a fitting hyper_galaxy_image, hyper_galaxy model image, list of hyper galaxies images and model hyper galaxies, compute their contribution maps, which are used to compute a scaled-noise_map map. All quantities are masked 1D arrays. The reason this is separate from the *contributions_from_fitting_hyper_images_and_hyper_galaxies* function is that each hyper_galaxy image has a list of hyper galaxies images and associated hyper galaxies (one for each galaxy). Thus, this function breaks down the calculation of each 1D masked contribution map and returns them in the same datas structure (2 lists with indexes [image_index][contribution_map_index]. Parameters ---------- hyper_model_image_1d : ndarray The best-fit model image to the datas (e.g. from a previous analysis phase). hyper_galaxy_images_1d : [ndarray] The best-fit model image of each hyper galaxy to the datas (e.g. from a previous analysis phase). hyper_galaxies : [galaxy.Galaxy] The hyper galaxies which represent the model components used to scale the noise_map, which correspond to individual galaxies in the image. hyper_minimum_values : [float] The minimum value of each hyper_galaxy-image contribution map, which ensure zero's don't impact the scaled noise-map. """ # noinspection PyArgumentList return list(map(lambda hyper_galaxy, hyper_galaxy_image_1d, hyper_minimum_value: hyper_galaxy.contributions_from_model_image_and_galaxy_image(model_image=hyper_model_image_1d, galaxy_image=hyper_galaxy_image_1d, minimum_value=hyper_minimum_value), hyper_galaxies, hyper_galaxy_images_1d, hyper_minimum_values))
For a fitting hyper_galaxy_image, hyper_galaxy model image, list of hyper galaxies images and model hyper galaxies, compute their contribution maps, which are used to compute a scaled-noise_map map. All quantities are masked 1D arrays. The reason this is separate from the *contributions_from_fitting_hyper_images_and_hyper_galaxies* function is that each hyper_galaxy image has a list of hyper galaxies images and associated hyper galaxies (one for each galaxy). Thus, this function breaks down the calculation of each 1D masked contribution map and returns them in the same datas structure (2 lists with indexes [image_index][contribution_map_index]. Parameters ---------- hyper_model_image_1d : ndarray The best-fit model image to the datas (e.g. from a previous analysis phase). hyper_galaxy_images_1d : [ndarray] The best-fit model image of each hyper galaxy to the datas (e.g. from a previous analysis phase). hyper_galaxies : [galaxy.Galaxy] The hyper galaxies which represent the model components used to scale the noise_map, which correspond to individual galaxies in the image. hyper_minimum_values : [float] The minimum value of each hyper_galaxy-image contribution map, which ensure zero's don't impact the scaled noise-map.
def HHIPreFilter(config={}): """HHI pre-interlace filter. A widely used prefilter to prevent line twitter when converting sequential images to interlace. Coefficients taken from: 'Specification of a Generic Format Converter', S. Pigeon, L. Vandendorpe, L. Cuvelier and B. Maison, CEC RACE/HAMLET Deliverable no R2110/WP2/DS/S/006/b1, September 1995. http://www.stephanepigeon.com/Docs/deliv2.pdf """ fil = numpy.array( [-4, 8, 25, -123, 230, 728, 230, -123, 25, 8, -4], dtype=numpy.float32).reshape((-1, 1, 1)) / numpy.float32(1000) resize = Resize(config=config) out_frame = Frame() out_frame.data = fil out_frame.type = 'fil' audit = out_frame.metadata.get('audit') audit += 'data = HHI pre-interlace filter\n' out_frame.metadata.set('audit', audit) resize.filter(out_frame) return resize
HHI pre-interlace filter. A widely used prefilter to prevent line twitter when converting sequential images to interlace. Coefficients taken from: 'Specification of a Generic Format Converter', S. Pigeon, L. Vandendorpe, L. Cuvelier and B. Maison, CEC RACE/HAMLET Deliverable no R2110/WP2/DS/S/006/b1, September 1995. http://www.stephanepigeon.com/Docs/deliv2.pdf
def run_forecast(self): """ Updates card & runs for RAPID to GSSHA & LSM to GSSHA """ # ---------------------------------------------------------------------- # LSM to GSSHA # ---------------------------------------------------------------------- self.prepare_hmet() self.prepare_gag() # ---------------------------------------------------------------------- # RAPID to GSSHA # ---------------------------------------------------------------------- self.rapid_to_gssha() # ---------------------------------------------------------------------- # HOTSTART # ---------------------------------------------------------------------- self.hotstart() # ---------------------------------------------------------------------- # Run GSSHA # ---------------------------------------------------------------------- return self.run()
Updates card & runs for RAPID to GSSHA & LSM to GSSHA
def correlation_linear(values_1, values_2, printout=None):
    """
    This function calculates the Pearson product-moment correlation
    coefficient. This is a measure of the linear correlation of two
    variables. The value can be between +1 and -1 inclusive, where 1 is total
    positive correlation, 0 is no correlation and -1 is total negative
    correlation. It is a measure of the linear dependence between two
    variables.

    This function also calculates the significance (2-tailed p-value) of the
    correlation coefficient given the sample size.
    """
    r, p_value = scipy.stats.pearsonr(values_1, values_2)
    if printout is not True:
        return r, p_value
    else:
        text = (
            "Pearson linear correlation coefficient: {r}\n"
            "2-tailed p-value: {p_value}"
        ).format(
            r=r,
            p_value=p_value
        )
        return text
This function calculates the Pearson product-moment correlation coefficient. This is a measure of the linear correlation of two variables. The value can be between +1 and -1 inclusive, where 1 is total positive correlation, 0 is no correlation and -1 is total negative correlation. It is a measure of the linear dependence between two variables. This function also calculates the significance (2-tailed p-value) of the correlation coefficient given the sample size.
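A usage sketch (assumes the function and scipy are importable; the data is illustrative):

r, p_value = correlation_linear([1, 2, 3, 4, 5], [2, 4, 6, 8, 11])
# r is close to +1 for this nearly proportional data; p_value is the
# two-tailed significance for n=5 samples.
print(correlation_linear([1, 2, 3, 4, 5], [2, 4, 6, 8, 11], printout=True))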