def determine_packages_to_sync(self):
    """
    Update the self.packages_to_sync to contain packages that need to
    be synced.
    """
    # In case we don't find any changes we will stay on the currently
    # synced serial.
    self.target_serial = self.synced_serial
    self.packages_to_sync = {}
    logger.info(f"Current mirror serial: {self.synced_serial}")

    if self.todolist.exists():
        # We started a sync previously and left a todo list as well as
        # the targeted serial. We'll try to keep going through the todo
        # list and then mark the targeted serial as done.
        logger.info("Resuming interrupted sync from local todo list.")
        saved_todo = iter(open(self.todolist, encoding="utf-8"))
        self.target_serial = int(next(saved_todo).strip())
        for line in saved_todo:
            package, serial = line.strip().split()
            self.packages_to_sync[package] = int(serial)
    elif not self.synced_serial:
        logger.info("Syncing all packages.")
        # First get the current serial, then start to sync. This makes
        # us more defensive in case something changes on the server
        # between those two calls.
        self.packages_to_sync.update(self.master.all_packages())
        self.target_serial = max(
            [self.synced_serial] + list(self.packages_to_sync.values())
        )
    else:
        logger.info("Syncing based on changelog.")
        self.packages_to_sync.update(
            self.master.changed_packages(self.synced_serial)
        )
        self.target_serial = max(
            [self.synced_serial] + list(self.packages_to_sync.values())
        )
        # We can avoid downloading the main index page if we don't have
        # anything to do at all during a changelog-based sync.
        self.need_index_sync = bool(self.packages_to_sync)

    self._filter_packages()
    logger.info(f"Trying to reach serial: {self.target_serial}")
    pkg_count = len(self.packages_to_sync)
    logger.info(f"{pkg_count} packages to sync.")

def changed_fields(self, from_db=False):
    """
    Args:
        from_db (bool): Check changes against actual db data.

    Returns:
        set: Field names whose values changed.
    """
    if self.exist:
        current_dict = self.clean_value()
        # `from_db` is False by default. When a `ListNode` is initialized,
        # `clean_value` (called just above) sets `from_db` flags in the
        # list node sets, which distinguishes clean_data from object._data.
        db_data = self._initial_data
        if from_db:
            # Thus, after clean_value, the object's data is taken from
            # the db again.
            db_data = self.objects.data().get(self.key)[0]
        set_current, set_past = set(current_dict.keys()), set(db_data.keys())
        intersect = set_current.intersection(set_past)
        return set(o for o in intersect if db_data[o] != current_dict[o])

def to_internal_value(self, data):
    """
    Dicts of native values <- Dicts of primitive datatypes.
    """
    if html.is_html_input(data):
        data = html.parse_html_dict(data)
    if not isinstance(data, dict):
        self.fail('not_a_dict', input_type=type(data).__name__)
    if not self.allow_empty and len(data.keys()) == 0:
        message = self.error_messages['empty']
        raise ValidationError({
            api_settings.NON_FIELD_ERRORS_KEY: [message]
        })
    return {
        six.text_type(key): self.child.run_validation(value)
        for key, value in data.items()
    }

def add_flooded_field(self, shapefile_path):
    """Create the layer from the local shp adding the flooded field.

    .. versionadded:: 3.3

    Use this method to add a calculated field to a shapefile. The
    shapefile should have a field called 'count' containing the number
    of flood reports for the field. The field values will be set to 0
    if the count field is < 1, otherwise it will be set to 1.

    :param shapefile_path: Path to the shapefile that will have the
        flooded field added.
    :type shapefile_path: basestring

    :return: A vector layer with the flooded field added.
    :rtype: QgsVectorLayer
    """
    layer = QgsVectorLayer(
        shapefile_path, self.tr('Jakarta Floods'), 'ogr')
    # Add a calculated field indicating if a poly is flooded or not
    # from qgis.PyQt.QtCore import QVariant
    layer.startEditing()
    # Add a field with an integer from 0 to 4 which represents the
    # flood class. It's the same as the 'state' field, except that
    # 'state' is treated as a string. This is used for cartography.
    flood_class_field = QgsField('floodclass', QVariant.Int)
    layer.addAttribute(flood_class_field)
    layer.commitChanges()
    layer.startEditing()
    flood_class_idx = layer.fields().lookupField('floodclass')
    flood_class_expression = QgsExpression('to_int(state)')
    context = QgsExpressionContext()
    context.setFields(layer.fields())
    flood_class_expression.prepare(context)
    # Add a field with a boolean flag to say if the area is flooded.
    # This is used by the impact function.
    flooded_field = QgsField('flooded', QVariant.Int)
    layer.dataProvider().addAttributes([flooded_field])
    layer.commitChanges()
    layer.startEditing()
    flooded_idx = layer.fields().lookupField('flooded')
    flood_flag_expression = QgsExpression('state > 0')
    flood_flag_expression.prepare(context)
    for feature in layer.getFeatures():
        context.setFeature(feature)
        feature[flood_class_idx] = flood_class_expression.evaluate(context)
        feature[flooded_idx] = flood_flag_expression.evaluate(context)
        layer.updateFeature(feature)
    layer.commitChanges()
    return layer

def get_log_form(self, *args, **kwargs):
    """Pass through to provider LogAdminSession.get_log_form_for_update"""
    # Implemented from kitosid template for -
    # osid.resource.BinAdminSession.get_bin_form_for_update_template
    # This method might be a bit sketchy. Time will tell.
    if isinstance(args[-1], list) or 'log_record_types' in kwargs:
        return self.get_log_form_for_create(*args, **kwargs)
    else:
        return self.get_log_form_for_update(*args, **kwargs)

def setIcon(self, icon):
    """
    Sets the icon for this hotspot. If this method is called with a
    valid icon, then the style will automatically switch to Icon,
    otherwise, the style will be set to Invisible.

    :param      icon | <QIcon> || <str> || None
    """
    icon = QIcon(icon)
    if icon.isNull():
        self._icon = None
        self._style = XNodeHotspot.Style.Invisible
    else:
        self._icon = icon
        self._style = XNodeHotspot.Style.Icon

def overlap(self, value):
    """
    Set the value of the ``<c:overlap>`` child element to *value*, or
    remove the overlap element if *value* is 0.
    """
    if value == 0:
        self._element._remove_overlap()
        return
    self._element.get_or_add_overlap().val = value

def header(self, k, v, replace=True):
    """
    Sets header value. Replaces existing value if `replace` is True.
    Otherwise appends `v` to the list of existing values.

    :param k: Header key
    :param v: Header value
    :param replace: flag for setting mode
    :type k: str
    :type v: str
    :type replace: bool
    """
    if replace:
        self._headers[k] = [v]
    else:
        self._headers.setdefault(k, []).append(v)
    return self

def create_line_plot(df):
    """
    create a mg line plot

    Args:
        df (pandas.DataFrame): data to plot
    """
    fig = Figure("/mg/line_plot/", "mg_line_plot")
    fig.graphics.transition_on_update(True)
    fig.graphics.animate_on_load()
    fig.layout.set_size(width=450, height=200)
    fig.layout.set_margin(left=40, right=40)
    return LineChart(df, fig, "Date", ["value"],
                     init_params={"Data": "Steps"}, timeseries=True)

def disconnect(self, output_port, input_port):
    """
    Remove a connection between (two ports of) :class:`.Effect`
    instances. To do this, pass the origin output port and the
    destination input port::

        >>> pedalboard.append(driver)
        >>> pedalboard.append(reverb)
        >>> driver_output = driver.outputs[0]
        >>> reverb_input = reverb.inputs[0]
        >>> pedalboard.connect(driver_output, reverb_input)
        >>> Connection(driver_output, reverb_input) in driver.connections
        True
        >>> pedalboard.disconnect(driver_output, reverb_input)
        >>> Connection(driver_output, reverb_input) in driver.connections
        False

    :param Port output_port: Effect output port
    :param Port input_port: Effect input port
    """
    ConnectionClass = output_port.connection_class
    self.connections.remove(ConnectionClass(output_port, input_port))

def install(root=None, expose=None):
    """Installs the default :class:`VendorImporter` for PEX vendored code.

    Any distributions listed in ``expose`` will also be exposed for direct
    import; ie: ``install(expose=['setuptools'])`` would make both
    ``setuptools`` and ``wheel`` available for import via
    ``from pex.third_party import setuptools, wheel``, but only ``setuptools``
    could be directly imported via ``import setuptools``.

    NB: Even when exposed, vendored code is not the same as the same
    un-vendored code and will properly fail type-tests against un-vendored
    types. For example, in an interpreter that has ``setuptools`` installed
    in its site-packages:

    >>> from pkg_resources import Requirement
    >>> orig_req = Requirement.parse('wheel==0.31.1')
    >>> from pex import third_party
    >>> third_party.install(expose=['setuptools'])
    >>> import sys
    >>> sys.modules.pop('pkg_resources')
    <module 'pkg_resources' from '/home/jsirois/dev/pantsbuild/jsirois-pex/.tox/py27-repl/lib/python2.7/site-packages/pkg_resources/__init__.pyc'>  # noqa
    >>> from pkg_resources import Requirement
    >>> new_req = Requirement.parse('wheel==0.31.1')
    >>> new_req == orig_req
    False
    >>> new_req == Requirement.parse('wheel==0.31.1')
    True
    >>> type(orig_req)
    <class 'pkg_resources.Requirement'>
    >>> type(new_req)
    <class 'pex.vendor._vendored.setuptools.pkg_resources.Requirement'>
    >>> from pex.third_party.pkg_resources import Requirement as PrefixedRequirement
    >>> new_req == PrefixedRequirement.parse('wheel==0.31.1')
    True
    >>> sys.modules.pop('pkg_resources')
    <module 'pex.vendor._vendored.setuptools.pkg_resources' from 'pex/vendor/_vendored/setuptools/pkg_resources/__init__.pyc'>  # noqa
    >>> sys.modules.pop('pex.third_party.pkg_resources')
    <module 'pex.vendor._vendored.setuptools.pkg_resources' from 'pex/vendor/_vendored/setuptools/pkg_resources/__init__.pyc'>  # noqa
    >>>

    :param expose: A list of vendored distribution names to expose directly
                   on the ``sys.path``.
    :type expose: list of str
    :raise: :class:`ValueError` if any distributions to expose cannot be found.
    """
    VendorImporter.install_vendored(prefix=import_prefix(), root=root,
                                    expose=expose)

def remove_counter(self, key, path, consistency_level):
    """
    Remove a counter at the specified location.

    Note that counters have limited support for deletes: if you remove
    a counter, you must wait to issue any following update until the
    delete has reached all the nodes and all of them have been fully
    compacted.

    Parameters:
     - key
     - path
     - consistency_level
    """
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_remove_counter(key, path, consistency_level)
    return d

def create_script(create=None):  # noqa: E501
    """Create a new script

    Create a new script  # noqa: E501

    :param create: The data needed to create this script
    :type create: dict | bytes

    :rtype: Response
    """
    if connexion.request.is_json:
        create = Create.from_dict(connexion.request.get_json())  # noqa: E501
    if not hasAccess():
        return redirectUnauthorized()
    driver = LoadedDrivers.getDefaultDriver()
    driver.saveScript(create.script.name, create.script.content)
    return Response(status=200, body={'file-name': create.script.name})

def create(cls, name, nat=False, mobile_vpn_topology_mode=None,
           vpn_profile=None):
    """
    Create a new policy based VPN

    :param name: name of vpn policy
    :param bool nat: whether to apply NAT to the VPN (default False)
    :param mobile_vpn_topology_mode: whether to allow remote vpn
    :param VPNProfile vpn_profile: reference to VPN profile, or uses default
    :rtype: PolicyVPN
    """
    vpn_profile = element_resolver(vpn_profile) or \
        VPNProfile('VPN-A Suite').href

    json = {'mobile_vpn_topology_mode': mobile_vpn_topology_mode,
            'name': name,
            'nat': nat,
            'vpn_profile': vpn_profile}

    try:
        return ElementCreator(cls, json)
    except CreateElementFailed as err:
        raise CreatePolicyFailed(err)

def gsea_significance(enrichment_scores, enrichment_nulls):
    """Compute nominal pvals, normalized ES, and FDR q value.

    For a given NES(S) = NES* >= 0, the FDR is the ratio of the
    percentage of all (S, pi) with NES(S, pi) >= 0, whose
    NES(S, pi) >= NES*, divided by the percentage of observed S with
    NES(S) >= 0, whose NES(S) >= NES*, and similarly if
    NES(S) = NES* <= 0.
    """
    # For a zero by zero division (undetermined, results in a NaN),
    np.seterr(divide='ignore', invalid='ignore')
    # import warnings
    # warnings.simplefilter("ignore")
    es = np.array(enrichment_scores)
    esnull = np.array(enrichment_nulls)
    logging.debug("Start to compute pvals..................................")
    # compute pvals.
    enrichmentPVals = gsea_pval(es, esnull).tolist()

    logging.debug("Compute nes and nesnull.................................")
    # nEnrichmentScores, nEnrichmentNulls = normalize(es, esnull)
    # New normalized enrichment score implementation.
    # This could speed things up significantly.
    esnull_pos = (esnull * (esnull >= 0)).mean(axis=1)
    esnull_neg = (esnull * (esnull < 0)).mean(axis=1)
    nEnrichmentScores = np.where(es >= 0, es / esnull_pos, -es / esnull_neg)
    nEnrichmentNulls = np.where(esnull >= 0,
                                esnull / esnull_pos[:, np.newaxis],
                                -esnull / esnull_neg[:, np.newaxis])

    logging.debug("start to compute fdrs..................................")
    # FDR null distribution histogram: create a histogram of all
    # NES(S, pi) over all S and pi, and use this null distribution to
    # compute an FDR q value.
    # vals = reduce(lambda x, y: x + y, nEnrichmentNulls, [])
    # nvals = np.array(sorted(vals))
    # or
    nvals = np.sort(nEnrichmentNulls.flatten())
    nnes = np.sort(nEnrichmentScores)
    fdrs = []
    # FDR computation
    for i in range(len(enrichment_scores)):
        nes = nEnrichmentScores[i]
        # use the same pval method to calculate fdr
        if nes >= 0:
            allPos = int(len(nvals) - np.searchsorted(nvals, 0, side="left"))
            allHigherAndPos = int(len(nvals) - np.searchsorted(nvals, nes,
                                                               side="left"))
            nesPos = len(nnes) - int(np.searchsorted(nnes, 0, side="left"))
            nesHigherAndPos = len(nnes) - int(np.searchsorted(nnes, nes,
                                                              side="left"))
            # allPos = (nvals >= 0).sum()
            # allHigherAndPos = (nvals >= nes).sum()
            # nesPos = (nnes >= 0).sum()
            # nesHigherAndPos = (nnes >= nes).sum()
        else:
            allPos = int(np.searchsorted(nvals, 0, side="left"))
            allHigherAndPos = int(np.searchsorted(nvals, nes, side="right"))
            nesPos = int(np.searchsorted(nnes, 0, side="left"))
            nesHigherAndPos = int(np.searchsorted(nnes, nes, side="right"))
            # allPos = (nvals < 0).sum()
            # allHigherAndPos = (nvals < nes).sum()
            # nesPos = (nnes < 0).sum()
            # nesHigherAndPos = (nnes < nes).sum()
        try:
            pi_norm = allHigherAndPos / float(allPos)
            pi_obs = nesHigherAndPos / float(nesPos)
            fdr = pi_norm / pi_obs
            fdrs.append(fdr if fdr < 1 else 1.0)
        except ZeroDivisionError:
            fdrs.append(1000000000.0)

    logging.debug("Statistical testing finished.............................")
    return zip(enrichment_scores, nEnrichmentScores, enrichmentPVals, fdrs)

def _register_key(fingerprint, gpg):
    """Registers key in config"""
    for private_key in gpg.list_keys(True):
        try:
            if str(fingerprint) == private_key['fingerprint']:
                config["gpg_key_fingerprint"] = \
                    repr(private_key['fingerprint'])
        except KeyError:
            pass

def file_md5sum(filename):
    """
    :param filename: The filename of the file to process
    :returns: The MD5 hash of the file
    """
    hash_md5 = hashlib.md5()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(1024 * 4), b''):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()

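A minimal usage sketch for file_md5sum; the file name 'data.bin' is hypothetical, and hashlib must already be imported for the function above:

    import hashlib

    # Hashes a (hypothetical) local file in 4 KiB chunks, so even large
    # files never need to fit in memory.
    print(file_md5sum('data.bin'))
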
def scrollTextIntoView(self, text):
    '''
    Performs a forward scroll action on the scrollable layout element
    until the text you provided is visible, or until swipe attempts
    have been exhausted. See setMaxSearchSwipes(int)
    '''
    if self.vc is None:
        raise ValueError('vc must be set in order to use this method')
    for n in range(self.maxSearchSwipes):
        # FIXME: now I need to figure out the best way of navigating to
        # the ViewClient associated with this UiScrollable.
        # It's using setViewClient() now.
        if DEBUG or DEBUG_CHANGE_LANGUAGE:
            print >> sys.stderr, u"Searching for text='%s'" % text
            for v in self.vc.views:
                try:
                    print >> sys.stderr, "    scrollTextIntoView: v=", v.getId(),
                    print >> sys.stderr, v.getText()
                except Exception, e:
                    print >> sys.stderr, e
                    pass
        #v = self.vc.findViewWithText(text, root=self.view)
        v = self.vc.findViewWithText(text)
        if v is not None:
            return v
        self.flingForward()
        #self.vc.sleep(1)
        self.vc.dump(-1)
        # WARNING: after this dump, the value kept in self.view is
        # outdated; it should be refreshed in some way.
    return None

def check_version(url=VERSION_URL):
    """Returns the version string for the latest SDK."""
    for line in get(url):
        if 'release:' in line:
            return line.split(':')[-1].strip(' \'"\r\n')

def processFlat(self):
    """Main process for flat segmentation.

    Returns
    -------
    est_idxs : np.array(N)
        Estimated times for the segment boundaries in frame indices.
    est_labels : np.array(N-1)
        Estimated labels for the segments.
    """
    self.config["hier"] = False
    est_idxs, est_labels, F = self.process()
    assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[1] - 1
    return self._postprocess(est_idxs, est_labels)

def send_calibrate_accelerometer(self, simple=False):
    """Request accelerometer calibration.

    :param simple: if True, perform simple accelerometer calibration
    """
    calibration_command = self.message_factory.command_long_encode(
        self._handler.target_system, 0,  # target_system, target_component
        mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION,  # command
        0,  # confirmation
        0,  # param 1, 1: gyro calibration, 3: gyro temperature calibration
        0,  # param 2, 1: magnetometer calibration
        0,  # param 3, 1: ground pressure calibration
        0,  # param 4, 1: radio RC calibration, 2: RC trim calibration
        4 if simple else 1,  # param 5, 1: accelerometer calibration,
                             # 2: board level calibration,
                             # 3: accelerometer temperature calibration,
                             # 4: simple accelerometer calibration
        0,  # param 6, 2: airspeed calibration
        0,  # param 7, 1: ESC calibration, 3: barometer temperature calibration
    )
    self.send_mavlink(calibration_command)

def transform(self, X, y=None):
    """Transform X to a cluster-distance space.

    In the new space, each dimension is the cosine distance to the
    cluster centers. Note that even if X is sparse, the array returned
    by `transform` will typically be dense.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to transform.

    Returns
    -------
    X_new : array, shape [n_samples, k]
        X transformed in the new space.
    """
    if self.normalize:
        X = normalize(X)
    check_is_fitted(self, "cluster_centers_")
    X = self._check_test_data(X)
    return self._transform(X)

def insert_break(lines, break_pos=9):
    """
    Insert a <!--more--> tag for larger release notes.

    Parameters
    ----------
    lines : list of str
        The content of the release note.
    break_pos : int
        Line number before which a break should approximately be inserted.

    Returns
    -------
    list of str
        The text with the inserted tag or no modification if it was
        sufficiently short.
    """
    def line_filter(line):
        if len(line) == 0:
            return True
        return any(line.startswith(c) for c in "-*+")

    if len(lines) <= break_pos:
        return lines
    newlines = [
        i for i, line in enumerate(lines[break_pos:], start=break_pos)
        if line_filter(line.strip())]
    if len(newlines) > 0:
        break_pos = newlines[0]
    lines.insert(break_pos, "<!--more-->\n")
    return lines

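A quick sanity check of insert_break's behaviour; the release-note content is made up. With 16 lines and the default break_pos of 9, the tag lands before the first blank or bulleted line at or after position 9:

    notes = ["Release 1.2\n"] + ["- change %d\n" % i for i in range(15)]
    notes = insert_break(notes)
    print(notes.index("<!--more-->\n"))  # 9, the default break position
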
def ser2ber(q, n, d, t, ps):
    """
    Converts symbol error rate to bit error rate. Taken from Ziemer and
    Tranter page 650. Necessary when comparing different types of block
    codes.

    parameters
    ----------
    q: size of the code alphabet for given modulation type (BPSK=2)
    n: number of channel bits
    d: distance (2e+1) where e is the number of correctable errors per
       code word. For hamming codes, e=1, so d=3.
    t: number of correctable errors per code word
    ps: symbol error probability vector

    returns
    -------
    ber: bit error rate
    """
    lnps = len(ps)  # length of error vector
    ber = np.zeros(lnps)  # initialize output vector
    for k in range(0, lnps):  # iterate error vector
        ser = ps[k]  # channel symbol error rate
        sum1 = 0  # initialize sums
        sum2 = 0
        for i in range(t + 1, d + 1):
            term = special.comb(n, i) * (ser**i) * ((1 - ser))**(n - i)
            sum1 = sum1 + term
        for i in range(d + 1, n + 1):
            term = (i) * special.comb(n, i) * (ser**i) * ((1 - ser)**(n - i))
            sum2 = sum2 + term
        ber[k] = (q / (2 * (q - 1))) * ((d / n) * sum1 + (1 / n) * sum2)
    return ber

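A hedged usage sketch: the (7,4) Hamming code parameters (q=2, n=7, d=3, t=1) are standard, but the import aliases (np, special) are assumed to match the ones the function body uses:

    import numpy as np
    from scipy import special

    # Bit error rate of a (7,4) Hamming code over a few channel symbol
    # error probabilities.
    ps = np.array([1e-3, 1e-2, 1e-1])
    print(ser2ber(2, 7, 3, 1, ps))
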
def get_step_index(self, step=None):
    """
    Returns the index for the given `step` name. If no step is given,
    the current step will be used to get the index.
    """
    if step is None:
        step = self.steps.current
    return self.get_form_list().keyOrder.index(step)

def _SetupPaths():
    """Sets up the sys.path with special directories for endpointscfg.py."""
    sdk_path = _FindSdkPath()
    if sdk_path:
        sys.path.append(sdk_path)
        try:
            import dev_appserver  # pylint: disable=g-import-not-at-top
            if hasattr(dev_appserver, 'fix_sys_path'):
                dev_appserver.fix_sys_path()
            else:
                logging.warning(_NO_FIX_SYS_PATH_WARNING)
        except ImportError:
            logging.warning(_IMPORT_ERROR_WARNING)
    else:
        logging.warning(_NOT_FOUND_WARNING)

    # Add the path above this directory, so we can import the endpoints
    # package from the user's app code (rather than from another,
    # possibly outdated SDK).
    # pylint: disable=g-import-not-at-top
    from google.appengine.ext import vendor
    vendor.add(os.path.dirname(os.path.dirname(__file__)))

def update_mapping_meta(self, doc_type, values, indices=None):
    """
    Update mapping meta

    :param doc_type: a doc type or a list of doctypes
    :param values: the dict of meta
    :param indices: a list of indices
    :return:
    """
    indices = self._validate_indices(indices)
    for index in indices:
        mapping = self.mappings.get_doctype(index, doc_type)
        if mapping is None:
            continue
        meta = mapping.get_meta()
        meta.update(values)
        mapping = {doc_type: {"_meta": meta}}
        self.indices.put_mapping(doc_type=doc_type, mapping=mapping,
                                 indices=indices)

def write_chunks(out, chunks):
    """Create a PNG file by writing out the chunks."""
    out.write(png_signature)
    for chunk in chunks:
        write_chunk(out, *chunk)

def install_napp(cls, mgr):
    """Install a NApp.

    Raises:
        KytosException: If a NApp hasn't been found.
    """
    try:
        LOG.info('  Searching local NApp...')
        mgr.install_local()
        LOG.info('  Found and installed.')
    except FileNotFoundError:
        LOG.info('  Not found. Downloading from NApps Server...')
        try:
            mgr.install_remote()
            LOG.info('  Downloaded and installed.')
            return
        except HTTPError as exception:
            if exception.code == 404:
                LOG.error('  NApp not found.')
            else:
                LOG.error('  NApps Server error: %s', exception)
        except URLError as exception:
            LOG.error('  NApps Server error: %s', str(exception.reason))
        raise KytosException("NApp not found.")

def change_key(self, key):
    """Re-encrypt stored services and orgs with the new key."""
    services = {}
    for service_name in self.list_services():
        services[service_name] = self.get_service(service_name)

    orgs = {}
    for org_name in self.list_orgs():
        orgs[org_name] = self.get_org(org_name)

    self.key = key

    if orgs:
        for org_name, org_config in list(orgs.items()):
            self.set_org(org_config)
    if services:
        for service_name, service_config in list(services.items()):
            self.set_service(service_name, service_config)

    self._convert_connected_app()

def get_protein_seq_for_transcript(self, transcript_id):
    """obtain the sequence for a transcript from ensembl"""
    headers = {"content-type": "text/plain"}
    self.attempt = 0
    ext = "/sequence/id/{}?type=protein".format(transcript_id)
    return self.ensembl_request(ext, headers)

def wsgi_server_target(self, wsgi_environment, response_start):
    """
    Searches for a route in self.routes and passes wsgi_environment,
    response_start and route_result to the found target. route_result
    is explained in the Route class. The result of running this method
    is simply passed from the target to Carafe's calling functionality;
    see the Carafe class for an explanation.
    """
    route_result = {}
    request_method = wsgi_environment['REQUEST_METHOD']
    path_info = wsgi_environment['PATH_INFO']

    if self.PATH_INFO_mode == 'std':
        path_info = path_info.encode('latin1').decode('utf-8')
        wsgi_environment['PATH_INFO'] = path_info

    if path_info.startswith('/'):
        path_info = path_info[1:]

    if path_info in ['/', '']:
        splitted_path_info = []
    else:
        splitted_path_info = path_info.split('/')

    for i in range(len(splitted_path_info)):
        splitted_path_info[i] = self.unquote_callable(
            splitted_path_info[i]
        )

    target = self.default_target
    routing_error = False

    # if len(splitted_path_info) != 0 and len(self.routes) != 0:
    if len(self.routes) != 0:
        path_segment_to_check_position = 0
        filter_result = copy.copy(self.routes)

        # filter by method
        for i in range(len(filter_result) - 1, -1, -1):
            filter_result_i = filter_result[i]
            if ((filter_result_i.method in [None, False])
                    or (filter_result_i.method != True
                        and request_method not in filter_result_i.method)):
                del filter_result[i]

        # filter by path settings
        for i in range(len(splitted_path_info) - 1, -1, -1):
            if len(filter_result) == 0:
                break
            filter_result = _filter_routes_by_segment(
                splitted_path_info[i],
                filter_result,
                i
            )
            if (len(filter_result) == 1
                    and filter_result[0].path_settings[i][0] == 'path'):
                break

        for i in range(len(filter_result) - 1, -1, -1):
            if (len(filter_result[i].path_settings)
                    > len(splitted_path_info)):
                del filter_result[i]

        selected_route = None
        filter_result_l = len(filter_result)
        if filter_result_l == 0:
            routing_error = True
            logging.error("route not found")
        elif filter_result_l == 1:
            selected_route = filter_result[0]
        else:
            routing_error = True
            logging.error("can't find matching route")

        #print("selected_route 1: {}".format(selected_route))
        #print("route_result 1: {}".format(route_result))

        if selected_route is not None:
            target = selected_route.target
            for i in range(len(selected_route.path_settings)):
                selected_route_path_settings_i = \
                    selected_route.path_settings[i]
                if type(selected_route_path_settings_i[2]) == str:
                    selected_route_path_settings_i_0 = \
                        selected_route_path_settings_i[0]
                    selected_route_path_settings_i_2 = \
                        selected_route_path_settings_i[2]
                    if selected_route_path_settings_i_0 == 'path':
                        route_result[selected_route_path_settings_i_2] = \
                            splitted_path_info[i:]
                        break
                    elif selected_route_path_settings_i_0 == '=':
                        route_result[selected_route_path_settings_i_2] = \
                            (selected_route.path_settings[i]
                             == splitted_path_info[i])
                    elif selected_route_path_settings_i_0 == 'rer':
                        route_result[selected_route_path_settings_i_2] = \
                            selected_route.path_settings[i].match(
                                splitted_path_info[i]
                            )
                    elif selected_route_path_settings_i_0 in ['fm', 're']:
                        route_result[selected_route_path_settings_i_2] = \
                            splitted_path_info[i]
                    else:
                        raise Exception("programming error")

        #print("route_result 2: {}".format(route_result))

    if routing_error:
        logging.error(
            "routing error\n"
            "    asked route is: {}\n"
            "    starting route list is: {}\n"
            "    resulting route list is: {}".format(
                path_info,
                self.routes,
                filter_result
            )
        )

    if len(self.routes) == 0:
        logging.warning("routes not specified")

    return target(wsgi_environment, response_start, route_result)

def inherit_set(base, namespace, attr_name, inherit=lambda i: True):
    """
    Perform inheritance of sets. Returns a list of items that were
    inherited, for post-processing.

    :param base: The base class being considered; see ``iter_bases()``.
    :param namespace: The dictionary of the new class being built.
    :param attr_name: The name of the attribute containing the set to
                      be inherited.
    :param inherit: Filtering function to determine if a given item
                    should be inherited. If ``False`` or ``None``, item
                    will not be added, but will be included in the
                    returned items. If a function, the function will be
                    called with the item, and the item will be added and
                    included in the items list only if the function
                    returns ``True``. By default, all items are added
                    and included in the items list.
    """
    items = []

    # Get the sets to compare
    base_set = getattr(base, attr_name, set())
    new_set = namespace.setdefault(attr_name, set())

    for item in base_set:
        # Skip items that have been overridden or that we
        # shouldn't inherit
        if item in new_set or (inherit and not inherit(item)):
            continue

        # Inherit the item
        if inherit:
            new_set.add(item)
        items.append(item)

    return items

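A small illustration (the class and attribute names are hypothetical) of the default behaviour, where every base item not already overridden is both added to the namespace set and reported back:

    class Base(object):
        tags = {"a", "b"}

    namespace = {"tags": {"c"}}
    inherited = inherit_set(Base, namespace, "tags")
    print(sorted(inherited))          # ['a', 'b']
    print(sorted(namespace["tags"]))  # ['a', 'b', 'c']
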
def send_request(self, worker_class_or_function, args, on_receive=None):
    """
    Requests some work to be done by the backend. You can get notified
    of the work results by passing a callback (on_receive).

    :param worker_class_or_function: Worker class or function
    :param args: worker args, any JSON serializable objects
    :param on_receive: an optional callback executed when we receive
        the worker's results. The callback will be called with one
        argument: the results of the worker (object).

    :raise: backend.NotRunning if the backend process is not running.
    """
    if not self.running:
        try:
            # try to restart the backend if it crashed.
            self.start(self.server_script, interpreter=self.interpreter,
                       args=self.args)
        except AttributeError:
            pass  # not started yet
        finally:
            # caller should try again, later
            raise NotRunning()
    else:
        comm('sending request, worker=%r' % worker_class_or_function)
        # create a socket; the request will be sent as soon as the
        # socket has connected
        socket = JsonTcpClient(
            self.editor, self._port, worker_class_or_function, args,
            on_receive=on_receive)
        socket.finished.connect(self._rm_socket)
        self._sockets.append(socket)
        # restart heartbeat timer
        self._heartbeat_timer.start()

def find_words(text, suspect_words, excluded_words=[]):
    """Check if a text has some of the suspect words (or words that
    start with one of the suspect words). You can set some words to be
    excluded from the search, so you can remove false positives like
    'important' being detected when you search for 'import'.

    It will return True if the number of suspect words found is greater
    than the number of excluded words found. Otherwise, it will return
    False.

    Args:
        text (str): a string with the text to be analysed. It will be
            converted to lowercase.
        suspect_words: a list of strings whose presence in the text you
            want to check.
        excluded_words: a list of strings to be whitelisted.
    """
    text = text.lower()
    suspect_found = [i for i in re.finditer(make_regex(suspect_words), text)]
    if len(excluded_words) > 0:
        excluded_found = [i for i in re.finditer(make_regex(excluded_words),
                                                 text)]
        return len(suspect_found) > len(excluded_found)
    return len(suspect_found) > 0

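The snippet depends on a make_regex helper that is not shown; the stand-in below is only a plausible guess (match any word starting with one of the given words), used to make the example self-contained:

    import re

    def make_regex(words):
        # Hypothetical stand-in for the undocumented make_regex helper.
        return r"\b(?:%s)\w*" % "|".join(re.escape(w) for w in words)

    print(find_words("This is important", ["import"]))                 # True
    print(find_words("This is important", ["import"], ["important"]))  # False
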
def delete(filething):
    """delete(filething)

    Arguments:
        filething (filething)
    Raises:
        mutagen.MutagenError

    Remove tags from a file.
    """
    t = OggFLAC(filething)
    filething.fileobj.seek(0)
    t.delete(filething)

async def variant(self, elem=None, elem_type=None, params=None, obj=None):
    """
    Loads/dumps variant type

    :param elem:
    :param elem_type:
    :param params:
    :param obj:
    :return:
    """
    elem_type = elem_type if elem_type else elem.__class__

    if hasattr(elem_type, 'kv_serialize'):
        elem = elem_type() if elem is None else elem
        return await elem.kv_serialize(self, elem=elem, elem_type=elem_type,
                                       params=params, obj=obj)

    if self.writing:
        return await self.dump_variant(
            elem=elem,
            elem_type=elem_type if elem_type else elem.__class__,
            params=params, obj=obj)
    else:
        return await self.load_variant(
            elem_type=elem_type if elem_type else elem.__class__,
            params=params, elem=elem, obj=obj)

def count_seeds(usort):
    """uses bash commands to quickly count N seeds from utemp file"""
    with open(usort, 'r') as insort:
        cmd1 = ["cut", "-f", "2"]
        cmd2 = ["uniq"]
        cmd3 = ["wc"]
        proc1 = sps.Popen(cmd1, stdin=insort, stdout=sps.PIPE,
                          close_fds=True)
        proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=sps.PIPE,
                          close_fds=True)
        proc3 = sps.Popen(cmd3, stdin=proc2.stdout, stdout=sps.PIPE,
                          close_fds=True)
        res = proc3.communicate()
        nseeds = int(res[0].split()[0])
        proc1.stdout.close()
        proc2.stdout.close()
        proc3.stdout.close()
    return nseeds

def write_config(self):
    """Write the current configuration to the config file."""
    config_file = os.path.join(self.config_dir, 'pueue.ini')
    with open(config_file, 'w') as file_descriptor:
        self.config.write(file_descriptor)

def get_include_files():
    """Get the list of trust stores so they are properly packaged when
    doing a cx_freeze build.
    """
    plugin_data_files = []
    trust_stores_pem_path = path.join(root_path, 'sslyze', 'plugins',
                                      'utils', 'trust_store', 'pem_files')
    for file in listdir(trust_stores_pem_path):
        file = path.join(trust_stores_pem_path, file)
        if path.isfile(file):  # skip directories
            filename = path.basename(file)
            plugin_data_files.append(
                (file, path.join('pem_files', filename)))
    return plugin_data_files

def r2_score(y_true, y_pred):
    """R² for Bayesian regression models. Only valid for linear models.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.

    Returns
    -------
    Pandas Series with the following indices:
        r2: Bayesian R²
        r2_std: standard deviation of the Bayesian R².
    """
    if y_pred.ndim == 1:
        var_y_est = np.var(y_pred)
        var_e = np.var(y_true - y_pred)
    else:
        var_y_est = np.var(y_pred.mean(0))
        var_e = np.var(y_true - y_pred, 0)
    r_squared = var_y_est / (var_y_est + var_e)
    return pd.Series([np.mean(r_squared), np.std(r_squared)],
                     index=["r2", "r2_std"])

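A toy check, under the assumption that the two-dimensional branch is meant for posterior draws stacked as rows (one column per observation):

    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(0)
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = y_true + rng.normal(0, 0.1, size=(100, 4))  # 100 draws
    print(r2_score(y_true, y_pred))  # Series with 'r2' and 'r2_std'
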
def set_args(self, **kwargs):
    """
    Set more arguments to self.args

    args:
        **kwargs: key and value represent dictionary key and value
    """
    try:
        kwargs_items = kwargs.iteritems()
    except AttributeError:
        kwargs_items = kwargs.items()

    for key, val in kwargs_items:
        self.args[key] = val

def post_user_login(sender, request, user, **kwargs):
    """
    Create a profile for the user, when missing.
    Make sure that all necessary user groups exist and have the right
    permissions. We need that automatism for people not calling the
    configure tool, admin rights for admins after the first login, and
    similar cases.
    """
    logger.debug("Running post-processing for user login.")
    # Users created by social login or admins have no profile.
    # We fix that during their first login.
    try:
        with transaction.atomic():
            profile, created = UserProfile.objects.get_or_create(user=user)
            if created:
                logger.info("Created missing profile for user "
                            + str(user.pk))
    except Exception as e:
        logger.error("Error while creating user profile: " + str(e))
    check_permission_system()

def contains(this, that, axis=semantics.axis_default):
    """Returns a bool for each element of `that`, indicating if it is
    contained in `this`.

    Parameters
    ----------
    this : indexable key sequence
        sequence of items to test against
    that : indexable key sequence
        sequence of items to test for

    Returns
    -------
    ndarray, [that.size], bool
        a bool for each element in `that`, indicating if it is
        contained in `this`

    Notes
    -----
    Reads as 'this contains that'.
    Similar to 'that in this', but with different performance
    characteristics.
    """
    this = as_index(this, axis=axis, lex_as_struct=True, base=True)
    that = as_index(that, axis=axis, lex_as_struct=True)
    left = np.searchsorted(that._keys, this._keys, sorter=that.sorter,
                           side='left')
    right = np.searchsorted(that._keys, this._keys, sorter=that.sorter,
                            side='right')
    flags = np.zeros(that.size + 1, dtype=int)
    np.add.at(flags, left, 1)
    np.add.at(flags, right, -1)
    return np.cumsum(flags)[:-1].astype(bool)[that.rank]

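A small example of the documented semantics; as_index and semantics come from the surrounding package (presumably numpy_indexed), so only the call itself is shown:

    import numpy as np

    this = np.array([1, 2, 3])
    that = np.array([2, 4, 3, 3])
    print(contains(this, that))  # [ True False  True  True]
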
@defer.inlineCallbacks  # required for the yield/returnValue style below
def available_tcp_port(reactor):
    """
    Returns a Deferred firing an available TCP port on localhost.
    It does so by listening on port 0; then stopListening and fires
    the assigned port number.
    """
    endpoint = serverFromString(reactor, 'tcp:0:interface=127.0.0.1')
    port = yield endpoint.listen(NoOpProtocolFactory())
    address = port.getHost()
    yield port.stopListening()
    defer.returnValue(address.port)

def prepend(self, _, child, name=None):
    """Adds children to this tag, starting from the first position."""
    self._insert(child, prepend=True, name=name)
    return self

def WhereIs(self, prog, path=None, pathext=None, reject=[]):
    """Find prog in the path."""
    if path is None:
        try:
            path = self['ENV']['PATH']
        except KeyError:
            pass
    elif SCons.Util.is_String(path):
        path = self.subst(path)
    if pathext is None:
        try:
            pathext = self['ENV']['PATHEXT']
        except KeyError:
            pass
    elif SCons.Util.is_String(pathext):
        pathext = self.subst(pathext)
    prog = SCons.Util.CLVar(self.subst(prog))  # support "program --with-args"
    path = SCons.Util.WhereIs(prog[0], path, pathext, reject)
    if path:
        return path
    return None

def register_hit_type(
    self, title, description, reward, duration_hours, keywords,
    qualifications
):
    """Register HIT Type for this HIT and return the type's ID, which
    is required for creating a HIT.
    """
    reward = str(reward)
    duration_secs = int(
        datetime.timedelta(hours=duration_hours).total_seconds())
    hit_type = self.mturk.create_hit_type(
        Title=title,
        Description=description,
        Reward=reward,
        AssignmentDurationInSeconds=duration_secs,
        Keywords=",".join(keywords),
        AutoApprovalDelayInSeconds=0,
        QualificationRequirements=qualifications,
    )
    return hit_type["HITTypeId"]

def advance(parser):
    # type: (Parser) -> None
    """Moves the internal parser object to the next lexed token."""
    prev_end = parser.token.end
    parser.prev_end = prev_end
    parser.token = parser.lexer.next_token(prev_end)

def get_key_auth_cb(key_filepath):
    """This is just a convenience function for key-based login."""
    def auth_cb(ssh):
        key = ssh_pki_import_privkey_file(key_filepath)
        ssh.userauth_publickey(key)
    return auth_cb

def label_components(self, display=None):
    '''
    API: label_components(self, display=None)
    Description:
        This method labels the nodes of an undirected graph with
        component numbers so that each node has the same label as all
        nodes in the same component. It will display the algorithm if
        the display argument is provided.
    Input:
        display: display method.
    Pre:
        self.graph_type should be UNDIRECTED_GRAPH.
    Post:
        Nodes will have a 'component' attribute that will have the
        component number as value.
    '''
    if self.graph_type == DIRECTED_GRAPH:
        raise Exception("label_components only works for "
                        "undirected graphs")
    self.num_components = 0
    for n in self.get_node_list():
        self.get_node(n).set_attr('component', None)
    for n in self.neighbors:
        self.get_node(n).set_attr('label', '-')
    for n in self.get_node_list():
        if self.get_node(n).get_attr('component') is None:
            self.search(n, display=display,
                        component=self.num_components, algo='DFS')
            self.num_components += 1

def state():
    '''Get the playback state: 'playing', 'paused', or 'stopped'. If
    PLAYING or PAUSED, show information on the current track.

    Calls PlaybackController.get_state(), and if the state is PLAYING
    or PAUSED, gets PlaybackController.get_current_track() and
    PlaybackController.get_time_position().'''
    server = getServer()
    state = server.core.playback.get_state()
    logging.debug('Got playback state: %r', state)

    if state.upper() == 'STOPPED':
        print('Playback is currently stopped')
    else:
        track = server.core.playback.get_current_track()
        logging.debug('Track is %r', track)
        logging.debug('Track loaded is %r',
                      jsonrpclib.jsonclass.load(track))
        pos = server.core.playback.get_time_position()
        logging.debug('Pos is %r', pos)
        print('{} track: "{}", by {} (at {})'.format(
            state.title(),
            track['name'],
            ','.join([a['name'] for a in track['artists']]),
            formatTimeposition(pos)))

def find(self, item, description='', event_type=''):
    """Find a record in the activity log whose description matches the
    regexp, optionally filtered by event type. `item` may also be given
    in the combined 'type: description' form.
    """
    # TODO: should be refactored, dumb logic
    if ': ' in item:
        splited = item.split(': ', 1)
        if splited[0] in self.TYPES:
            description = item.split(': ')[1]
            event_type = item.split(': ')[0]
        else:
            description = item
    else:
        if not description:
            description = item

    if event_type:
        found = [x['time'] for x in self.log
                 if re.search(description, x['description'])
                 and x['eventTypeText'] == event_type]
    else:
        found = [x['time'] for x in self.log
                 if re.search(description, x['description'])]

    if len(found):
        return found
    raise exceptions.NotFoundError(
        "Item '{}' is not found with (description='{}', event_type='{}')"
        .format(item, description, event_type))

def htmlCtxtUseOptions(self, options):
    """Applies the options to the parser context"""
    ret = libxml2mod.htmlCtxtUseOptions(self._o, options)
    return ret

def update_metric(self, metric, labels, pre_sliced=False):
    """Update metric with the current executor."""
    self.curr_execgrp.update_metric(metric, labels, pre_sliced)

def send_signal(self, s):
    """
    Send a signal to the daemon process.

    The signal must have been enabled using the ``signals`` parameter
    of :py:meth:`Service.__init__`. Otherwise, a ``ValueError`` is
    raised.
    """
    self._get_signal_event(s)  # Check if signal has been enabled
    pid = self.get_pid()
    if not pid:
        raise ValueError('Daemon is not running.')
    os.kill(pid, s)

def cookie_attr_value_check(attr_name, attr_value):
    """Check cookie attribute value for validity. Return True if value
    is valid.

    :param attr_name: attribute name to check
    :param attr_value: attribute value to check
    :return: bool
    """
    attr_value.encode('us-ascii')
    return WHTTPCookie.cookie_attr_value_compliance[attr_name].match(
        attr_value) is not None

def referenceLengths(self):
    """
    Get the lengths of wanted references.

    @raise UnknownReference: If a reference id is not present in the
        SAM/BAM file.
    @return: A C{dict} of C{str} reference id to C{int} length with a
        key for each reference id in C{self.referenceIds} or for all
        references if C{self.referenceIds} is C{None}.
    """
    result = {}
    with samfile(self.filename) as sam:
        if self.referenceIds:
            for referenceId in self.referenceIds:
                tid = sam.get_tid(referenceId)
                if tid == -1:
                    raise UnknownReference(
                        'Reference %r is not present in the SAM/BAM '
                        'file.' % referenceId)
                else:
                    result[referenceId] = sam.lengths[tid]
        else:
            result = dict(zip(sam.references, sam.lengths))
    return result

def search(self, CorpNum, JobID, TradeType, TradeUsage, Page, PerPage,
           Order, UserID=None):
    """Query collection results.

    args
        CorpNum : Popbill member's business registration number
        JobID : job ID
        TradeType : document type array, N - normal cash receipt,
            C - cancelled cash receipt
        TradeUsage : trade usage array, P - income deduction,
            C - expense evidence
        Page : page number
        PerPage : number of items per page, up to 1000
        Order : sort direction, D - descending, A - ascending
        UserID : Popbill member's user ID
    return
        collection result info
    raise
        PopbillException
    """
    if JobID is None or len(JobID) != 18:
        raise PopbillException(-99999999,
                               "The job ID (JobID) is not valid.")

    uri = '/HomeTax/Cashbill/' + JobID
    uri += '?TradeType=' + ','.join(TradeType)
    uri += '&TradeUsage=' + ','.join(TradeUsage)
    uri += '&Page=' + str(Page)
    uri += '&PerPage=' + str(PerPage)
    uri += '&Order=' + Order

    return self._httpget(uri, CorpNum, UserID)

def parse_color(color):
    r"""Turns a color into an (r, g, b) tuple

    >>> parse_color('white')
    (255, 255, 255)
    >>> parse_color('#ff0000')
    (255, 0, 0)
    >>> parse_color('#f00')
    (255, 0, 0)
    >>> parse_color((255, 0, 0))
    (255, 0, 0)
    >>> from fabulous import grapefruit
    >>> parse_color(grapefruit.Color((0.0, 1.0, 0.0)))
    (0, 255, 0)
    """
    if isinstance(color, basestring):
        color = grapefruit.Color.NewFromHtml(color)
    if isinstance(color, int):
        (r, g, b) = xterm256.xterm_to_rgb(color)
    elif hasattr(color, 'rgb'):
        (r, g, b) = [int(c * 255.0) for c in color.rgb]
    else:
        (r, g, b) = color
    assert isinstance(r, int) and 0 <= r <= 255
    assert isinstance(g, int) and 0 <= g <= 255
    assert isinstance(b, int) and 0 <= b <= 255
    return (r, g, b)

r"""Turns a color into an (r, g, b) tuple >>> parse_color('white') (255, 255, 255) >>> parse_color('#ff0000') (255, 0, 0) >>> parse_color('#f00') (255, 0, 0) >>> parse_color((255, 0, 0)) (255, 0, 0) >>> from fabulous import grapefruit >>> parse_color(grapefruit.Color((0.0, 1.0, 0.0))) (0, 255, 0)
def paste(xsel=False):
    """Returns system clipboard contents."""
    selection = "primary" if xsel else "clipboard"
    try:
        return subprocess.Popen(
            ["xclip", "-selection", selection, "-o"],
            stdout=subprocess.PIPE).communicate()[0].decode("utf-8")
    except OSError as why:
        raise XclipNotFound

def connect(self, exe_path=None, **kwargs):
    """Connect directly to an already logged-in client.

    :param exe_path: path to the client executable, e.g.
        r'C:\\htzqzyb2\\xiadan.exe'; defaults to
        r'C:\\htzqzyb2\\xiadan.exe'
    :return:
    """
    connect_path = exe_path or self._config.DEFAULT_EXE_PATH
    if connect_path is None:
        raise ValueError(
            "Parameter exe_path is not set; please set it to the "
            "client's exe path, e.g. C:\\<client install dir>\\xiadan.exe"
        )

    self._app = pywinauto.Application().connect(
        path=connect_path, timeout=10
    )
    self._close_prompt_windows()
    self._main = self._app.top_window()

def _rm_get_repeat_coords_from_header(parts):
    """
    extract the repeat coordinates of a repeat masker match from a header
    line. An example header line is::

      239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
      239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4

    if the match is to the reverse complement, the start and end coordinates
    are at positions 11 and 12 (zero-based indexes), otherwise they're at
    positions 9 and 10. In the latter case, the 'start' is the earlier number
    and the end is the larger one. In reverse complement matches, RM lists
    the 'start' as the larger number and the end as the smaller one. We swap
    these around to match the Pyokit convention of start < end always and
    also adjust the end so it is not inclusive of the last position

    :param parts: the header line, as a tokenized list.
    :return: tuple of (start, end)
    """
    assert((parts[8] == "C" and len(parts) == 15) or (len(parts) == 14))
    if len(parts) == 14:
        s = int(parts[9])
        e = int(parts[10]) + 1
    else:
        s = int(parts[12])
        e = int(parts[11]) + 1
    if (s >= e):
        raise AlignmentIteratorError("invalid repeatmasker header: "
                                     + " ".join(parts))
    return (s, e)
extract the repeat coordinates of a repeat masker match from a header
line. An example header line is::

  239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
  239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4

if the match is to the reverse complement, the start and end coordinates
are at positions 11 and 12 (zero-based indexes), otherwise they're at
positions 9 and 10. In the latter case, the 'start' is the earlier number
and the end is the larger one. In reverse complement matches, RM lists
the 'start' as the larger number and the end as the smaller one. We swap
these around to match the Pyokit convention of start < end always and
also adjust the end so it is not inclusive of the last position

:param parts: the header line, as a tokenized list.
:return: tuple of (start, end)
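For illustration, applying the function above to its own documented example header (a reverse-complement match, so the coordinates are read from positions 12 and 11 and then swapped) yields a conventional half-open interval:

# Tokenized reverse-complement example taken from the docstring above.
parts = ("239 29.42 1.92 0.97 chr1 11 17 (41) C "
         "XX#YY (74) 104 1 m_b1s502i1 4").split()
# RM lists start=104 (index 11) and end=1 (index 12); after swapping and
# making the end exclusive, start < end holds.
print(_rm_get_repeat_coords_from_header(parts))  # -> (1, 105)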
def loads(data, use_datetime=0): """data -> unmarshalled data, method name Convert an XML-RPC packet to unmarshalled data plus a method name (None if not present). If the XML-RPC packet represents a fault condition, this function raises a Fault exception. """ p, u = getparser(use_datetime=use_datetime) p.feed(data) p.close() return u.close(), u.getmethodname()
data -> unmarshalled data, method name Convert an XML-RPC packet to unmarshalled data plus a method name (None if not present). If the XML-RPC packet represents a fault condition, this function raises a Fault exception.
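A round-trip sketch, shown with the Python 3 stdlib names (the function above is the same loads from xmlrpclib/xmlrpc.client): dumps builds the packet, loads decodes it back into a (params, methodname) pair.

from xmlrpc.client import dumps, loads

packet = dumps((1, 'two'), methodname='echo')
params, method = loads(packet)
print(params, method)  # -> (1, 'two') echo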
def to_bitarray(data, width=8):
    ''' Convert data (list of integers, bytearray or integer) to bitarray '''
    if isinstance(data, (list, bytearray)):
        data = combine_hex(data)
    return [digit == '1' for digit in bin(data)[2:].zfill(width)]
Convert data (list of integers, bytearray or integer) to bitarray
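A quick check of the integer path (list and bytearray inputs are first merged by the module's combine_hex helper, which is not shown here):

# 0xA5 == 0b10100101; the default width of 8 pads with leading zeros.
print(to_bitarray(0xA5))
# -> [True, False, True, False, False, True, False, True]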
def _guess_x_simple(self, y_desired, y_dims=None, **kwargs):
    """Provide initial guesses for a probable x from y"""
    _, indexes = self.fmodel.dataset.nn_y(y_desired, dims=y_dims, k=10)
    return [self.fmodel.get_x(i) for i in indexes]
Provide initial guesses for a probable x from y
def _write_csv(self, datasets, filename): """ Write CSV :param datasets: Datasets :param filename: File Name """ with open('/'.join([self.output, filename]), mode='w', encoding=self.encoding) as write_file: writer = csv.writer(write_file, delimiter=',') for i, row in enumerate(datasets): if i == 0: # header writer.writerow(list(row.keys())) writer.writerow(list(row.values()))
Write CSV :param datasets: Datasets :param filename: File Name
def load_from_config(self, config): """Load model from passed configuration.""" self.site = config.get("id", False) self.classification = config.get("class", False) self.tags = config.get("tags", False) self._load_key_value( config.get("key_value_data", False) ) self._load_data( config.get("specific_data", False) ) self._load_crumb( config.get("crumbs", False) ) self._load_media_list( config.get("media", False) ) self.unique_field = self.data_dict.get( config.get("unique_field", False), False )
Load model from passed configuration.
def pstdev(data, mu=None): """Return the square root of the population variance. See ``pvariance`` for arguments and other details. """ var = pvariance(data, mu) try: return var.sqrt() except AttributeError: return math.sqrt(var)
Return the square root of the population variance. See ``pvariance`` for arguments and other details.
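A worked check, assuming the pvariance from the same module computes the population variance: for [1, 2, 3, 4] the mean is 2.5, the squared deviations sum to 5, and dividing by n=4 gives a variance of 1.25.

print(pstdev([1, 2, 3, 4]))  # -> 1.118033988749895, i.e. sqrt(1.25)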
def smoothed_hazard_(self, bandwidth):
    """
    Parameters
    -----------
    bandwidth: float
        the bandwidth used in the Epanechnikov kernel.

    Returns
    -------
    DataFrame:
        a DataFrame of the smoothed hazard
    """
    timeline = self.timeline
    cumulative_hazard_name = self.cumulative_hazard_.columns[0]
    hazard_name = "differenced-" + cumulative_hazard_name
    hazard_ = self.cumulative_hazard_.diff().fillna(self.cumulative_hazard_.iloc[0])
    C = (hazard_[cumulative_hazard_name] != 0.0).values
    return pd.DataFrame(
        1.0 / bandwidth
        * np.dot(epanechnikov_kernel(timeline[:, None], timeline[C][None, :], bandwidth), hazard_.values[C, :]),
        columns=[hazard_name],
        index=timeline,
    )
Parameters
-----------
bandwidth: float
    the bandwidth used in the Epanechnikov kernel.

Returns
-------
DataFrame:
    a DataFrame of the smoothed hazard
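The epanechnikov_kernel helper is not shown above; a minimal sketch, assuming it is the standard Epanechnikov kernel K(u) = 0.75 * (1 - u**2) on |u| <= 1 and 0 elsewhere, evaluated at the scaled distance u = (t - s) / bandwidth:

import numpy as np

def epanechnikov_kernel(t, s, bandwidth=1.0):
    # Assumed reconstruction of the helper used above, not its source:
    # the standard Epanechnikov kernel on the scaled distance, zero
    # outside |u| <= 1.
    u = (t - s) / bandwidth
    return 0.75 * (1 - u ** 2) * (np.abs(u) <= 1)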
def add_if_unique(self, name): """ Returns ``True`` on success. Returns ``False`` if the name already exists in the namespace. """ with self.lock: if name not in self.names: self.names.append(name) return True return False
Returns ``True`` on success. Returns ``False`` if the name already exists in the namespace.
def send_single_value(self, channel: int, value: int) -> int: """ Send a single value to the uDMX :param channel: DMX channel number, 1-512 :param value: Value to be sent to channel, 0-255 :return: number of bytes actually sent """ SetSingleChannel = 1 n = self._send_control_message(SetSingleChannel, value_or_length=value, channel=channel, data_or_length=1) return n
Send a single value to the uDMX :param channel: DMX channel number, 1-512 :param value: Value to be sent to channel, 0-255 :return: number of bytes actually sent
def get_users(session, query): """ Get one or more users """ # GET /api/users/0.1/users response = make_get_request(session, 'users', params_data=query) json_data = response.json() if response.status_code == 200: return json_data['result'] else: raise UsersNotFoundException( message=json_data['message'], error_code=json_data['error_code'], request_id=json_data['request_id'])
Get one or more users
def t_ID(self, t): r"""[a-zA-Z_][a-zA-Z_0-9]*""" # If the value is a reserved name, give it the appropriate type (not ID) if t.value in self.reserved: t.type = self.reserved[t.value] # If it's a function, give it the FUNC type elif t.value in self.functions: t.type = 'FUNC' return t
r"""[a-zA-Z_][a-zA-Z_0-9]*
def topil(self, **kwargs): """ Get PIL Image. :return: :py:class:`PIL.Image`, or `None` if the composed image is not available. """ if self.has_preview(): return pil_io.convert_image_data_to_pil(self._record, **kwargs) return None
Get PIL Image. :return: :py:class:`PIL.Image`, or `None` if the composed image is not available.
def to_julian_date(self): """ Convert Datetime Array to float64 ndarray of Julian Dates. 0 Julian date is noon January 1, 4713 BC. http://en.wikipedia.org/wiki/Julian_day """ # http://mysite.verizon.net/aesir_research/date/jdalg2.htm year = np.asarray(self.year) month = np.asarray(self.month) day = np.asarray(self.day) testarr = month < 3 year[testarr] -= 1 month[testarr] += 12 return (day + np.fix((153 * month - 457) / 5) + 365 * year + np.floor(year / 4) - np.floor(year / 100) + np.floor(year / 400) + 1721118.5 + (self.hour + self.minute / 60.0 + self.second / 3600.0 + self.microsecond / 3600.0 / 1e+6 + self.nanosecond / 3600.0 / 1e+9 ) / 24.0)
Convert Datetime Array to float64 ndarray of Julian Dates. 0 Julian date is noon January 1, 4713 BC. http://en.wikipedia.org/wiki/Julian_day
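The constants can be sanity-checked against the J2000.0 epoch (2000-01-01 12:00 UTC, JD 2451545.0); since January has month < 3, the algorithm folds it into month 13 of the previous year:

import numpy as np

# January (month < 3) becomes month 13 of the previous year.
year, month, day, hour = 1999, 13, 1, 12
jd = (day + np.fix((153 * month - 457) / 5) + 365 * year
      + np.floor(year / 4) - np.floor(year / 100) + np.floor(year / 400)
      + 1721118.5 + hour / 24.0)
print(jd)  # -> 2451545.0, the J2000.0 epoch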
def wraps(wrapped, assigned = WRAPPER_ASSIGNMENTS, updated = WRAPPER_UPDATES): """Decorator factory to apply update_wrapper() to a wrapper function Returns a decorator that invokes update_wrapper() with the decorated function as the wrapper argument and the arguments to wraps() as the remaining arguments. Default arguments are as for update_wrapper(). This is a convenience function to simplify applying partial() to update_wrapper(). """ return partial(update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated)
Decorator factory to apply update_wrapper() to a wrapper function Returns a decorator that invokes update_wrapper() with the decorated function as the wrapper argument and the arguments to wraps() as the remaining arguments. Default arguments are as for update_wrapper(). This is a convenience function to simplify applying partial() to update_wrapper().
def filter_recordings(recordings): """Remove all recordings which have points without time. Parameters ---------- recordings : list of dicts Each dictionary has the keys 'data' and 'segmentation' Returns ------- list of dicts : Only recordings where all points have time values. """ new_recordings = [] for recording in recordings: recording['data'] = json.loads(recording['data']) tmp = json.loads(recording['segmentation']) recording['segmentation'] = normalize_segmentation(tmp) had_none = False for stroke in recording['data']: for point in stroke: if point['time'] is None: logging.debug("Had None-time: %i", recording['id']) had_none = True break if had_none: break if not had_none: new_recordings.append(recording) recordings = new_recordings logging.info("Done filtering") return recordings
Remove all recordings which have points without time. Parameters ---------- recordings : list of dicts Each dictionary has the keys 'data' and 'segmentation' Returns ------- list of dicts : Only recordings where all points have time values.
def current_frame(self): """ The current frame number that should be displayed.""" if not self.__fps: raise RuntimeError("fps not set so current frame number cannot be" " calculated") else: return int(self.__fps * self.time)
The current frame number that should be displayed.
def trim_wav_ms(in_path: Path, out_path: Path, start_time: int, end_time: int) -> None: """ Extracts part of a WAV File. First attempts to call sox. If sox is unavailable, it backs off to pydub+ffmpeg. Args: in_path: A path to the source file to extract a portion of out_path: A path describing the to-be-created WAV file. start_time: The point in the source WAV file at which to begin extraction. end_time: The point in the source WAV file at which to end extraction. """ try: trim_wav_sox(in_path, out_path, start_time, end_time) except FileNotFoundError: # Then sox isn't installed, so use pydub/ffmpeg trim_wav_pydub(in_path, out_path, start_time, end_time) except subprocess.CalledProcessError: # Then there is an issue calling sox. Perhaps the input file is an mp4 # or some other filetype not supported out-of-the-box by sox. So we try # using pydub/ffmpeg. trim_wav_pydub(in_path, out_path, start_time, end_time)
Extracts part of a WAV File. First attempts to call sox. If sox is unavailable, it backs off to pydub+ffmpeg. Args: in_path: A path to the source file to extract a portion of out_path: A path describing the to-be-created WAV file. start_time: The point in the source WAV file at which to begin extraction. end_time: The point in the source WAV file at which to end extraction.
def community_topic_posts(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/posts#list-posts" api_path = "/api/v2/community/topics/{id}/posts.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/help_center/posts#list-posts
def addToDB(abbr=None, dbname=manualDBname):
    """Adds _abbr_ to the database of journals. The database is kept separate
    from the one scraped from WOS, this supersedes it. The database by default
    is stored with the WOS one and the name is given by
    `metaknowledge.journalAbbreviations.manualDBname`. To create an empty
    database run **addToDB** without an _abbr_ argument.

    # Parameters

    _abbr_ : `optional [str or dict[str : str]]`

    > The journal abbreviation to be added to the database, it can either be a
    single string in which case that string will be added with itself as the
    full name, or a dict can be given with the abbreviations as keys and their
    names as strings, use pipes (`'|'`) to separate multiple names. Note, if
    the empty string is given as a name the abbreviation will be considered
    manually __excluded__, i.e. having excludeFromDB() run on it.

    _dbname_ : `optional [str]`

    > The name of the database file, default is
    `metaknowledge.journalAbbreviations.manualDBname`.
    """
    dbLoc = os.path.normpath(os.path.dirname(__file__))
    with dbm.dumb.open(dbLoc + '/' + dbname) as db:
        if isinstance(abbr, str):
            db[abbr] = abbr
        elif isinstance(abbr, dict):
            try:
                db.update(abbr)
            except TypeError:
                raise TypeError("The keys and values of abbr must be strings.")
        elif abbr is None:
            pass
        else:
            raise TypeError("abbr must be a str or dict.")
Adds _abbr_ to the database of journals. The database is kept separate
from the one scraped from WOS, this supersedes it. The database by default
is stored with the WOS one and the name is given by
`metaknowledge.journalAbbreviations.manualDBname`. To create an empty
database run **addToDB** without an _abbr_ argument.

# Parameters

_abbr_ : `optional [str or dict[str : str]]`

> The journal abbreviation to be added to the database, it can either be a
single string in which case that string will be added with itself as the
full name, or a dict can be given with the abbreviations as keys and their
names as strings, use pipes (`'|'`) to separate multiple names. Note, if
the empty string is given as a name the abbreviation will be considered
manually __excluded__, i.e. having excludeFromDB() run on it.

_dbname_ : `optional [str]`

> The name of the database file, default is
`metaknowledge.journalAbbreviations.manualDBname`.
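Hypothetical usage following the docstring (the abbreviations and names below are made up for illustration):

# A lone string maps the abbreviation to itself.
addToDB('J. Example Sci.')
# A dict maps abbreviations to full names; pipes separate alternatives,
# and an empty name marks the abbreviation as manually excluded.
addToDB({'J. Example Sci.': 'Journal of Example Science|J Example Sci',
         'Ann. Example': ''})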
def render_diagram(root_task, out_base, max_param_len=20, horizontal=False, colored=False): """Render a diagram of the ETL pipeline All upstream tasks (i.e. requirements) of :attr:`root_task` are rendered. Nodes are, by default, styled as simple rects. This style is augmented by any :attr:`diagram_style` attributes of the tasks. .. note:: This function requires the 'dot' executable from the GraphViz package to be installed and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`. Args: root_task (luigi.Task): Task instance that defines the 'upstream root' of the pipeline out_base (str): base output file name (file endings will be appended) max_param_len (int): Maximum shown length of task parameter values horizontal (bool): If True, layout graph left-to-right instead of top-to-bottom colored (bool): If True, show task completion status by color of nodes """ import re import codecs import subprocess from ozelot import config from ozelot.etl.tasks import get_task_name, get_task_param_string # the graph - lines in dot file lines = [u"digraph G {"] if horizontal: lines.append(u"rankdir=LR;") # helper function: make unique task id from task name and parameters: # task name + parameter string, with spaces replaced with _ and all non-alphanumerical characters stripped def get_id(task): s = get_task_name(task) + "_" + get_task_param_string(task) return re.sub(r'\W+', '', re.sub(' ', '_', s)) # node names of tasks that have already been added to the graph existing_nodes = set() # edge sets (tuples of two node names) that have already been added existing_edges = set() # recursion function for generating the pipeline graph def _build(task, parent_id=None): tid = get_id(task) # add node if it's not already there if tid not in existing_nodes: # build task label: task name plus dictionary of parameters as table params = task.to_str_params() param_list = "" for k, v in params.items(): # truncate param value if necessary, and add "..." if len(v) > max_param_len: v = v[:max_param_len] + "..." 
param_list += "<TR><TD ALIGN=\"LEFT\">" \ "<FONT POINT-SIZE=\"10\">{:s}</FONT>" \ "</TD><TD ALIGN=\"LEFT\">" \ "<FONT POINT-SIZE=\"10\">{:s}</FONT>" \ "</TD></TR>".format(k, v) label = "<TABLE BORDER=\"0\" CELLSPACING=\"1\" CELLPADDING=\"1\">" \ "<TR><TD COLSPAN=\"2\" ALIGN=\"CENTER\">" \ "<FONT POINT-SIZE=\"12\">{:s}</FONT>" \ "</TD></TR>" \ "".format(get_task_name(task)) + param_list + "</TABLE>" style = getattr(task, 'diagram_style', []) if colored: color = ', color="{:s}"'.format("green" if task.complete() else "red") else: color = '' # add a node for the task lines.append(u"{name:s} [label=< {label:s} >, shape=\"rect\" {color:s}, style=\"{style:s}\"];\n" u"".format(name=tid, label=label, color=color, style=','.join(style))) existing_nodes.add(tid) # recurse over requirements for req in task.requires(): _build(req, parent_id=tid) # add edge from current node to (upstream) parent, if it doesn't already exist if parent_id is not None and (tid, parent_id) not in existing_edges: lines.append(u"{source:s} -> {target:s};\n".format(source=tid, target=parent_id)) # generate pipeline graph _build(root_task) # close the graph definition lines.append(u"}") # write description in DOT format with codecs.open(out_base + '.dot', 'w', encoding='utf-8') as f: f.write(u"\n".join(lines)) # check existence of DOT_EXECUTABLE variable and file if not hasattr(config, 'DOT_EXECUTABLE'): raise RuntimeError("Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'") if not os.path.exists(config.DOT_EXECUTABLE): raise IOError("Could not find file pointed to by 'DOT_EXECUTABLE': " + str(config.DOT_EXECUTABLE)) # render to image using DOT # noinspection PyUnresolvedReferences subprocess.check_call([ config.DOT_EXECUTABLE, '-T', 'png', '-o', out_base + '.png', out_base + '.dot' ])
Render a diagram of the ETL pipeline All upstream tasks (i.e. requirements) of :attr:`root_task` are rendered. Nodes are, by default, styled as simple rects. This style is augmented by any :attr:`diagram_style` attributes of the tasks. .. note:: This function requires the 'dot' executable from the GraphViz package to be installed and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`. Args: root_task (luigi.Task): Task instance that defines the 'upstream root' of the pipeline out_base (str): base output file name (file endings will be appended) max_param_len (int): Maximum shown length of task parameter values horizontal (bool): If True, layout graph left-to-right instead of top-to-bottom colored (bool): If True, show task completion status by color of nodes
def _string_find(self, substr, start=None, end=None): """ Returns position (0 indexed) of first occurence of substring, optionally after a particular position (0 indexed) Parameters ---------- substr : string start : int, default None end : int, default None Not currently implemented Returns ------- position : int, 0 indexed """ if end is not None: raise NotImplementedError return ops.StringFind(self, substr, start, end).to_expr()
Returns position (0 indexed) of first occurence of substring, optionally after a particular position (0 indexed) Parameters ---------- substr : string start : int, default None end : int, default None Not currently implemented Returns ------- position : int, 0 indexed
def compute_checksum(line): """Compute the TLE checksum for the given line.""" return sum((int(c) if c.isdigit() else c == '-') for c in line[0:68]) % 10
Compute the TLE checksum for the given line.
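Small hand-checkable cases: digits contribute their value, each '-' contributes 1, and every other character (letters, '+', '.', spaces) contributes 0.

print(compute_checksum("12345"))   # (1+2+3+4+5) % 10 -> 5
print(compute_checksum("1 2-3+"))  # (1+0+2+1+3+0) % 10 -> 7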
def maybe_call_closing_deferred(self): """ Used internally to callback on the _closing_deferred if it exists. """ if self._closing_deferred: self._closing_deferred.callback(self) self._closing_deferred = None
Used internally to callback on the _closing_deferred if it exists.
def setModel(self, model):
    """ Sets the model.
        Checks that the model is a BaseTreeModel.
    """
    check_class(model, BaseTreeModel)
    super(ArgosTreeView, self).setModel(model)
Sets the model.
Checks that the model is a BaseTreeModel.
def trans_new(name, transform, inverse, breaks=None,
              minor_breaks=None, _format=None,
              domain=(-np.inf, np.inf), doc='', **kwargs):
    """
    Create a transformation class object

    Parameters
    ----------
    name : str
        Name of the transformation
    transform : callable ``f(x)``
        A function (preferably a `ufunc`) that computes
        the transformation.
    inverse : callable ``f(x)``
        A function (preferably a `ufunc`) that computes
        the inverse of the transformation.
    breaks : callable ``f(limits)``
        Function to compute the breaks for this transform.
        If None, then a default good enough for a linear
        domain is used.
    minor_breaks : callable ``f(major, limits)``
        Function to compute the minor breaks for this
        transform. If None, then a default good enough for
        a linear domain is used.
    _format : callable ``f(breaks)``
        Function to format the generated breaks.
    domain : array_like
        Domain over which the transformation is valid.
        It should be of length 2.
    doc : str
        Docstring for the class.
    **kwargs : dict
        Attributes of the transform, e.g. if base is passed
        in kwargs, then `t.base` would be a valid attribute.

    Returns
    -------
    out : trans
        Transform class
    """
    def _get(func):
        if isinstance(func, (classmethod, staticmethod, MethodType)):
            return func
        else:
            return staticmethod(func)

    klass_name = '{}_trans'.format(name)

    d = {'transform': _get(transform),
         'inverse': _get(inverse),
         'domain': domain,
         '__doc__': doc,
         **kwargs}

    if breaks:
        d['breaks_'] = _get(breaks)

    if minor_breaks:
        d['minor_breaks'] = _get(minor_breaks)

    if _format:
        d['format'] = _get(_format)

    return type(klass_name, (trans,), d)
Create a transformation class object

Parameters
----------
name : str
    Name of the transformation
transform : callable ``f(x)``
    A function (preferably a `ufunc`) that computes
    the transformation.
inverse : callable ``f(x)``
    A function (preferably a `ufunc`) that computes
    the inverse of the transformation.
breaks : callable ``f(limits)``
    Function to compute the breaks for this transform.
    If None, then a default good enough for a linear
    domain is used.
minor_breaks : callable ``f(major, limits)``
    Function to compute the minor breaks for this
    transform. If None, then a default good enough for
    a linear domain is used.
_format : callable ``f(breaks)``
    Function to format the generated breaks.
domain : array_like
    Domain over which the transformation is valid.
    It should be of length 2.
doc : str
    Docstring for the class.
**kwargs : dict
    Attributes of the transform, e.g. if base is passed
    in kwargs, then `t.base` would be a valid attribute.

Returns
-------
out : trans
    Transform class
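A minimal sketch of building a transform class with it, assuming numpy is available and that the returned class can be instantiated without arguments (breaks and minor_breaks fall back to the linear defaults because none are supplied):

import numpy as np

# Hypothetical log2 transform for illustration.
log2_trans = trans_new('log2', np.log2, lambda x: 2 ** x,
                       domain=(1e-100, np.inf))
t = log2_trans()
print(t.transform(8))   # -> 3.0
print(t.inverse(3.0))   # -> 8.0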
def UrnStringToHuntId(urn):
  """Converts given URN string to a hunt id string."""
  if urn.startswith(AFF4_PREFIX):
    urn = urn[len(AFF4_PREFIX):]

  components = urn.split("/")
  if len(components) != 2 or components[0] != "hunts":
    raise ValueError("Invalid hunt URN: %s" % urn)

  return components[-1]
Converts given URN string to a hunt id string.
def lattice(self, lattice): """ Sets Lattice associated with PeriodicSite """ self._lattice = lattice self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
Sets Lattice associated with PeriodicSite
def decode_conjure_bean_type(cls, obj, conjure_type):
    """Decodes json into a conjure bean type (a plain bean, not enum
    or union).

    Args:
        obj: the json object to decode
        conjure_type: a class object which is the bean type
            we're decoding into
    Returns:
        An instance of a bean of type conjure_type.
    """
    deserialized = {}  # type: Dict[str, Any]
    for (python_arg_name, field_definition) \
            in conjure_type._fields().items():
        field_identifier = field_definition.identifier

        if field_identifier not in obj or obj[field_identifier] is None:
            cls.check_null_field(
                obj, deserialized, python_arg_name, field_definition)
        else:
            value = obj[field_identifier]
            field_type = field_definition.field_type
            deserialized[python_arg_name] = \
                cls.do_decode(value, field_type)
    return conjure_type(**deserialized)
Decodes json into a conjure bean type (a plain bean, not enum or union).

Args:
    obj: the json object to decode
    conjure_type: a class object which is the bean type
        we're decoding into
Returns:
    An instance of a bean of type conjure_type.
def make_rawr_zip_payload(rawr_tile, date_time=None): """make a zip file from the rawr tile formatted data""" if date_time is None: date_time = gmtime()[0:6] buf = StringIO() with zipfile.ZipFile(buf, mode='w') as z: for fmt_data in rawr_tile.all_formatted_data: zip_info = zipfile.ZipInfo(fmt_data.name, date_time) z.writestr(zip_info, fmt_data.data, zipfile.ZIP_DEFLATED) return buf.getvalue()
make a zip file from the rawr tile formatted data
def toc(self):
    """End collecting for current batch and return results.

    Call after computation of current batch.

    Returns
    -------
    res : list of (step, name, stat-string) tuples
    """
    if not self.activated:
        return []
    for exe in self.exes:
        for array in exe.arg_arrays:
            array.wait_to_read()
        for array in exe.aux_arrays:
            array.wait_to_read()
    for exe in self.exes:
        for name, array in zip(exe._symbol.list_arguments(), exe.arg_arrays):
            if self.re_prog.match(name):
                self.queue.append((self.step, name, self.stat_func(array)))
        for name, array in zip(exe._symbol.list_auxiliary_states(), exe.aux_arrays):
            if self.re_prog.match(name):
                self.queue.append((self.step, name, self.stat_func(array)))
    self.activated = False
    res = []
    if self.sort:
        self.queue.sort(key=lambda x: x[1])
    for n, k, v_list in self.queue:
        if isinstance(v_list, NDArray):
            v_list = [v_list]
        assert isinstance(v_list, list)
        s = ''
        for v in v_list:
            assert isinstance(v, NDArray)
            if v.shape == (1,):
                s += str(v.asscalar()) + '\t'
            else:
                s += str(v.asnumpy()) + '\t'
        res.append((n, k, s))
    self.queue = []
    return res
End collecting for current batch and return results.

Call after computation of current batch.

Returns
-------
res : list of (step, name, stat-string) tuples
def add_content(self, content, mime_type=None):
    """Add content to the email

    :param content: Content to be added to the email
    :type content: Content
    :param mime_type: Override the mime type
    :type mime_type: MimeType, str
    """
    if isinstance(content, str):
        content = Content(mime_type, content)

    # Content of mime type text/plain must always come first
    if content.mime_type == "text/plain":
        self._contents = self._ensure_insert(content, self._contents)
    else:
        if self._contents:
            index = len(self._contents)
        else:
            index = 0

        self._contents = self._ensure_append(
            content, self._contents, index=index)
Add content to the email

:param content: Content to be added to the email
:type content: Content
:param mime_type: Override the mime type
:type mime_type: MimeType, str
def lost_dimensions(point_fmt_in, point_fmt_out): """ Returns a list of the names of the dimensions that will be lost when converting from point_fmt_in to point_fmt_out """ unpacked_dims_in = PointFormat(point_fmt_in).dtype unpacked_dims_out = PointFormat(point_fmt_out).dtype out_dims = unpacked_dims_out.fields completely_lost = [] for dim_name in unpacked_dims_in.names: if dim_name not in out_dims: completely_lost.append(dim_name) return completely_lost
Returns a list of the names of the dimensions that will be lost when converting from point_fmt_in to point_fmt_out
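Hypothetical usage, assuming the usual LAS layout where point format 1 extends format 0 with a GPS time field (the exact dimension names depend on the library's PointFormat definitions):

# Downgrading from point format 1 to 0 would drop the extra field.
print(lost_dimensions(1, 0))  # expected: ['gps_time']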
def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', service, '--pool', pool]) if six.PY3: out = out.decode('UTF-8') except CalledProcessError: return False return rbd_img in out
Check to see if a RADOS block device exists.
def requires_to_requires_dist(requirement): """Compose the version predicates for requirement in PEP 345 fashion.""" requires_dist = [] for op, ver in requirement.specs: requires_dist.append(op + ver) if not requires_dist: return '' return " (%s)" % ','.join(requires_dist)
Compose the version predicates for requirement in PEP 345 fashion.
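A sketch with a hypothetical stand-in for the expected input (the real argument is a setuptools/pkg_resources Requirement, whose .specs attribute is a list of (operator, version) pairs; only that attribute is used here):

class FakeRequirement:
    # Stand-in for pkg_resources.Requirement.
    specs = [('>=', '1.0'), ('<', '2.0')]

print(requires_to_requires_dist(FakeRequirement()))  # -> ' (>=1.0,<2.0)'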
async def _queue(self, ctx, page: int = 1): """ Shows the player's queue. """ player = self.bot.lavalink.players.get(ctx.guild.id) if not player.queue: return await ctx.send('There\'s nothing in the queue! Why not queue something?') items_per_page = 10 pages = math.ceil(len(player.queue) / items_per_page) start = (page - 1) * items_per_page end = start + items_per_page queue_list = '' for index, track in enumerate(player.queue[start:end], start=start): queue_list += f'`{index + 1}.` [**{track.title}**]({track.uri})\n' embed = discord.Embed(colour=discord.Color.blurple(), description=f'**{len(player.queue)} tracks**\n\n{queue_list}') embed.set_footer(text=f'Viewing page {page}/{pages}') await ctx.send(embed=embed)
Shows the player's queue.
def pcmd(host, seq, progressive, lr, fb, vv, va):
    """
    Makes the drone move (translate/rotate).

    Parameters:
    seq -- sequence number
    progressive -- True: enable progressive commands, False: disable (i.e.
        enable hovering mode)
    lr -- left-right tilt: float [-1..1] negative: left, positive: right
    fb -- front-back tilt: float [-1..1] negative: forwards, positive:
        backwards
    vv -- vertical speed: float [-1..1] negative: go down, positive: rise
    va -- angular speed: float [-1..1] negative: spin left, positive: spin
        right

    The above float values are a percentage of the maximum speed.
    """
    p = 1 if progressive else 0
    at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])
Makes the drone move (translate/rotate).

Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
    enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
    backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
    right

The above float values are a percentage of the maximum speed.
def dump_misspelling_list(self): """Returns a list of misspelled words and corrections.""" results = [] for bad_word in sorted(self._misspelling_dict.keys()): for correction in self._misspelling_dict[bad_word]: results.append([bad_word, correction]) return results
Returns a list of misspelled words and corrections.