code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def ellipse_to_cov(sigma_maj, sigma_min, theta):
    """Build the 2x2 covariance matrix of an error ellipse in (x, y).

    Parameters
    ----------
    sigma_maj : float
        Std. deviation along the major axis of the error ellipse.
    sigma_min : float
        Std. deviation along the minor axis of the error ellipse.
    theta : float
        Rotation angle in radians from the x-axis to the major axis.

    Returns
    -------
    numpy.ndarray
        Symmetric 2x2 covariance matrix.
    """
    c, s = np.cos(theta), np.sin(theta)
    var_maj = sigma_maj ** 2
    var_min = sigma_min ** 2
    covxx = c * c * var_maj + s * s * var_min
    covyy = s * s * var_maj + c * c * var_min
    # Off-diagonal term vanishes when the ellipse is axis-aligned.
    covxy = c * s * (var_maj - var_min)
    return np.array([[covxx, covxy], [covxy, covyy]])
Compute the covariance matrix in two variables x and y given the std. deviation along the semi-major and semi-minor axes and the rotation angle of the error ellipse. Parameters ---------- sigma_maj : float Std. deviation along major axis of error ellipse. sigma_min : float Std. deviation along minor axis of error ellipse. theta : float Rotation angle in radians from x-axis to ellipse major axis.
def DeleteUser(username):
    """Deletes a GRR user from the datastore.

    Args:
      username: Name of the GRR user to delete.

    Raises:
      UserNotFoundError: If no user with the given username exists.
    """
    grr_api = maintenance_utils.InitGRRRootAPI()
    try:
        grr_api.GrrUser(username).Get().Delete()
    except api_errors.ResourceNotFoundError:
        # Translate the low-level API error into the domain error.
        raise UserNotFoundError(username)
Deletes a GRR user from the datastore.
def insert_metric_changes(db, metrics, metric_mapping, commit):
    """Insert rows into the metric_changes table.

    Metrics whose value is zero are skipped.

    :param db: DB-API connection exposing ``executemany``
    :param metrics: `list` of `Metric` objects
    :param dict metric_mapping: Maps metric names to ids
    :param Commit commit: commit whose sha is recorded with each row
    """
    rows = []
    for metric in metrics:
        if metric.value != 0:
            rows.append([commit.sha, metric_mapping[metric.name], metric.value])
    db.executemany(
        'INSERT INTO metric_changes (sha, metric_id, value) VALUES (?, ?, ?)',
        rows,
    )
Insert into the metric_changes tables. :param metrics: `list` of `Metric` objects :param dict metric_mapping: Maps metric names to ids :param Commit commit:
def get_datetime(self, tz=None):
    """Returns the current simulation datetime.

    Parameters
    ----------
    tz : tzinfo or str, optional
        The timezone to return the datetime in. Defaults to UTC.

    Returns
    -------
    dt : datetime
        The current simulation datetime converted to ``tz``.
    """
    current = self.datetime
    # The simulation clock is required to be timezone-aware in UTC.
    assert current.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
    if tz is None:
        return current
    return current.astimezone(tz)
Returns the current simulation datetime. Parameters ---------- tz : tzinfo or str, optional The timezone to return the datetime in. This defaults to utc. Returns ------- dt : datetime The current simulation datetime converted to ``tz``.
def getAnalogID(self, num):
    """Return the COMTRADE ID of a given analog channel number.

    ``num`` is the channel number as it appears in the COMTRADE header.
    """
    # The analog-channel number list maps positionally onto the ID list.
    position = self.An.index(num)
    return self.Ach_id[position]
Returns the COMTRADE ID of a given channel number. The number to be given is the same as in the COMTRADE header.
def from_dict(self, d: Dict[str, Any]) -> None:
    """Load configuration values from a dict.

    Only upper-case keys are treated as config options; other keys are
    ignored.
    """
    upper_items = ((k, v) for k, v in d.items() if k.isupper())
    for key, value in upper_items:
        self._setattr(key, value)
    logger.info("Config is loaded from dict: %r", d)
Load values from a dict.
def Is64bit(self):
    """Returns true if this is a 64 bit process."""
    # Quick reject: on a non-64-bit machine no process can be 64-bit.
    if "64" not in platform.machine():
        return False
    iswow64 = ctypes.c_bool(False)
    # IsWow64Process is resolved elsewhere at import time; it is absent
    # on platforms that do not expose the Win32 API.
    if IsWow64Process is None:
        return False
    if not IsWow64Process(self.h_process, ctypes.byref(iswow64)):
        raise process_error.ProcessError("Error while calling IsWow64Process.")
    # WOW64 means a 32-bit process on a 64-bit OS, hence the inversion.
    return not iswow64.value
Returns true if this is a 64 bit process.
def GuinierPorod(q, G, Rg, alpha):
    """Empirical Guinier-Porod scattering curve.

    Thin wrapper around ``GuinierPorodMulti`` for the single-exponent case.

    Inputs:
        ``q``: independent variable
        ``G``: factor of the Guinier-branch
        ``Rg``: radius of gyration
        ``alpha``: power-law exponent

    Literature: B. Hammouda: A new Guinier-Porod model.
    J. Appl. Crystallogr. (2010) 43, 716-719.
    """
    return GuinierPorodMulti(q, G, Rg, alpha)
Empirical Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor of the Guinier-branch ``Rg``: radius of gyration ``alpha``: power-law exponent Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``a`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
def to_creator(self, for_modify=False):
    """Return a dict suitable for a 'CreateSignature' request.

    The result mirrors the XML form::

        {'name': 'unittest',
         'content': [{'type': 'text/plain', '_content': ...},
                     {'type': 'text/html', '_content': ...}]}

    If the content type is text/plain, the text/html content is sent
    empty (and vice versa) so the server clears the other variant.

    :param for_modify: include the signature id (required for modify
        requests); also allows omitting content.
    :raises AttributeError: if required attributes are missing.
    """
    signature = {}
    if for_modify:
        try:
            signature['id'] = self.id
        except AttributeError:
            raise AttributeError('a modify request should specify an ID')
    # BUG FIX: the original hasattr(self, 'name') check was dead code —
    # both branches assigned self.name. Access it directly; a missing
    # name still raises AttributeError, as before.
    signature['name'] = self.name
    if self.has_content():
        if self._contenttype == 'text/plain':
            plain_text, html_text = self._content, ''
        else:
            plain_text, html_text = '', self._content
        signature['content'] = [
            {'type': 'text/plain', '_content': plain_text},
            {'type': 'text/html', '_content': html_text},
        ]
    elif not for_modify:
        raise AttributeError(
            'too little information on signature, run setContent before')
    return signature
Returns a dict object suitable for a 'CreateSignature'. A signature object for creation is like : <signature name="unittest"> <content type="text/plain">My signature content</content> </signature> which is : { 'name' : 'unittest', 'content': { 'type': 'text/plain', '_content': 'My signature content' } } Note that if the contenttype is text/plain, the content with text/html will be cleared by the request (for consistency).
def init1(self, dae):
    """Set initial voltage for time domain simulation.

    Stores a copy of the bus voltage solution ``dae.y[self.v]`` in
    ``self.v0``.
    """
    self.v0 = matrix(dae.y[self.v])
Set initial voltage for time domain simulation
def delete_speaker(self, speaker_uri):
    """Delete a speaker from a collection.

    :param speaker_uri: the URI that references the speaker
    :type speaker_uri: String
    :rtype: Boolean
    :returns: True if the speaker was deleted
    :raises: APIError if the request was not successful
    """
    response = self.api_request(speaker_uri, method='DELETE')
    return self.__check_success(response)
Delete a speaker from a collection :param speaker_uri: the URI that references the speaker :type speaker_uri: String :rtype: Boolean :returns: True if the speaker was deleted :raises: APIError if the request was not successful
def wells_by_index(self) -> Dict[str, Well]:
    """Build a look-up table of wells keyed by well name.

    The result can be treated as a typical python dictionary whose keys
    are well names. To access well A1, for example, simply write:
    ``labware.wells_by_index()['A1']``.

    :return: Dictionary of well objects keyed by well name
    """
    return dict(zip(self._ordering, self._wells))
Accessor function used to create a look-up table of Wells by name. With indexing one can treat it as a typical python dictionary whose keys are well names. To access well A1, for example, simply write: labware.wells_by_index()['A1'] :return: Dictionary of well objects keyed by well name
def delete_channel(current):
    """Delete a channel together with its subscriptions and messages.

    .. code-block:: python

        # request:  {'view': '_zops_delete_channel', 'channel_key': key}
        # response: {'status': 'Deleted', 'code': 200}
    """
    ch_key = current.input['channel_key']
    # Only the channel owner may delete the channel.
    ch = Channel(current).objects.get(owner_id=current.user_id, key=ch_key)
    ch.delete()
    # Cascade: remove subscriptions and message history as well.
    Subscriber.objects.filter(channel_id=ch_key).delete()
    Message.objects.filter(channel_id=ch_key).delete()
    current.output = {'status': 'Deleted', 'code': 200}
Delete a channel .. code-block:: python # request: { 'view':'_zops_delete_channel, 'channel_key': key, } # response: { 'status': 'OK', 'code': 200 }
def attach_file(self, filename, resource_id=None):
    """Upload an attachment for the model run.

    Parameters
    ----------
    filename : string
        Path to uploaded file
    resource_id : string
        Identifier of the attachment. If None, the basename of the
        filename is used as resource identifier.

    Returns
    -------
    ModelRunHandle
        Refreshed run handle.

    Raises
    ------
    ValueError
        If the server rejects the upload.
    """
    if resource_id is None:
        resource_id = os.path.basename(filename)
    upload_url = self.links[REF_MODEL_RUN_ATTACHMENTS] + '/' + resource_id
    # BUG FIX: close the uploaded file after the request (the original
    # leaked the file handle).
    with open(filename, 'rb') as fh:
        response = requests.post(upload_url, files={'file': fh})
    if response.status_code != 200:
        try:
            # Prefer the server-supplied error message when present.
            raise ValueError(json.loads(response.text)['message'])
        except KeyError:
            raise ValueError('invalid state change: ' + str(response.text))
    return self.refresh()
Upload an attachment for the model run. Parameters ---------- filename : string Path to uploaded file resource_id : string Identifier of the attachment. If None, the filename will be used as resource identifier. Returns ------- ModelRunHandle Refreshed run handle.
def color(self, color):
    """Updates the security label's color.

    Args:
        color: The new color value to store and persist remotely.
    """
    self._data['color'] = color
    request = self._base_request
    request['color'] = color
    return self._tc_requests.update(request, owner=self.owner)
Updates the security labels color. Args: color:
def get_disabled(jail=None):
    """Return services that are available but not enabled to start at boot.

    jail : optional jid or jail name to inspect instead of the host.

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_disabled
    """
    enabled = set(get_enabled(jail))
    available = set(get_all(jail))
    return sorted(available - enabled)
Return what services are available but not enabled to start at boot .. versionchanged:: 2016.3.4 Support for jail (representing jid or jail name) keyword argument in kwargs CLI Example: .. code-block:: bash salt '*' service.get_disabled
def getBirthdate(self, string=True):
    """Return the recorded birthdate.

    Parameters
    ----------
    string : bool
        If True (default) return the raw string; otherwise return a
        ``datetime`` parsed with the ``"%d %b %Y"`` format.
    """
    birth = self._convert_string(self.birthdate.rstrip())
    if string:
        return birth
    return datetime.strptime(birth, "%d %b %Y")
Returns the birthdate as string object Parameters ---------- None Examples -------- >>> import pyedflib >>> f = pyedflib.data.test_generator() >>> f.getBirthdate()=='30 jun 1969' True >>> f._close() >>> del f
def remove_output(clean=False, **kwargs):
    """Remove the outputs generated by Andes in the working directory.

    Targets power flow reports ``_out.txt``, time-domain list
    ``_out.lst`` and data ``_out.dat``, eigenvalue reports ``_eig.txt``
    and profiling output ``_prof.txt``.

    Parameters
    ----------
    clean : bool
        If ``True``, execute the function body. Returns otherwise.
    kwargs : dict
        Other keyword arguments (unused).

    Returns
    -------
    bool
        ``True`` if the function body executes. ``False`` otherwise.
    """
    if not clean:
        return False
    suffixes = ('_eig.txt', '_out.txt', '_out.lst', '_out.dat', '_prof.txt')
    found = False
    cwd = os.getcwd()
    for file in os.listdir(cwd):
        if not file.endswith(suffixes):
            continue
        found = True
        try:
            os.remove(file)
            logger.info('<{:s}> removed.'.format(file))
        except IOError:
            logger.error('Error removing file <{:s}>.'.format(file))
    if not found:
        logger.info('no output found in the working directory.')
    return True
Remove the outputs generated by Andes, including power flow reports ``_out.txt``, time-domain list ``_out.lst`` and data ``_out.dat``, eigenvalue analysis report ``_eig.txt``. Parameters ---------- clean : bool If ``True``, execute the function body. Returns otherwise. kwargs : dict Other keyword arguments Returns ------- bool ``True`` if the function body executes with success. ``False`` otherwise.
def is_locked(self, request: AxesHttpRequest, credentials: dict = None) -> bool:
    """Checks if the request or given credentials are locked out.

    Locked only when lockout is enabled (``AXES_LOCK_OUT_AT_FAILURE``)
    and the failure count has reached ``AXES_FAILURE_LIMIT``.
    """
    if settings.AXES_LOCK_OUT_AT_FAILURE:
        return self.get_failures(request, credentials) >= settings.AXES_FAILURE_LIMIT
    return False
Checks if the request or given credentials are locked.
def get_action(self, action):
    """Get a callable action by name.

    Dashes in ``action`` map to underscores in the method name. Only
    attributes explicitly marked with ``__daemonocle_exposed__ = True``
    may be returned.

    Raises:
        DaemonError: If the action is unknown or not exposed.
    """
    func_name = action.replace('-', '_')
    func = getattr(self, func_name, None)
    # Reject anything that is missing, not callable, or not explicitly
    # exposed (idiom fix: callable() instead of hasattr '__call__').
    if (not callable(func) or
            getattr(func, '__daemonocle_exposed__', False) is not True):
        raise DaemonError(
            'Invalid action "{action}"'.format(action=action))
    return func
Get a callable action.
def send_file_from_directory(filename, directory, app=None):
    """Helper to add static rules, like in `abilian.app`.app.

    Serves ``filename`` from ``directory`` using the app's configured
    cache timeout for static files.

    Example use::

        app.add_url_rule(
            app.static_url_path + '/abilian/<path:filename>',
            endpoint='abilian_static',
            view_func=partial(send_file_from_directory,
                              directory='/path/to/static/files/dir'))
    """
    if app is None:
        app = current_app
    cache_timeout = app.get_send_file_max_age(filename)
    return send_from_directory(directory, filename, cache_timeout=cache_timeout)
Helper to add static rules, like in `abilian.app`.app. Example use:: app.add_url_rule( app.static_url_path + '/abilian/<path:filename>', endpoint='abilian_static', view_func=partial(send_file_from_directory, directory='/path/to/static/files/dir'))
def add_attribute_label(self, attribute_id, label):
    """Adds a security label to an attribute.

    Args:
        attribute_id: The id of the attribute to label.
        label: The label to apply.

    Returns:
        A response json
    """
    # Guard: report an error (code 910) when the object is read-only.
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    return self.tc_requests.add_attribute_label(
        self.api_type,
        self.api_sub_type,
        self.unique_id,
        attribute_id,
        label,
        owner=self.owner,
    )
Adds a security label to an attribute Args: attribute_id: label: Returns: A response json
def load_graph_xml(xml, filename, load_all=False):
    """Load graph definitions from one XML string.

    Returns a list of GraphDefinition objects; an empty list on parse
    errors or when the document is not a ``<graphs>`` element.
    """
    ret = []
    try:
        root = objectify.fromstring(xml)
    except Exception:
        return []
    if root.tag != 'graphs':
        return []
    if not hasattr(root, 'graph'):
        return []
    for g in root.graph:
        name = g.attrib['name']
        expressions = [expr.text for expr in g.expression]
        if load_all:
            # BUG FIX: the original referenced the comprehension variable
            # `e` here, which is undefined in Python 3 (NameError). Use
            # the first expression, mirroring the selection loop below.
            ret.append(GraphDefinition(name, expressions[0],
                                       g.description.text, expressions,
                                       filename))
            continue
        if have_graph(name):
            continue
        # Pick the first expression that evaluates OK.
        for expr in expressions:
            if expression_ok(expr):
                ret.append(GraphDefinition(name, expr, g.description.text,
                                           expressions, filename))
                break
    return ret
load a graph from one xml string
def mapstr_to_list(mapstr):
    """Convert an ASCII map string to a list of strings, one per row.

    Each row is stripped of surrounding whitespace.
    """
    with StringIO(mapstr) as infile:
        return [row.strip() for row in infile]
Convert an ASCII map string with rows to a list of strings, 1 string per row.
def patch_ligotimegps(module="ligo.lw.lsctables"):
    """Context manager to on-the-fly patch LIGOTimeGPS to accept all int types.

    NOTE(review): this is a generator function; presumably it is wrapped
    with ``@contextlib.contextmanager`` at the definition site — confirm.
    """
    module = import_module(module)
    orig = module.LIGOTimeGPS
    module.LIGOTimeGPS = _ligotimegps
    try:
        yield
    finally:
        # Always restore the original class, even if the body raises.
        module.LIGOTimeGPS = orig
Context manager to on-the-fly patch LIGOTimeGPS to accept all int types
def circleconvert(amount, currentformat, newformat):
    """Convert a circle measurement between radius, diameter and
    circumference.

    :type amount: number
    :param amount: The number to convert.
    :type currentformat: string
    :param currentformat: The format of the provided value.
    :type newformat: string
    :param newformat: The intended format of the value.
    :raises ValueError: If either format is not 'radius', 'diameter'
        or 'circumference'.

    >>> circleconvert(45, "radius", "diameter")
    90
    """
    current = currentformat.lower()
    new = newformat.lower()
    if current == new:
        return amount
    if current == 'radius':
        if new == 'diameter':
            return amount * 2
        elif new == 'circumference':
            return amount * 2 * math.pi
        raise ValueError("Invalid new format provided.")
    elif current == 'diameter':
        if new == 'radius':
            return amount / 2
        elif new == 'circumference':
            return amount * math.pi
        raise ValueError("Invalid new format provided.")
    elif current == 'circumference':
        if new == 'radius':
            return amount / math.pi / 2
        elif new == 'diameter':
            return amount / math.pi
        # BUG FIX: this branch previously fell through, silently
        # returning None for an invalid newformat.
        raise ValueError("Invalid new format provided.")
    # BUG FIX: an unknown currentformat previously returned None silently.
    raise ValueError("Invalid current format provided.")
Convert a circle measurement. :type amount: number :param amount: The number to convert. :type currentformat: string :param currentformat: The format of the provided value. :type newformat: string :param newformat: The intended format of the value. >>> circleconvert(45, "radius", "diameter") 90
def license_id(filename):
    """Return the spdx id for the license contained in `filename`.

    If no license is detected, returns `None`.

    Approximate algorithm: normalize the text, check for an exact match
    against known licenses, otherwise pick the closest license by edit
    distance, accepting it only if it differs by less than 5%.
    """
    import editdistance
    with io.open(filename, encoding='UTF-8') as f:
        contents = f.read()
    norm = _norm_license(contents)
    min_edit_dist = sys.maxsize
    min_edit_dist_spdx = ''
    for spdx, text in licenses.LICENSES:
        norm_license = _norm_license(text)
        if norm == norm_license:
            # Exact match short-circuits the fuzzy search.
            return spdx
        # Skip candidates whose length differs by more than 5%: the
        # edit distance could never fall under the acceptance threshold.
        if norm and abs(len(norm) - len(norm_license)) / len(norm) > .05:
            continue
        edit_dist = editdistance.eval(norm, norm_license)
        if edit_dist < min_edit_dist:
            min_edit_dist = edit_dist
            min_edit_dist_spdx = spdx
    # Accept the closest candidate only when within 5% edit distance.
    if norm and min_edit_dist / len(norm) < .05:
        return min_edit_dist_spdx
    else:
        return None
Return the spdx id for the license contained in `filename`. If no license is detected, returns `None`. spdx: https://spdx.org/licenses/ licenses from choosealicense.com: https://github.com/choosealicense.com Approximate algorithm: 1. strip copyright line 2. normalize whitespace (replace all whitespace with a single space) 3. check exact text match with existing licenses 4. failing that use edit distance
def get_recent_tracks(self, limit=10, cacheable=True, time_from=None,
                      time_to=None):
    """Returns this user's played tracks as a sequence of PlayedTrack
    objects in reverse order of playtime.

    Parameters:
        limit: If None, it will try to pull all the available data.
        time_from (Optional): beginning UNIX timestamp (UTC) of a range.
        time_to (Optional): end UNIX timestamp (UTC) of a range.
        cacheable: enable caching (recommended when pulling a large
            amount of data).
    """
    params = self._get_params()
    if limit:
        params["limit"] = limit
    if time_from:
        params["from"] = time_from
    if time_to:
        params["to"] = time_to
    seq = []
    for track in _collect_nodes(
        limit, self, self.ws_prefix + ".getRecentTracks", cacheable, params
    ):
        # Skip the currently-playing track; it has no scrobble date yet.
        if track.hasAttribute("nowplaying"):
            continue
        title = _extract(track, "name")
        artist = _extract(track, "artist")
        date = _extract(track, "date")
        album = _extract(track, "album")
        timestamp = track.getElementsByTagName("date")[0].getAttribute("uts")
        seq.append(
            PlayedTrack(Track(artist, title, self.network), album, date,
                        timestamp)
        )
    return seq
Returns this user's played track as a sequence of PlayedTrack objects in reverse order of playtime, all the way back to the first track. Parameters: limit : If None, it will try to pull all the available data. from (Optional) : Beginning timestamp of a range - only display scrobbles after this time, in UNIX timestamp format (integer number of seconds since 00:00:00, January 1st 1970 UTC). This must be in the UTC time zone. to (Optional) : End timestamp of a range - only display scrobbles before this time, in UNIX timestamp format (integer number of seconds since 00:00:00, January 1st 1970 UTC). This must be in the UTC time zone. This method uses caching. Enable caching only if you're pulling a large amount of data.
def runtime(self):
    """Transitional property providing access to the new timer mechanism.

    This will be removed in the future.
    """
    warnings.warn("admm.ADMM.runtime attribute has been replaced by "
                  "an upgraded timer class: please see the documentation "
                  "for admm.ADMM.solve method and util.Timer class",
                  PendingDeprecationWarning)
    # Total runtime = initialization time + solve time.
    return self.timer.elapsed('init') + self.timer.elapsed('solve')
Transitional property providing access to the new timer mechanism. This will be removed in the future.
def close_transport(self):
    """Forcibly close a previously acquired media transport.

    .. note:: Unregister any transport event handlers before calling.
    """
    if not self.path:
        return
    self._release_media_transport(self.path, self.access_type)
    self.path = None
Forcibly close previously acquired media transport. .. note:: The user should first make sure any transport event handlers are unregistered first.
async def runItemCmdr(item, outp=None, **opts):
    """Create a cmdr for the given item and run the cmd loop.

    Example:
        await runItemCmdr(foo)
    """
    cmdr = await getItemCmdr(item, outp=outp, **opts)
    await cmdr.runCmdLoop()
Create a cmdr for the given item and run the cmd loop. Example: runItemCmdr(foo)
def _start_scan(self, active):
    """Begin scanning forever.

    Returns a ``(success, retval)`` pair: ``(True, None)`` on success,
    otherwise ``(False, {'reason': ...})``.
    """
    success, retval = self._set_scan_parameters(active=active)
    if not success:
        return success, retval
    try:
        # Command (6, 2, [2]): start the discovery procedure.
        response = self._send_command(6, 2, [2])
        if response.payload[0] != 0:
            self._logger.error('Error starting scan for devices, error=%d',
                               response.payload[0])
            return False, {'reason': "Could not initiate scan for ble devices, error_code=%d, response=%s" % (response.payload[0], response)}
    except InternalTimeoutError:
        return False, {'reason': "Timeout waiting for response"}
    return True, None
Begin scanning forever
def create_signature(self, base_url, payload=None):
    """Create a unique HMAC-SHA256 signature for a request.

    Make sure ALL 'GET' and 'POST' data is already included before
    creating the signature or the receiver won't be able to re-create it.

    :param base_url: The url you'll be using for your request.
    :param payload: The POST data that you'll be sending.
    """
    parsed = urlparse(base_url)
    url_to_sign = "{path}?{query}".format(path=parsed.path, query=parsed.query)
    converted_payload = self._convert(payload)
    # The private key is stored urlsafe-base64 encoded.
    decoded_key = base64.urlsafe_b64decode(self.private_key.encode('utf-8'))
    digest = hmac.new(
        decoded_key,
        str.encode(url_to_sign + converted_payload),
        hashlib.sha256,
    ).digest()
    return bytes.decode(base64.urlsafe_b64encode(digest))
Creates unique signature for request. Make sure ALL 'GET' and 'POST' data is already included before creating the signature or receiver won't be able to re-create it. :param base_url: The url you'll using for your request. :param payload: The POST data that you'll be sending.
def format_string(format, *cols):
    """Formats the arguments in printf-style and returns the result as
    a string column.

    :param format: printf-style format string
    :param cols: columns to substitute into the format string

    >>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
    >>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
    [Row(v=u'5 hello')]
    """
    sc = SparkContext._active_spark_context
    # Delegate to the JVM-side implementation of format_string.
    return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
Formats the arguments in printf-style and returns the result as a string column. :param col: the column name of the numeric value to be formatted :param d: the N decimal places >>> df = spark.createDataFrame([(5, "hello")], ['a', 'b']) >>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect() [Row(v=u'5 hello')]
def enable_asynchronous(self):
    """Check that the socket module has been monkey patched by gevent.

    Returns True when patched; raises otherwise.
    """
    def is_monkey_patched():
        try:
            from gevent import monkey, socket
        except ImportError:
            return False
        if hasattr(monkey, "saved"):
            return "socket" in monkey.saved
        # BUG FIX: the original referenced the unbound name
        # `gevent.socket.socket` (only `monkey` and `socket` were
        # imported), raising NameError. Compare the stdlib socket class
        # with gevent's instead.
        import socket as std_socket
        return std_socket.socket is socket.socket
    if not is_monkey_patched():
        raise Exception("To activate asynchonoucity, please monkey patch"
                        " the socket module with gevent")
    return True
Check if socket have been monkey patched by gevent
def set_debug_listener(stream):
    """Break into a debugger when the process receives SIGUSR1."""
    def debugger(sig, frame):
        launch_debugger(frame, stream)
    # SIGUSR1 is unavailable on some platforms (e.g. Windows).
    if hasattr(signal, 'SIGUSR1'):
        signal.signal(signal.SIGUSR1, debugger)
    else:
        logger.warn("Cannot set SIGUSR1 signal for debug mode.")
Break into a debugger if receives the SIGUSR1 signal
def eat_config(self, conf_file):
    """Update the pack's channel names and conditions from a config file.

    conf_file: a file object opened for reading, in RawConfigParser
    format with '[channels]' and '[conditions]' sections.
    """
    cfg = ConfigParser.RawConfigParser()
    cfg.readfp(conf_file)
    sec = 'channels'
    mess = 'missmatch of channel keys'
    # The config must describe exactly the channels present in the pack.
    assert(set(self.pack.D.keys()) == set([int(i) for i in cfg.options(sec)])), mess
    if not self.pack.chnames:
        # Start from the default names before applying overrides.
        self.pack.chnames = dict(self.pack.chnames_0)
    for i in cfg.options(sec):
        self.pack.chnames[self.pack._key(int(i))] = cfg.get(sec, i)
    sec = 'conditions'
    conops = cfg.options(sec)
    # Clear existing conditions before loading the configured set.
    self.reset()
    for con in conops:
        self.set_condition(con, cfg.get(sec, con))
conf_file a file opened for reading. Update the packs channel names and the conditions, accordingly.
def has_dtypes(df, items):
    """Assert that a DataFrame has the expected ``dtypes``.

    Parameters
    ==========
    df : DataFrame
    items : dict
        mapping of columns to dtype.

    Returns
    =======
    df : DataFrame
        The unmodified input, for chaining.
    """
    actual = df.dtypes
    for column, expected in items.items():
        if not actual[column] == expected:
            raise AssertionError(
                "{} has the wrong dtype. Should be ({}), is ({})".format(
                    column, expected, actual[column]))
    return df
Assert that a DataFrame has ``dtypes`` Parameters ========== df: DataFrame items: dict mapping of columns to dtype. Returns ======= df : DataFrame
def request_search(self, txt=None):
    """Requests a search operation.

    :param txt: The text to search for. If None, the content of
        lineEditSearch is used instead.
    """
    if self.checkBoxRegex.isChecked():
        # Validate the regex up-front and surface any error in the UI.
        try:
            re.compile(self.lineEditSearch.text(), re.DOTALL)
        except sre_constants.error as e:
            self._show_error(e)
            return
        else:
            self._show_error(None)
    # txt may arrive as an int from some signal connections; treat that
    # the same as "not provided".
    if txt is None or isinstance(txt, int):
        txt = self.lineEditSearch.text()
    if txt:
        self.job_runner.request_job(
            self._exec_search, txt, self._search_flags())
    else:
        # Empty search text: cancel pending work and clear highlights.
        self.job_runner.cancel_requests()
        self._clear_occurrences()
        self._on_search_finished()
Requests a search operation. :param txt: The text to replace. If None, the content of lineEditSearch is used instead.
def _pre_tune(self):
    """Set the minion running flag and issue the appropriate warnings if
    the minion cannot be started or is already running.
    """
    # _running is a tri-state: None (never started), False (scheduled
    # to stop), True (running).
    if self._running is None:
        self._running = True
    elif self._running is False:
        log.error(
            'This %s was scheduled to stop. Not running %s.tune_in()',
            self.__class__.__name__, self.__class__.__name__
        )
        return
    elif self._running is True:
        log.error(
            'This %s is already running. Not running %s.tune_in()',
            self.__class__.__name__, self.__class__.__name__
        )
        return
    try:
        log.info(
            '%s is starting as user \'%s\'',
            self.__class__.__name__, salt.utils.user.get_user()
        )
    except Exception as err:
        # User lookup commonly fails on Windows; demote to DEBUG there.
        log.log(
            salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
            'Failed to get the user who is starting %s',
            self.__class__.__name__,
            exc_info=err
        )
Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running
def MICECache(subsystem, parent_cache=None):
    """Construct a |MICE| cache.

    Uses a Redis-backed cache when ``config.REDIS_CACHE`` is enabled,
    otherwise a local dict cache on the object.

    Args:
        subsystem (Subsystem): The subsystem that this is a cache for.

    Kwargs:
        parent_cache (MICECache): The cache generated by the uncut
            version of ``subsystem``; unaffected entries are reused.
            If None, the cache is initialized empty.
    """
    cache_cls = RedisMICECache if config.REDIS_CACHE else DictMICECache
    return cache_cls(subsystem, parent_cache=parent_cache)
Construct a |MICE| cache. Uses either a Redis-backed cache or a local dict cache on the object. Args: subsystem (Subsystem): The subsystem that this is a cache for. Kwargs: parent_cache (MICECache): The cache generated by the uncut version of ``subsystem``. Any cached |MICE| which are unaffected by the cut are reused in this cache. If None, the cache is initialized empty.
def is_member(self, ldap_user, group_dn):
    """Return True if the user belongs to the POSIX group ``group_dn``.

    Membership holds when the user's uid appears in the group's
    memberUid attribute, or when the group is the user's primary group
    (gidNumber match).
    """
    try:
        user_uid = ldap_user.attrs["uid"][0]
        try:
            # First check: uid listed in the group's memberUid.
            is_member = ldap_user.connection.compare_s(
                group_dn, "memberUid", user_uid.encode()
            )
        except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE):
            is_member = False
        if not is_member:
            try:
                # Second check: primary group via gidNumber.
                user_gid = ldap_user.attrs["gidNumber"][0]
                is_member = ldap_user.connection.compare_s(
                    group_dn, "gidNumber", user_gid.encode()
                )
            except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE):
                is_member = False
    except (KeyError, IndexError):
        # User entry lacks uid/gidNumber attributes entirely.
        is_member = False
    return is_member
Returns True if the group is the user's primary group or if the user is listed in the group's memberUid attribute.
def get_compute_credentials(key):
    """Authenticates a service account for the compute engine.

    Uses `oauth2client.service_account`. The `google` Python package
    does not support the compute engine directly, so raw HTTP requests
    with these tokens are used instead.

    See:
    - https://cloud.google.com/iap/docs/authentication-howto
    - https://developers.google.com/identity/protocols/OAuth2ServiceAccount

    Args:
        key: service-account JSON keyfile contents as a dict.
    """
    scopes = ['https://www.googleapis.com/auth/compute']
    credentials = ServiceAccountCredentials.from_json_keyfile_dict(
        key, scopes=scopes)
    return credentials
Authenticates a service account for the compute engine. This uses the `oauth2client.service_account` module. Since the `google` Python package does not support the compute engine (yet?), we need to make direct HTTP requests. For that we need authentication tokens. Obtaining these based on the credentials provided by the `google.auth2` module is much more cumbersome than using the `oauth2client` module. See: - https://cloud.google.com/iap/docs/authentication-howto - https://developers.google.com/identity/protocols/OAuth2ServiceAccount TODO: docstring
async def _scp(self, source, destination, scp_opts):
    """Execute an scp command. Requires a fully qualified source and
    destination.

    Raises:
        JujuError: if scp exits non-zero.
    """
    cmd = [
        'scp',
        '-i', os.path.expanduser('~/.local/share/juju/ssh/juju_id_rsa'),
        '-o', 'StrictHostKeyChecking=no',
        '-q',
        '-B',
    ]
    # scp_opts may be a pre-split list or a single option string.
    cmd.extend(scp_opts.split() if isinstance(scp_opts, str) else scp_opts)
    cmd.extend([source, destination])
    loop = self.model.loop
    process = await asyncio.create_subprocess_exec(*cmd, loop=loop)
    await process.wait()
    if process.returncode != 0:
        raise JujuError("command failed: %s" % cmd)
Execute an scp command. Requires a fully qualified source and destination.
def setAutoRangeOff(self):
    """Turns off the auto range checkbox.

    Calls _refreshNodeFromTarget (via _forceRefreshAutoRange), not
    _updateTargetFromNode: setting auto range off does not require a
    redraw of the target.
    """
    # Avoid extra work/recursion while refreshes are blocked.
    if self.getRefreshBlocked():
        logger.debug("setAutoRangeOff blocked for {}".format(self.nodeName))
        return
    if self.autoRangeCti:
        self.autoRangeCti.data = False
    self._forceRefreshAutoRange()
Turns off the auto range checkbox. Calls _refreshNodeFromTarget, not _updateTargetFromNode, because setting auto range off does not require a redraw of the target.
def data_complete(self):
    """Return True if all the expected datadir files are present."""
    return task.data_complete(self.datadir, self.sitedir, self._get_container_name)
Return True if all the expected datadir files are present
def main(self, x):
    """Transposed form FIR implementation, uses full precision.

    NOTE(review): ``self.next`` assignments appear to model next-cycle
    register updates (hardware-DSL semantics) — confirm against the
    framework docs.
    """
    for i in range(len(self.taps_fix_reversed)):
        self.next.mul[i] = x * self.taps_fix_reversed[i]
        if i == 0:
            self.next.acc[0] = self.mul[i]
        else:
            # Accumulator chain: each stage adds the previous stage.
            self.next.acc[i] = self.acc[i - 1] + self.mul[i]
    self.next.out = self.acc[-1]
    return self.out
Transposed form FIR implementation, uses full precision
def validate_process_steps(prop, value):
    """Default validation for the Process Steps data structure.

    ``value`` may be a dict or a list of dicts; each dict's keys must
    come from the definition for ``prop``, string-valued except for
    'sources' which may also be a list of strings.
    """
    if value is not None:
        validate_type(prop, value, (dict, list))
        procstep_keys = set(_complex_definitions[prop])
        for idx, procstep in enumerate(wrap_value(value)):
            ps_idx = prop + '[' + str(idx) + ']'
            validate_type(ps_idx, procstep, dict)
            for ps_prop, ps_val in iteritems(procstep):
                ps_key = '.'.join((ps_idx, ps_prop))
                if ps_prop not in procstep_keys:
                    _validation_error(prop, None, value, ('keys: {0}'.format(','.join(procstep_keys))))
                if ps_prop != 'sources':
                    validate_type(ps_key, ps_val, string_types)
                else:
                    # 'sources' may be a single string or a list of strings.
                    validate_type(ps_key, ps_val, (string_types, list))
                    for src_idx, src_val in enumerate(wrap_value(ps_val)):
                        src_key = ps_key + '[' + str(src_idx) + ']'
                        validate_type(src_key, src_val, string_types)
Default validation for Process Steps data structure
def load_data(path, dense=False):
    """Load data from a CSV, LibSVM or HDF5 file based on the file
    extension.

    Args:
        path (str): A path to the CSV (.csv), LibSVM (.sps) or HDF5
            (.h5) format file containing data.
        dense (boolean): If True, convert a sparse matrix to dense.

    Returns:
        Data matrix X and target vector y
    """
    loaders = {
        '.csv': load_csv,
        '.sps': load_svmlight_file,
        '.h5': load_hdf5,
    }
    extension = os.path.splitext(path)[1]
    X, y = loaders[extension](path)
    if dense and sparse.issparse(X):
        X = X.todense()
    return X, y
Load data from a CSV, LibSVM or HDF5 file based on the file extension. Args: path (str): A path to the CSV, LibSVM or HDF5 format file containing data. dense (boolean): An optional variable indicating if the return matrix should be dense. By default, it is false. Returns: Data matrix X and target vector y
async def get_advanced_settings(request: web.Request) -> web.Response:
    """Handles a GET request for the advanced settings.

    Returns a json body listing setting objects, each with keys "id",
    "title", "description", and "value".
    """
    res = _get_adv_settings()
    return web.json_response(res)
Handles a GET request and returns a json body with the key "settings" and a value that is a list of objects where each object has keys "id", "title", "description", and "value"
def check_config(conf):
    """Type and boundary check of the configuration dict.

    Validates optional keys 'fmode', 'dmode' (strings), 'depth'
    (non-negative int) and 'hash_alg' (one of ACCEPTED_HASH_ALG).

    Raises:
        TypeError: on a wrongly typed value.
        ValueError: on an out-of-range or unsupported value.
    """
    if 'fmode' in conf and not isinstance(conf['fmode'], string_types):
        raise TypeError(TAG + ": `fmode` must be a string")
    if 'dmode' in conf and not isinstance(conf['dmode'], string_types):
        raise TypeError(TAG + ": `dmode` must be a string")
    if 'depth' in conf:
        if not isinstance(conf['depth'], int):
            raise TypeError(TAG + ": `depth` must be an int")
        if conf['depth'] < 0:
            raise ValueError(TAG + ": `depth` must be a positive number")
    if 'hash_alg' in conf:
        if not isinstance(conf['hash_alg'], string_types):
            raise TypeError(TAG + ": `hash_alg` must be a string")
        if conf['hash_alg'] not in ACCEPTED_HASH_ALG:
            raise ValueError(TAG + ": `hash_alg` must be one of " + str(ACCEPTED_HASH_ALG))
Type and boundary check
def OneResult(parser):
    "Parse like parser, but return exactly one result, not a tuple."
    def parse(text):
        results = parser(text)
        # Enforce the single-result contract before unwrapping.
        assert len(results) == 1, "Expected one result but got %r" % (results,)
        (result,) = results
        return result
    return parse
Parse like parser, but return exactly one result, not a tuple.
def updateData(self, signal, fs):
    """Displays a spectrogram of the provided signal.

    :param signal: 1-D signal of audio
    :type signal: numpy.ndarray
    :param fs: samplerate of signal
    :type fs: int
    """
    # Compute off the UI thread; _doSpectrogram reports completion via
    # self.spec_done.
    t = threading.Thread(target=_doSpectrogram, args=(self.spec_done, (fs, signal),), kwargs=self.specgramArgs)
    t.start()
Displays a spectrogram of the provided signal :param signal: 1-D signal of audio :type signal: numpy.ndarray :param fs: samplerate of signal :type fs: int
def update(self, items):
    """Update a catalog object.

    Args:
        items (list): A list of dicts describing update data and action
            codes (see api docs).

    Returns:
        A ticket id.
    """
    payload = {'data': json.dumps(items, default=dthandler)}
    response = self.post_attribute("update", data=payload)
    return response['ticket']
Update a catalog object Args: items (list): A list of dicts describing update data and action codes (see api docs) Kwargs: Returns: A ticket id Example: >>> c = catalog.Catalog('my_songs', type='song') >>> items [{'action': 'update', 'item': {'artist_name': 'dAn ThE aUtOmAtOr', 'disc_number': 1, 'genre': 'Instrumental', 'item_id': '38937DDF04BC7FC4', 'play_count': 5, 'release': 'Bombay the Hard Way: Guns, Cars & Sitars', 'song_name': 'Inspector Jay From Dehli', 'track_number': 9, 'url': 'file://localhost/Users/tylerw/Music/iTunes/iTunes%20Media/Music/Dan%20the%20Automator/Bombay%20the%20Hard%20Way_%20Guns,%20Cars%20&%20Sitars/09%20Inspector%20Jay%20From%20Dehli.m4a'}}] >>> ticket = c.update(items) >>> ticket u'7dcad583f2a38e6689d48a792b2e4c96' >>> c.status(ticket) {u'ticket_status': u'complete', u'update_info': []} >>>
def find_boost():
    """Find the name of the boost-python library.

    Tries version-specific names first. If no matching library file is
    found, warns and falls back to the first candidate name (note: it
    does NOT return None).
    """
    short_version = "{}{}".format(sys.version_info[0], sys.version_info[1])
    boostlibnames = ['boost_python-py' + short_version,
                     'boost_python' + short_version,
                     'boost_python',
                     ]
    # Multi-threaded variants differ between Python 2 and 3 packagings.
    if sys.version_info[0] == 2:
        boostlibnames += ["boost_python-mt"]
    else:
        boostlibnames += ["boost_python3-mt"]
    for libboostname in boostlibnames:
        if find_library_file(libboostname):
            return libboostname
    warnings.warn(no_boost_error)
    return boostlibnames[0]
Find the name of the boost-python library. Returns None if none is found.
def _pfp__show(self, level=0, include_offset=False):
    """Show the contents of the struct as a nested, brace-delimited string.

    :param level: current indentation level
    :param include_offset: prefix each line with the field's hex offset
    """
    res = []
    # Header line: optional offset, then the struct's display name.
    res.append("{}{} {{".format(
        "{:04x} ".format(self._pfp__offset) if include_offset else "",
        self._pfp__show_name
    ))
    for child in self._pfp__children:
        # Children recurse with an incremented indentation level.
        res.append("{}{}{:10s} = {}".format(
            " "*(level+1),
            "{:04x} ".format(child._pfp__offset) if include_offset else "",
            child._pfp__name,
            child._pfp__show(level+1, include_offset)
        ))
    res.append("{}}}".format(" "*level))
    return "\n".join(res)
Show the contents of the struct
def _exposure_to_weights(self, y, exposure=None, weights=None):
    """Normalize y by exposure and fold exposure into the sample weights.

    Parameters
    ----------
    y : array-like, shape (n_samples,)
        Target values.
    exposure : array-like, shape (n_samples,) or None, default: None
        Sample exposures; defaults to an array of ones.
    weights : array-like, shape (n_samples,) or None, default: None
        Sample weights; defaults to an array of ones.

    Returns
    -------
    y : y normalized by exposure
    weights : array-like, shape (n_samples,)
        Weights multiplied by exposure.
    """
    y = y.ravel()
    if exposure is not None:
        exposure = np.array(exposure).astype('f').ravel()
        exposure = check_array(exposure, name='sample exposure', ndim=1, verbose=self.verbose)
    else:
        exposure = np.ones_like(y.ravel()).astype('float64')
    exposure = exposure.ravel()
    check_lengths(y, exposure)
    # Rates: observed counts divided by exposure.
    y = y / exposure
    if weights is not None:
        weights = np.array(weights).astype('f').ravel()
        weights = check_array(weights, name='sample weights', ndim=1, verbose=self.verbose)
    else:
        weights = np.ones_like(y).astype('float64')
    check_lengths(weights, exposure)
    # Exposure contributes to the effective weight of each sample.
    weights = weights * exposure
    return y, weights
simple tool to create a common API Parameters ---------- y : array-like, shape (n_samples,) Target values (integers in classification, real numbers in regression) For classification, labels must correspond to classes. exposure : array-like shape (n_samples,) or None, default: None containing exposures if None, defaults to array of ones weights : array-like shape (n_samples,) or None, default: None containing sample weights if None, defaults to array of ones Returns ------- y : y normalized by exposure weights : array-like shape (n_samples,)
def area(self):
    r"""The area of the current curved polygon, computed via Green's
    theorem.

    Assumes, but does not check, that the polygon is valid (i.e. it is
    bounded by its edges).

    Returns:
        float: The area of the current curved polygon.
    """
    edges = tuple(edge._nodes for edge in self._edges)
    return _surface_helpers.compute_area(edges)
r"""The area of the current curved polygon. This assumes, but does not check, that the current curved polygon is valid (i.e. it is bounded by the edges). This computes the area via Green's theorem. Using the vector field :math:`\mathbf{F} = \left[-y, x\right]^T`, since :math:`\partial_x(x) - \partial_y(-y) = 2` Green's theorem says .. math:: \int_{\mathcal{P}} 2 \, d\textbf{x} = \int_{\partial \mathcal{P}} -y \, dx + x \, dy (where :math:`\mathcal{P}` is the current curved polygon). Note that for a given edge :math:`C(r)` with control points :math:`x_j, y_j`, the integral can be simplified: .. math:: \int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr = \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d} b'_{j, d} \, dr where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials. Returns: float: The area of the current curved polygon.
def copy_table(
    self,
    sources,
    destination,
    job_id=None,
    job_id_prefix=None,
    location=None,
    project=None,
    job_config=None,
    retry=DEFAULT_RETRY,
):
    """Copy one or more tables to another table.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy

    Arguments:
        sources: Table(s)/TableReference(s)/str, or a sequence thereof,
            to be copied.
        destination: Table/TableReference/str into which data is copied.

    Keyword Arguments:
        job_id (str): (Optional) ID of the job.
        job_id_prefix (str): (Optional) user-provided prefix for a
            randomly generated job ID; ignored if ``job_id`` is given.
        location (str): Location where to run the job; must match the
            location of the source and destination tables. Defaults to
            the client's location.
        project (str): Project where to run the job. Defaults to the
            client's project.
        job_config (google.cloud.bigquery.job.CopyJobConfig):
            (Optional) Extra configuration options for the job.
        retry (google.api_core.retry.Retry): (Optional) RPC retry policy.

    Returns:
        google.cloud.bigquery.job.CopyJob: A new copy job instance.
    """
    job_id = _make_job_id(job_id, job_id_prefix)
    if project is None:
        project = self.project
    if location is None:
        location = self.location
    job_ref = job._JobReference(job_id, project=project, location=location)
    # Normalize sources: accept a single table or a sequence of tables.
    sources = _table_arg_to_table_ref(sources, default_project=self.project)
    if not isinstance(sources, collections_abc.Sequence):
        sources = [sources]
    sources = [
        _table_arg_to_table_ref(source, default_project=self.project)
        for source in sources
    ]
    destination = _table_arg_to_table_ref(destination, default_project=self.project)
    copy_job = job.CopyJob(
        job_ref, sources, destination, client=self, job_config=job_config
    )
    copy_job._begin(retry=retry)
    return copy_job
Copy one or more tables to another table. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy Arguments: sources (Union[ \ :class:`~google.cloud.bigquery.table.Table`, \ :class:`~google.cloud.bigquery.table.TableReference`, \ str, \ Sequence[ \ Union[ \ :class:`~google.cloud.bigquery.table.Table`, \ :class:`~google.cloud.bigquery.table.TableReference`, \ str, \ ] \ ], \ ]): Table or tables to be copied. destination (Union[ :class:`~google.cloud.bigquery.table.Table`, \ :class:`~google.cloud.bigquery.table.TableReference`, \ str, \ ]): Table into which data is to be copied. Keyword Arguments: job_id (str): (Optional) The ID of the job. job_id_prefix (str) (Optional) the user-provided prefix for a randomly generated job ID. This parameter will be ignored if a ``job_id`` is also given. location (str): Location where to run the job. Must match the location of any source table as well as the destination table. project (str): Project ID of the project of where to run the job. Defaults to the client's project. job_config (google.cloud.bigquery.job.CopyJobConfig): (Optional) Extra configuration options for the job. retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: google.cloud.bigquery.job.CopyJob: A new copy job instance.
def reboot(self):
    """Request an autopilot reboot via MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN."""
    msg = self.message_factory.command_long_encode(
        0, 0,  # target system, target component
        mavutil.mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN,
        0,  # confirmation
        1,  # param1 = 1 selects the reboot action
        0, 0, 0, 0, 0, 0)  # params 2-7 unused
    self.send_mavlink(msg)
Requests an autopilot reboot by sending a ``MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN`` command.
def validate_range_value(value):
    """Validate *value* against the ``Schema.TYPE_RANGE`` data type.

    A valid range is ``(None, None)`` or an iterable of exactly two numbers
    ``(min, max)`` with ``min <= max``.

    Raises:
        TypeError: if value is not iterable or its elements are not numbers.
        ValueError: if value does not have two elements or min > max.

    Returns None when everything is OK.
    """
    if value == (None, None):
        return
    if not hasattr(value, '__iter__'):
        raise TypeError('Range value must be an iterable, got "%s".' % (value,))
    # Materialize once: supports generators and guarantees tuple formatting
    # below (the original `'%s and %s' % value` crashed on list inputs).
    items = tuple(value)
    if len(items) != 2:
        raise ValueError('Range value must consist of two elements, got %d.'
                         % len(items))
    low, high = items
    if not all(isinstance(x, (int, float)) for x in items):
        raise TypeError('Range value must consist of two numbers, got "%s" '
                        'and "%s" instead.' % (low, high))
    if not low <= high:
        raise ValueError('Range must consist of min and max values (min <= '
                         'max) but got "%s" and "%s" instead.' % (low, high))
    return
Validates given value against `Schema.TYPE_RANGE` data type. Raises TypeError or ValueError if something is wrong. Returns None if everything is OK.
def is_valid(number):
    """Return True iff the card number's last digit equals the check digit
    computed from the preceding digits."""
    digits = str(number)
    if not digits.isdigit():
        return False
    body, last = digits[:-1], digits[-1]
    return get_check_digit(body) == int(last)
determines whether the card number is valid.
def unaccent(string, encoding="utf-8"):
    """Transliterate *string* to ASCII: full unidecode when available,
    otherwise strip combining marks via NFKD (Python 2 only)."""
    string = to_unicode(string)
    if has_unidecode:
        return unidecode.unidecode(string)
    if PYTHON_VERSION >= 3:
        # Without unidecode on Python 3 the string is returned untouched.
        return string
    if type(string) == str:
        string = unicode(string, encoding)
    decomposed = unicodedata.normalize('NFKD', string)
    kept = [ch for ch in decomposed if not unicodedata.combining(ch)]
    return u"".join(kept).encode("ascii", "ignore")
not just unaccent, but full to-ascii transliteration
def get_global_vars(func):
    """Store any methods or variables bound from the function's closure.

    Args:
        func (function): function to inspect

    Returns:
        dict: keys 'modules', 'functions', 'vars', each mapping names to
            the function's globally bound objects (modules by name only)

    Raises:
        TypeError: if the function closes over nonlocal variables
    """
    closure = getclosurevars(func)
    if closure['nonlocal']:
        # BUG FIX: the original read closure['nonlocals'] here (key typo),
        # which raised KeyError instead of the intended TypeError.
        raise TypeError("Can't launch a job with closure variables: %s"
                        % closure['nonlocal'].keys())

    globalvars = dict(modules={}, functions={}, vars={})
    for name, value in closure['global'].items():
        if inspect.ismodule(value):
            # Modules are recorded by name so the payload stays picklable.
            globalvars['modules'][name] = value.__name__
        elif inspect.isfunction(value) or inspect.ismethod(value):
            globalvars['functions'][name] = value
        else:
            globalvars['vars'][name] = value
    return globalvars
Store any methods or variables bound from the function's closure

Args:
    func (function): function to inspect

Returns:
    dict: mapping of variable names to globally bound variables
def by_type(self, type_name):
    """Iterate doc_ids of documents of the given type.

    *type_name* may also be an IRestorator provider, in which case its
    ``type_name`` attribute is used.
    """
    if IRestorator.providedBy(type_name):
        type_name = type_name.type_name
    return (entry[1] for entry in self._links if entry[0] == type_name)
Return an iterator of doc_ids of the documents of the specified type.
def remove_all_static_host_mappings():
    """Remove every statically-configured host mapping from the database."""
    LOG.debug("remove_host_mapping() called")
    session = bc.get_writer_session()
    try:
        for entry in _lookup_all_host_mappings(session=session, is_static=True):
            session.delete(entry)
        session.flush()
    except c_exc.NexusHostMappingNotFound:
        # No static mappings exist: nothing to remove.
        pass
Remove all entries defined in config file from mapping data base.
def _exit_handling(self): def close_asyncio_loop(): loop = None try: loop = asyncio.get_event_loop() except AttributeError: pass if loop is not None: loop.close() atexit.register(close_asyncio_loop)
Makes sure the asyncio loop is closed.
def __add_paths(self, config):
    """Symlink the feature's bin/ executables into the environment,
    honoring the blacklist and an optional per-config whitelist."""
    bin_path = os.path.join(
        self.directory.install_directory(self.feature_name), 'bin')
    whitelist_executables = self._get_whitelisted_executables(config)
    for f in os.listdir(bin_path):
        # BUG FIX: the original `continue` only advanced the inner pattern
        # loop, so blacklisted executables were still symlinked.
        if any(re.match(pattern, f) for pattern in BLACKLISTED_EXECUTABLES):
            continue
        if whitelist_executables and f not in whitelist_executables:
            continue
        self.directory.symlink_to_bin(f, os.path.join(bin_path, f))
add the proper resources into the environment
def stop_processing(self, warning=True):
    """
    Registers the end of a processing operation.

    :param warning: Emit warning message when no processing is underway.
    :type warning: bool
    :return: Method success.
    :rtype: bool
    """
    if not self.__is_processing:
        # Short-circuit `and`: the warning is only logged when requested.
        warning and LOGGER.warning(
            "!> {0} | Engine is not processing, 'stop_processing' request has been ignored!".format(
                self.__class__.__name__))
        return False

    LOGGER.debug("> Stopping processing operation!")
    self.__is_processing = False
    # Reset the progress widget to an empty state before hiding it.
    self.Application_Progress_Status_processing.Processing_label.setText(QString())
    self.Application_Progress_Status_processing.Processing_progressBar.setRange(0, 100)
    self.Application_Progress_Status_processing.Processing_progressBar.setValue(0)
    self.Application_Progress_Status_processing.hide()
    return True
Registers the end of a processing operation.

:param warning: Emit warning message.
:type warning: bool

:return: Method success.
:rtype: bool
def _plot_thermo(self, func, temperatures, factor=1, ax=None, ylabel=None,
                 label=None, ylim=None, **kwargs):
    """Plot a thermodynamic property ``func(T, structure=...) * factor``
    against temperature and return the matplotlib figure.

    ``ax``/``ylabel``/``label``/``ylim`` customize the axes; extra kwargs
    go to ``Axes.plot``.
    """
    ax, fig, plt = get_ax_fig_plt(ax)

    prop_values = [func(t, structure=self.structure) * factor
                   for t in temperatures]
    ax.plot(temperatures, prop_values, label=label, **kwargs)

    if ylim:
        ax.set_ylim(ylim)
    ax.set_xlim((np.min(temperatures), np.max(temperatures)))

    # Draw a zero line when the data spans both signs.
    ylim = plt.ylim()
    if ylim[0] < 0 < ylim[1]:
        plt.plot(plt.xlim(), [0, 0], 'k-', linewidth=1)

    ax.set_xlabel(r"$T$ (K)")
    if ylabel:
        ax.set_ylabel(ylabel)

    return fig
Plots a thermodynamic property for a generic function from a PhononDos instance. Args: func: the thermodynamic function to be used to calculate the property temperatures: a list of temperatures factor: a multiplicative factor applied to the thermodynamic property calculated. Used to change the units. ax: matplotlib :class:`Axes` or None if a new figure should be created. ylabel: label for the y axis label: label of the plot ylim: tuple specifying the y-axis limits. kwargs: kwargs passed to the matplotlib function 'plot'. Returns: matplotlib figure
def parse_info(response):
    """Parse the response of Redis's INFO command into a Python dict.

    Section headers ("# Name") open nested dicts; "key:value" lines are
    stored under the current section, with ints parsed and "k=v,..." values
    expanded into sub-dicts. Byte data is decoded to unicode.
    """
    info = {}
    response = response.decode('utf-8')

    def get_value(value):
        # A value without '=' is a plain scalar. (The original guard
        # `if ',' and '=' not in value` reduced to exactly this test,
        # since the non-empty literal ',' is always truthy.)
        if '=' not in value:
            return value
        sub_dict = {}
        for item in value.split(','):
            k, v = item.split('=')
            try:
                sub_dict[k] = int(v)
            except ValueError:
                sub_dict[k] = v
        return sub_dict

    data = info
    for line in response.splitlines():
        keyvalue = line.split(':')
        if len(keyvalue) == 2:
            key, value = keyvalue
            try:
                data[key] = int(value)
            except ValueError:
                data[key] = get_value(value)
        else:
            # Section header line like "# Server": start a nested dict.
            data = {}
            info[line[2:]] = data
    return info
Parse the response of Redis's INFO command into a Python dict. In doing so, convert byte data into unicode.
def _updateWordSet(self):
    """Rebuild the completion word set from keywords, custom completions
    and (time-boxed) the words found in the document text."""
    self._wordSet = set(self._keywords) | set(self._customCompletions)
    deadline = time.time() + self._WORD_SET_UPDATE_MAX_TIME_SEC
    for line in self._qpart.lines:
        self._wordSet.update(_wordRegExp.findall(line))
        # Stop scanning once the time budget is exhausted.
        if time.time() > deadline:
            break
Make a set of words, which shall be completed, from text
def get_inputs(node, kwargs):
    """Resolve a node's inputs to producer-node names.

    Returns a (name, input node names, attrs) tuple; ``attrs`` defaults
    to an empty dict when the node carries none.
    """
    proc_nodes = kwargs["proc_nodes"]
    index_lookup = kwargs["index_lookup"]
    input_names = [proc_nodes[index_lookup[entry[0]]].name
                   for entry in node["inputs"]]
    return node["name"], input_names, node.get("attrs", {})
Helper function to get inputs
def _update_records(self, records, data): data = {k: v for k, v in data.items() if v} records = [dict(record, **data) for record in records] return self._apicall( 'updateDnsRecords', domainname=self.domain, dnsrecordset={'dnsrecords': records}, ).get('dnsrecords', [])
Insert or update a list of DNS records, specified in the netcup API convention. The fields ``hostname``, ``type``, and ``destination`` are mandatory and must be provided either in the record dict or through ``data``!
def modpath_pkg_resources(module, entry_point):
    """Resolve *module*'s path through pkg_resources.

    Returns a list with the resolved path, or an empty list when the
    module cannot be imported or resolved.
    """
    try:
        path = resource_filename_mod_entry_point(module.__name__, entry_point)
    except ImportError:
        logger.warning("module '%s' could not be imported", module.__name__)
        return []
    except Exception:
        logger.warning("%r does not appear to be a valid module", module)
        return []
    return [path] if path else []
Goes through pkg_resources for compliance with various PEPs. This one accepts a module as argument.
def prep_for_intensity_plot(data, meth_code, dropna=(), reqd_cols=()):
    """Strip measurement data down to what an intensity plot needs.

    Finds the intensity column, drops rows empty in *dropna* columns,
    keeps only *reqd_cols* (plus method_codes and the intensity column)
    and filters to rows whose method_codes contain *meth_code*.

    Returns (True, DataFrame) on success, else (False, error message).
    """
    dropna_cols = list(dropna)
    needed_cols = list(reqd_cols)
    try:
        magn_col = get_intensity_col(data)
    except AttributeError:
        return False, "Could not get intensity method from data"

    if magn_col not in dropna_cols:
        dropna_cols.append(magn_col)
    data = data.dropna(axis=0, subset=dropna_cols)

    for col in ('method_codes', magn_col):
        if col not in needed_cols:
            needed_cols.append(col)
    try:
        data = data[needed_cols]
    except KeyError as ex:
        print(ex)
        missing = set(needed_cols).difference(data.columns)
        return False, "missing these required columns: {}".format(", ".join(missing))

    mask = data['method_codes'].str.contains(meth_code).astype(bool)
    return True, data[mask]
Strip down measurement data to what is needed for an intensity plot. Find the column with intensity data. Drop empty columns, and make sure required columns are present. Keep only records with the specified method code. Parameters ---------- data : pandas DataFrame measurement dataframe meth_code : str MagIC method code to include, i.e. 'LT-AF-Z' dropna : list columns that must not be empty reqd_cols : list columns that must be present Returns ---------- status : bool True if successful, else False data : pandas DataFrame measurement data with required columns
def overtime(self):
    """Return the number of overtimes played as an ``int`` (the SHOOTOUT
    constant for shootouts, 0 when none were played)."""
    label = self._overtime.lower()
    if label == 'ot':
        return 1
    if label == 'so':
        return SHOOTOUT
    if self._overtime == '':
        return 0
    num = re.findall(r'\d+', self._overtime)
    if num:
        # BUG FIX: re.findall yields strings; convert so the documented
        # int is actually returned instead of e.g. '2'.
        return int(num[0])
    return 0
Returns an ``int`` of the number of overtimes that were played during the game, or an int constant if the game went to a shootout.
def _maybe_purge_cache(self):
    """If enough time since the last check has elapsed, revalidate cached
    templates: drop entries whose file was deleted, clear the whole cache
    when any file changed."""
    if self._last_reload_check + MIN_CHECK_INTERVAL > time.time():
        return

    for name, tmpl in list(self.cache.items()):
        # BUG FIX: `not os.stat(path)` was never true -- os.stat raises
        # OSError for missing files instead of returning a falsy value.
        if not os.path.exists(tmpl.path):
            self.cache.pop(name)
            continue
        if os.stat(tmpl.path).st_mtime > tmpl.mtime:
            self.cache.clear()
            break

    self._last_reload_check = time.time()
If enough time since last check has elapsed, check if any of the cached templates has changed. If any of the template files were deleted, remove that file only. If any were changed, then purge the entire cache.
def get_subwords(self, word, on_unicode_error='strict'):
    """Given a word, return its subwords and their indices as
    (list, numpy array)."""
    subwords, indices = self.f.getSubwords(word, on_unicode_error)
    return subwords, np.array(indices)
Given a word, get the subwords and their indicies.
def toggle_rich_text(self, checked):
    """Toggle between sphinxified (rich) docstrings and plain ones."""
    if checked:
        # Switching to rich mode leaves raw-docstring mode.
        self.docstring = False
        self.switch_to_rich_text()
    self.set_option('rich_mode', checked)
Toggle between sphinxified docstrings or plain ones
def get_entities_tsv(namespace, workspace, etype):
    """List entities of *etype* in a workspace as TSV.

    Same as get_entities(), but the response body is TSV.
    """
    uri = "workspaces/%s/%s/entities/%s/tsv" % (namespace, workspace, etype)
    return __get(uri)
List entities of given type in a workspace as a TSV. Identical to get_entities(), but the response is a TSV. Args: namespace (str): project to which workspace belongs workspace (str): Workspace name etype (str): Entity type Swagger: https://api.firecloud.org/#!/Entities/browserDownloadEntitiesTSV
def _find_cont_gaussian_smooth(wl, fluxes, ivars, w): print("Finding the continuum") bot = np.dot(ivars, w.T) top = np.dot(fluxes*ivars, w.T) bad = bot == 0 cont = np.zeros(top.shape) cont[~bad] = top[~bad] / bot[~bad] return cont
Returns the weighted mean block of spectra Parameters ---------- wl: numpy ndarray wavelength vector flux: numpy ndarray block of flux values ivar: numpy ndarray block of ivar values L: float width of Gaussian used to assign weights Returns ------- smoothed_fluxes: numpy ndarray block of smoothed flux values, mean spectra
def togglePopup(self):
    """Show the popup when hidden; otherwise close it unless it is in
    Dialog mode."""
    popup = self._popupWidget
    if not popup.isVisible():
        self.showPopup()
        return
    if popup.currentMode() != popup.Mode.Dialog:
        popup.close()
Toggles whether or not the popup is visible.
def createLrrBafPlot(raw_dir, problematic_samples, format, dpi, out_prefix):
    """Create the LRR and BAF plots of the problematic samples via the
    baf_lrr_plot module, writing into ``<out_prefix>.LRR_BAF/``.

    Raises ProgramError when the underlying plotter fails.
    """
    dir_name = out_prefix + ".LRR_BAF"
    if not os.path.isdir(dir_name):
        os.mkdir(dir_name)

    options = [
        "--problematic-samples", problematic_samples,
        "--raw-dir", raw_dir,
        "--format", format,
        "--dpi", str(dpi),
        "--out", os.path.join(dir_name, "baf_lrr"),
    ]
    try:
        baf_lrr_plot.main(options)
    except baf_lrr_plot.ProgramError as e:
        raise ProgramError("BAF LRR plot: {}".format(e))
Creates the LRR and BAF plot. :param raw_dir: the directory containing the intensities. :param problematic_samples: the file containing the problematic samples. :param format: the format of the plot. :param dpi: the DPI of the resulting images. :param out_prefix: the prefix of the output file. :type raw_dir: str :type problematic_samples: str :type format: str :type out_prefix: str Creates the LRR (Log R Ratio) and BAF (B Allele Frequency) of the problematic samples using the :py:mod:`pyGenClean.SexCheck.baf_lrr_plot` module.
def format_jid_instance_ext(jid, job):
    """Format *job* like format_job_instance, adding the JID and its
    decoded start time."""
    formatted = format_job_instance(job)
    formatted['JID'] = jid
    formatted['StartTime'] = jid_to_time(jid)
    return formatted
Format the jid correctly with jid included
def setStartSegment(self, segment):
    """Set the first segment on the contour.

    *segment* may be a segment object or an index. No-ops on contours
    with fewer than two segments or when the segment is already first.
    """
    segments = self.segments
    index = segment if isinstance(segment, int) else segments.index(segment)
    if len(self.segments) < 2:
        return
    if index == 0:
        return
    if index >= len(segments):
        raise ValueError(("The contour does not contain a segment "
                          "at index %d" % index))
    self._setStartSegment(index)
Set the first segment on the contour. segment can be a segment object or an index.
def iterate(self, max_iter=None):
    """Yield objects from the stream, at most *max_iter* of them
    (exhaust the stream when None). The streamer is activated as a
    context manager for the duration of iteration."""
    with self as active:
        for count, item in enumerate(active.stream_):
            if max_iter is not None and count >= max_iter:
                return
            yield item
Instantiate an iterator. Parameters ---------- max_iter : None or int > 0 Maximum number of iterations to yield. If ``None``, exhaust the stream. Yields ------ obj : Objects yielded by the streamer provided on init. See Also -------- cycle : force an infinite stream.
def resume(env, identifier):
    """Resume a paused virtual server, resolving *identifier* to its id."""
    manager = SoftLayer.VSManager(env.client)
    vs_id = helpers.resolve_id(manager.resolve_ids, identifier, 'VS')
    env.client['Virtual_Guest'].resume(id=vs_id)
Resumes a paused virtual server.
def check_length_of_shape_or_intercept_names(name_list,
                                             num_alts,
                                             constrained_param,
                                             list_title):
    """Raise ValueError unless ``len(name_list) == num_alts -
    constrained_param`` (one name per estimated parameter). Returns None
    when the length matches."""
    expected_length = num_alts - constrained_param
    if len(name_list) == expected_length:
        return None
    raise ValueError("\n".join([
        "{} is of the wrong length:".format(list_title),
        "len({}) == {}".format(list_title, len(name_list)),
        "The correct length is: {}".format(expected_length),
    ]))
Ensures that the length of the parameter names matches the number of
parameters that will be estimated. Will raise a ValueError otherwise.

Parameters
----------
name_list : list of strings.
    Each element should be the name of a parameter that is to be
    estimated.
num_alts : int.
    Should be the total number of alternatives in the universal choice
    set for this dataset.
constrained_param : {0, 1, True, False}
    Indicates whether (1 or True) or not (0 or False) one of the type of
    parameters being estimated will be constrained. For instance,
    constraining one of the intercepts.
list_title : str.
    Should specify the type of parameters whose names are being checked.
    Examples include 'intercept_params' or 'shape_params'.

Returns
-------
None.
def choice(self, board: Union[chess.Board, int], *, minimum_weight: int = 1, exclude_moves: Container[chess.Move] = (), random=random) -> Entry:
    """Uniformly select a random entry for the given position.

    :raises: :exc:`IndexError` if no entries are found.
    """
    selected = None
    candidates = self.find_all(board, minimum_weight=minimum_weight,
                               exclude_moves=exclude_moves)
    for seen, entry in enumerate(candidates):
        # Reservoir sampling: replace the pick with probability 1/(seen+1),
        # giving each entry an equal chance without materializing the list.
        if selected is None or random.randint(0, seen) == seen:
            selected = entry
    if selected is None:
        raise IndexError()
    return selected
Uniformly selects a random entry for the given position. :raises: :exc:`IndexError` if no entries are found.
async def start_child():
    """Start the child process watcher that looks for code changes and
    triggers a reload when a watched directory or a ``.py`` file changes."""
    logger.info('Started to watch for code changes')
    loop = asyncio.get_event_loop()
    watcher = aionotify.Watcher()
    # Every inotify event class that can indicate a source change.
    flags = (
        aionotify.Flags.MODIFY |
        aionotify.Flags.DELETE |
        aionotify.Flags.ATTRIB |
        aionotify.Flags.MOVED_TO |
        aionotify.Flags.MOVED_FROM |
        aionotify.Flags.CREATE |
        aionotify.Flags.DELETE_SELF |
        aionotify.Flags.MOVE_SELF
    )
    watched_dirs = list_dirs()
    for dir_name in watched_dirs:
        watcher.watch(path=dir_name, flags=flags)
    await watcher.setup(loop)
    while True:
        evt = await watcher.get_event()
        file_path = path.join(evt.alias, evt.name)
        # Only react to whole-directory events or Python source files.
        if file_path in watched_dirs or file_path.endswith('.py'):
            # Debounce: let rapid successive events settle before reloading.
            await asyncio.sleep(settings.CODE_RELOAD_DEBOUNCE)
            break
    watcher.close()
    exit_for_reload()
Start the child process that will look for changes in modules.
def to_array(self):
    """
    Serializes this SuccessfulPayment to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    # Start from the parent class's serialization.
    array = super(SuccessfulPayment, self).to_array()
    # Mandatory fields are always emitted.
    array['currency'] = u(self.currency)
    array['total_amount'] = int(self.total_amount)
    array['invoice_payload'] = u(self.invoice_payload)
    array['telegram_payment_charge_id'] = u(self.telegram_payment_charge_id)
    array['provider_payment_charge_id'] = u(self.provider_payment_charge_id)
    # Optional fields are emitted only when present.
    if self.shipping_option_id is not None:
        array['shipping_option_id'] = u(self.shipping_option_id)
    if self.order_info is not None:
        array['order_info'] = self.order_info.to_array()
    return array
Serializes this SuccessfulPayment to a dictionary. :return: dictionary representation of this object. :rtype: dict
def find_step_impl(self, step):
    """Find the implementation of *step* by matching its text.

    Returns (StepImpl, transformed args). Raises UndefinedStepImpl when
    nothing matches and AmbiguousStepImpl when more than one does.
    """
    found = None
    for candidate in self.steps[step.step_type]:
        matched = candidate.match(step.match)
        if not matched:
            continue
        if found:
            raise AmbiguousStepImpl(step, found[0], candidate)
        transformed = [self._apply_transforms(arg, candidate)
                       for arg in matched.groups()]
        found = candidate, transformed
    if not found:
        raise UndefinedStepImpl(step)
    return found
Find the implementation of the step for the given match string. Returns the StepImpl object corresponding to the implementation, and the arguments to the step implementation. If no implementation is found, raises UndefinedStepImpl. If more than one implementation is found, raises AmbiguousStepImpl. Each of the arguments returned will have been transformed by the first matching transform implementation.
def do_up(self,args):
    """Navigate up by one level in the resource hierarchy.

    Prints a hint and returns None at the root; otherwise returns True,
    which signals the caller to pop this level. `up -h` for details.
    """
    parser = CommandArgumentParser("up")
    # Parsed only for side effects (e.g. handling -h); result unused.
    args = vars(parser.parse_args(args))
    if None == self.parent:
        print "You're at the root. Try 'quit' to quit"
    else:
        return True
Navigate up by one level. For example, if you are in `(aws)/stack:.../asg:.../`, executing `up` will place you in `(aws)/stack:.../`. up -h for more details
def is_iterable_of_int(l):
    r"""Return True iff *l* is iterable and contains only integral types.

    NOTE(review): the original body started with a stray bare ``r``
    expression (a leftover raw-docstring prefix) that raised NameError on
    every call; it is removed here.
    """
    if not is_iterable(l):
        return False
    return all(is_int(value) for value in l)
r""" Checks if l is iterable and contains only integral types
def run_iqtree(phy, model, threads, cluster, node):
    """Run IQ-Tree on alignment *phy*, locally or via qsub on a cluster.

    Skips the run when the expected treefile already exists; returns its path.
    """
    # Cap processors-per-node at 24 for cluster submissions.
    if threads > 24:
        ppn = 24
    else:
        ppn = threads
    tree = '%s.treefile' % (phy)
    if check(tree) is False:
        if model is False:
            # 'TEST' makes IQ-Tree pick the best-fit substitution model.
            model = 'TEST'
        dir = os.getcwd()
        command = 'iqtree-omp -s %s -m %s -nt %s -quiet' % \
            (phy, model, threads)
        if cluster is False:
            p = Popen(command, shell = True)
        else:
            if node is False:
                node = '1'
            qsub = 'qsub -l nodes=%s:ppn=%s -m e -N iqtree' % (node, ppn)
            # Run in /tmp on the compute node, copy results back, then
            # re-invoke this pipeline with --no-fast --iq.
            command = 'cd /tmp; mkdir iqtree; cd iqtree; cp %s/%s .; %s; mv * %s/; rm -r ../iqtree' \
                % (dir, phy, command, dir)
            re_call = 'cd %s; %s --no-fast --iq' % (dir.rsplit('/', 1)[0], ' '.join(sys.argv))
            p = Popen('echo "%s;%s" | %s' % (command, re_call, qsub), shell = True)
        p.communicate()
    return tree
run IQ-Tree
def clone(cls, repo_location, repo_dir=None, branch_or_tag=None, temp=False):
    """Clone *repo_location* into *repo_dir* and check out *branch_or_tag*.

    With ``temp=True`` a temporary directory (scheduled for deletion at
    process exit) is created and *repo_dir* is ignored; otherwise the
    target defaults to the current working directory. Returns a cls
    instance wrapping the clone.
    """
    if temp:
        reponame = repo_location.rsplit('/', 1)[-1]
        parts = [str(x) for x in (reponame, branch_or_tag) if x]
        suffix = '%s.temp_simpl_GitRepo' % '_'.join(parts)
        target = create_tempdir(suffix=suffix, delete=True)
    else:
        target = repo_dir if repo_dir else os.getcwd()
    git_clone(target, repo_location, branch_or_tag=branch_or_tag)
    return cls(target)
Clone repo at repo_location into repo_dir and checkout branch_or_tag. Defaults into current working directory if repo_dir is not supplied. If 'temp' is True, a temporary directory will be created for you and the repository will be cloned into it. The tempdir is scheduled for deletion (when the process exits) through an exit function registered with the atexit module. If 'temp' is True, repo_dir is ignored. If branch_or_tag is not specified, the HEAD of the primary branch of the cloned repo is checked out.
def init_celery(project_name):
    """Create and configure a Celery app for *project_name* from Django
    settings, with task autodiscovery across installed apps."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                          '%s.settings' % project_name)
    celery_app = Celery(project_name)
    celery_app.config_from_object('django.conf:settings')
    celery_app.autodiscover_tasks(settings.INSTALLED_APPS,
                                  related_name='tasks')
    return celery_app
init celery app without the need of redundant code
def encode_packet(packet: dict) -> str:
    """Construct packet string from packet dictionary.

    >>> encode_packet({
    ...     'protocol': 'rfdebug',
    ...     'command': 'on',
    ... })
    '10;RFDEBUG=on;'
    """
    protocol = packet['protocol']
    # Both debug protocols emit the identical wire format (the original had
    # two verbatim-duplicate branches). NOTE(review): 'rfudebug' also emits
    # "RFDEBUG=" -- confirm this is intentional before changing the label.
    if protocol in ('rfdebug', 'rfudebug'):
        return '10;RFDEBUG=' + packet['command'] + ';'
    return SWITCH_COMMAND_TEMPLATE.format(
        node=PacketHeader.master.value,
        **packet
    )
Construct packet string from packet dictionary. >>> encode_packet({ ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... 'command': 'on', ... }) '10;newkaku;000001;01;on;'
def _get_adjustment(mag, year, mmin, completeness_year, t_f, mag_inc=0.1): if len(completeness_year) == 1: if (mag >= mmin) and (year >= completeness_year[0]): return 1.0 else: return False kval = int(((mag - mmin) / mag_inc)) + 1 if (kval >= 1) and (year >= completeness_year[kval - 1]): return t_f else: return False
If the magnitude is greater than the minimum in the completeness table and the year is greater than the corresponding completeness year then return the Weichert factor :param float mag: Magnitude of an earthquake :param float year: Year of earthquake :param np.ndarray completeness_table: Completeness table :param float mag_inc: Magnitude increment :param float t_f: Weichert adjustment factor :returns: Weichert adjustment factor is event is in complete part of catalogue (0.0 otherwise)