def _registrant_publication(reg_pub, rules):
    """
    Separate the registration from the publication in a given string.

    :param reg_pub: A string of digits representing a registration and
        publication.
    :param rules: A list of RegistrantRules which designate where to separate
        the values in the string.
    :returns: A (registrant, publication) tuple of strings.
    """
    for rule in rules:
        if rule.min <= reg_pub <= rule.max:
            reg_len = rule.registrant_length
            break
    else:
        raise Exception('Registrant/Publication not found in registrant '
                        'rule list.')
    registrant, publication = reg_pub[:reg_len], reg_pub[reg_len:]
    return registrant, publication
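A minimal usage sketch. RegistrantRule is assumed here to be a simple namedtuple with string bounds; the real class may differ:

    from collections import namedtuple

    # Hypothetical stand-in for the real RegistrantRule class.
    RegistrantRule = namedtuple('RegistrantRule', ['min', 'max', 'registrant_length'])

    rules = [RegistrantRule(min='0000000', max='1999999', registrant_length=2)]
    registrant, publication = _registrant_publication('1234567', rules)
    # registrant == '12', publication == '34567'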
def Pack(self):
    """Returns this message in an over-the-wire format."""
    return struct.pack(self.format,
                       self.command, self.arg0, self.arg1,
                       len(self.data), self.checksum, self.magic)
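For orientation, the six fields are packed in order. Assuming an ADB-style layout where self.format is '<6I' (six little-endian unsigned 32-bit integers; the actual format string is not shown in this excerpt), the call is equivalent to:

    import struct

    # Hypothetical values for a data-less message; magic is command XOR 0xffffffff.
    command, arg0, arg1, data = 0x4e584e43, 0x01000000, 4096, b''
    packed = struct.pack('<6I', command, arg0, arg1, len(data), 0, command ^ 0xffffffff)
    assert len(packed) == 24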
def use_isolated_vault_view(self):
    """Pass through to provider AuthorizationLookupSession.use_isolated_vault_view"""
    self._vault_view = ISOLATED
    # self._get_provider_session('authorization_lookup_session')  # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_isolated_vault_view()
        except AttributeError:
            pass
def parse_data_line(self, line):
    """
    Parses the data line into a dictionary for the importer
    """
    it = self._generate(line)
    reader = csv.DictReader(it, fieldnames=self.headers)
    values = next(reader)  # was reader.next(), which is Python 2 only
    values['DefaultResult'] = 'ResidualError'
    values['LineName'] = re.sub(r'\W', '', values['LineName'].strip())
    values['Concentration'] = values['Cc'].strip()
    values['StandardDeviation'] = values['SD'].strip()
    values['ResidualError'] = values['RSD'].strip()
    values['NetIntensity'] = values['Net_Intensity'].strip().split('/')
    values['Remarks'] = ''
    values['TestLine'] = ''
    self._addRawResult(self._resid, {values['LineName']: values}, False)
    return 0
def apply_changes(self, other, with_buffer=False):
    """Applies updates from the buffer of another filter.

    Params:
        other (MeanStdFilter): Other filter to apply info from
        with_buffer (bool): Flag for specifying if the buffer should be
            copied from other.

    Examples:
        >>> a = MeanStdFilter(())
        >>> a(1)
        >>> a(2)
        >>> print([a.rs.n, a.rs.mean, a.buffer.n])
        [2, 1.5, 2]
        >>> b = MeanStdFilter(())
        >>> b(10)
        >>> a.apply_changes(b, with_buffer=False)
        >>> print([a.rs.n, a.rs.mean, a.buffer.n])
        [3, 4.333333333333333, 2]
        >>> a.apply_changes(b, with_buffer=True)
        >>> print([a.rs.n, a.rs.mean, a.buffer.n])
        [4, 5.75, 1]
    """
    self.rs.update(other.buffer)
    if with_buffer:
        self.buffer = other.buffer.copy()
def push_results(self, kill_event):
    """ Listens on the pending_result_queue and sends out results via 0mq

    Parameters:
    -----------
    kill_event : threading.Event
          Event to let the thread know when it is time to die.
    """
    logger.debug("[RESULT_PUSH_THREAD] Starting thread")

    push_poll_period = max(10, self.poll_period) / 1000  # push_poll_period must be at least 10 ms
    logger.debug("[RESULT_PUSH_THREAD] push poll period: {}".format(push_poll_period))

    last_beat = time.time()
    items = []

    while not kill_event.is_set():
        try:
            r = self.pending_result_queue.get(block=True, timeout=push_poll_period)
            items.append(r)
        except queue.Empty:
            pass
        except Exception as e:
            logger.exception("[RESULT_PUSH_THREAD] Got an exception: {}".format(e))

        # If we have reached poll_period duration or timer has expired, we send results
        if len(items) >= self.max_queue_size or time.time() > last_beat + push_poll_period:
            last_beat = time.time()
            if items:
                self.result_outgoing.send_multipart(items)
                items = []

    logger.critical("[RESULT_PUSH_THREAD] Exiting")
def setup_icons(self):
    """Set all icons on buttons

    :returns: None
    :rtype: None
    :raises: None
    """
    folder_icon = get_icon('glyphicons_144_folder_open.png', asicon=True)
    self.asset_open_path_tb.setIcon(folder_icon)
    self.shot_open_path_tb.setIcon(folder_icon)
    current_icon = get_icon('glyphicons_181_download_alt.png', asicon=True)
    self.current_pb.setIcon(current_icon)
    refresh_icon = get_icon('refresh.png', asicon=True)
    self.refresh_tb.setIcon(refresh_icon)
def key_value(minion_id,
              pillar,  # pylint: disable=W0613
              pillar_key='redis_pillar'):
    '''
    Looks for key in redis matching minion_id, returns a structure based on
    the data type of the redis key. String for string type, dict for hash
    type and lists for lists, sets and sorted sets.

    pillar_key
        Pillar key to return data into
    '''
    # Identify key type and process as needed based on that type
    key_type = __salt__['redis.key_type'](minion_id)
    if key_type == 'string':
        return {pillar_key: __salt__['redis.get_key'](minion_id)}
    elif key_type == 'hash':
        return {pillar_key: __salt__['redis.hgetall'](minion_id)}
    elif key_type == 'list':
        list_size = __salt__['redis.llen'](minion_id)
        if not list_size:
            return {}
        return {pillar_key: __salt__['redis.lrange'](minion_id, 0,
                                                     list_size - 1)}
    elif key_type == 'set':
        return {pillar_key: __salt__['redis.smembers'](minion_id)}
    elif key_type == 'zset':
        set_size = __salt__['redis.zcard'](minion_id)
        if not set_size:
            return {}
        return {pillar_key: __salt__['redis.zrange'](minion_id, 0,
                                                     set_size - 1)}
    # Return nothing for unhandled types
    return {}
def iter_chunks(chunk_size, iterator):
    "Yield from an iterator in chunks of chunk_size."
    iterator = iter(iterator)
    while True:
        next_chunk = _take_n(chunk_size, iterator)
        # If len(iterable) % chunk_size == 0, don't return an empty chunk.
        if next_chunk:
            yield next_chunk
        else:
            break
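The helper _take_n is not shown in this excerpt; a plausible implementation using itertools.islice would be:

    from itertools import islice

    def _take_n(n, iterator):
        # Presumed helper: return up to n items from the iterator as a list.
        return list(islice(iterator, n))

    # Example: list(iter_chunks(2, range(5))) == [[0, 1], [2, 3], [4]]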
def get_pid_from_tid(self, dwThreadId):
    """
    Retrieves the global ID of the process that owns the thread.

    @type  dwThreadId: int
    @param dwThreadId: Thread global ID.

    @rtype:  int
    @return: Process global ID.

    @raise KeyError: The thread does not exist.
    """
    try:
        # No good, because in XP and below it tries to get the PID
        # through the toolhelp API, and that's slow. We don't want
        # to scan for threads over and over for each call.
        ## dwProcessId = Thread(dwThreadId).get_pid()

        # This API only exists in Windows 2003, Vista and above.
        try:
            hThread = win32.OpenThread(
                win32.THREAD_QUERY_LIMITED_INFORMATION, False, dwThreadId)
        except WindowsError:
            e = sys.exc_info()[1]
            if e.winerror != win32.ERROR_ACCESS_DENIED:
                raise
            hThread = win32.OpenThread(
                win32.THREAD_QUERY_INFORMATION, False, dwThreadId)
        try:
            return win32.GetProcessIdOfThread(hThread)
        finally:
            hThread.close()

    # If all else fails, go through all processes in the snapshot
    # looking for the one that owns the thread we're looking for.
    # If the snapshot was empty the iteration should trigger an
    # automatic scan. Otherwise, it'll look for the thread in what
    # could possibly be an outdated snapshot.
    except Exception:
        for aProcess in self.iter_processes():
            if aProcess.has_thread(dwThreadId):
                return aProcess.get_pid()

    # The thread wasn't found, so let's refresh the snapshot and retry.
    # Normally this shouldn't happen since this function is only useful
    # for the debugger, so the thread should already exist in the snapshot.
    self.scan_processes_and_threads()
    for aProcess in self.iter_processes():
        if aProcess.has_thread(dwThreadId):
            return aProcess.get_pid()

    # No luck! It appears the thread doesn't exist after all.
    msg = "Unknown thread ID %d" % dwThreadId
    raise KeyError(msg)
def create_session(user):
    """
    Create the login session

    :param user: UserModel
    :return:
    """
    def cb():
        if user:
            if __options__.get("require_email_verification") and not user.email_verified:
                raise exceptions.VerifyEmailError()
            if flask_login.login_user(user):
                user.update(last_login_at=utc_now())
                return user
        return None
    return signals.user_login(cb)
def AddEvent(self, event):
    """Adds an event.

    Args:
      event (EventObject): event.

    Raises:
      IOError: when the storage file is closed or read-only or
          if the event data identifier type is not supported.
      OSError: when the storage file is closed or read-only or
          if the event data identifier type is not supported.
    """
    self._RaiseIfNotWritable()

    # TODO: change to no longer allow event_data_identifier is None
    # after refactoring every parser to generate event data.
    event_data_identifier = event.GetEventDataIdentifier()
    if event_data_identifier:
        if not isinstance(event_data_identifier, identifiers.SQLTableIdentifier):
            raise IOError('Unsupported event data identifier type: {0:s}'.format(
                type(event_data_identifier)))

        event.event_data_row_identifier = event_data_identifier.row_identifier

    self._AddSerializedEvent(event)
def catFiles(filesToCat, catFile):
    """Cats a bunch of files into one file. Ensures that no more than maxCat
    files are concatenated at each step.
    """
    if len(filesToCat) == 0:
        # We must handle this case or the cat call will hang waiting for input
        open(catFile, 'w').close()
        return
    maxCat = 25
    system("cat %s > %s" % (" ".join(filesToCat[:maxCat]), catFile))
    filesToCat = filesToCat[maxCat:]
    while len(filesToCat) > 0:
        system("cat %s >> %s" % (" ".join(filesToCat[:maxCat]), catFile))
        filesToCat = filesToCat[maxCat:]
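The batching exists to keep each shell command safely under the OS argument-length limit. For comparison, a pure-Python sketch without shelling out (and thus without that constraint) might look like:

    import shutil

    def cat_files_py(files_to_cat, cat_file):
        # Concatenate files in Python, so no ARG_MAX batching is needed.
        with open(cat_file, 'wb') as out:
            for path in files_to_cat:
                with open(path, 'rb') as src:
                    shutil.copyfileobj(src, out)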
def qos_map_cos_mutation_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    map = ET.SubElement(qos, "map")
    cos_mutation = ET.SubElement(map, "cos-mutation")
    name = ET.SubElement(cos_mutation, "name")
    name.text = kwargs.pop('name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def InterpretWaveform(raw, integersOnly=False, headersOnly=False, noTimeArray=False):
    """
    Take the raw binary from a file saved from the LeCroy, read from a file
    using the 2 lines:

        with open(filename, "rb") as file:
            raw = file.read()

    and extract various properties of the saved time trace.

    Parameters
    ----------
    raw : bytes
        Bytes object containing the binary contents of the saved raw/trc file
    integersOnly : bool, optional
        If True, only returns the unprocessed integers (read from the ADC)
        rather than the signal in volts. Defaults to False.
    headersOnly : bool, optional
        If True, only returns the file header. Defaults to False.
    noTimeArray : bool, optional
        If True, returns timeStart, timeStop and timeStep and doesn't create
        the time array.

    Returns
    -------
    WAVEDESC : dict
        dictionary containing some properties of the time trace and
        oscilloscope settings extracted from the header file.
    x : ndarray / tuple
        The array of time values recorded by the oscilloscope or, if
        noTimeArray is True, a tuple of (timeStart, timeStop, timeStep)
    y : ndarray
        The array of voltage values recorded by the oscilloscope
    integers : ndarray
        The array of raw integers recorded from the ADC and stored in the
        binary file
    MissingData : bool
        bool stating if any data was missing
    """
    MissingData = False
    from struct import unpack
    if raw[0:1] != b'#':
        cmd = raw.split(b',')[0]  # "C1:WF ALL" or similar
        wave = raw[len(cmd)+1:]   # Remove the above command text (and the trailing comma)
    else:
        wave = raw
    del raw

    # if wave[0:1] != b'#':
    #     warnings.warn('Waveform format not as expected, time trace may be missing data')
    #     MissingData = True

    n = int(wave[1:2])    # number of digits in length of data
    N = int(wave[2:2+n])  # number describing length of data
    if wave.endswith(b'\n'):
        wave = wave[:-1]
    wave = wave[2+n:]

    # if N != len(wave):
    #     warnings.warn('Length of waveform not as expected, time trace may be missing data')
    #     MissingData = True

    # Code to parse WAVEDESC generated by parsing template, returned from
    # scope query "TEMPLATE?"
    # Note that this is not well tested and will not handle unusual settings
    WAVEDESC = dict()
    WAVEDESC['DESCRIPTOR_NAME'] = wave[0:16].strip(b'\x00')
    WAVEDESC['TEMPLATE_NAME'] = wave[16:32].strip(b'\x00')
    WAVEDESC['COMM_TYPE'] = {0: 'byte', 1: 'word'}[unpack(b"<H", wave[32:34])[0]]
    WAVEDESC['COMM_ORDER'] = {0: 'HIFIRST', 1: 'LOFIRST'}[unpack("<H", wave[34:36])[0]]
    WAVEDESC['WAVE_DESCRIPTOR'] = unpack('<l', wave[36:40])[0]
    WAVEDESC['USER_TEXT'] = unpack('<l', wave[40:44])[0]
    WAVEDESC['RES_DESC1'] = unpack('<l', wave[44:48])[0]
    WAVEDESC['TRIGTIME_ARRAY'] = unpack('<l', wave[48:52])[0]
    WAVEDESC['RIS_TIME_ARRAY'] = unpack('<l', wave[52:56])[0]
    WAVEDESC['RES_ARRAY1'] = unpack('<l', wave[56:60])[0]
    WAVEDESC['WAVE_ARRAY_1'] = unpack('<l', wave[60:64])[0]
    WAVEDESC['WAVE_ARRAY_2'] = unpack('<l', wave[64:68])[0]
    WAVEDESC['RES_ARRAY2'] = unpack('<l', wave[68:72])[0]
    WAVEDESC['RES_ARRAY3'] = unpack('<l', wave[72:76])[0]
    WAVEDESC['INSTRUMENT_NAME'] = wave[76:92].strip(b'\x00')
    WAVEDESC['INSTRUMENT_NUMBER'] = unpack('<l', wave[92:96])[0]
    WAVEDESC['TRACE_LABEL'] = wave[96:112].strip(b'\x00')
    WAVEDESC['RESERVED1'] = unpack('<h', wave[112:114])[0]
    WAVEDESC['RESERVED2'] = unpack('<h', wave[114:116])[0]
    WAVEDESC['WAVE_ARRAY_COUNT'] = unpack('<l', wave[116:120])[0]
    WAVEDESC['PNTS_PER_SCREEN'] = unpack('<l', wave[120:124])[0]
    WAVEDESC['FIRST_VALID_PNT'] = unpack('<l', wave[124:128])[0]
    WAVEDESC['LAST_VALID_PNT'] = unpack('<l', wave[128:132])[0]
    WAVEDESC['FIRST_POINT'] = unpack('<l', wave[132:136])[0]
    WAVEDESC['SPARSING_FACTOR'] = unpack('<l', wave[136:140])[0]
    WAVEDESC['SEGMENT_INDEX'] = unpack('<l', wave[140:144])[0]
    WAVEDESC['SUBARRAY_COUNT'] = unpack('<l', wave[144:148])[0]
    WAVEDESC['SWEEPS_PER_ACQ'] = unpack('<l', wave[148:152])[0]
    WAVEDESC['POINTS_PER_PAIR'] = unpack('<h', wave[152:154])[0]
    WAVEDESC['PAIR_OFFSET'] = unpack('<h', wave[154:156])[0]
    WAVEDESC['VERTICAL_GAIN'] = unpack('<f', wave[156:160])[0]
    WAVEDESC['VERTICAL_OFFSET'] = unpack('<f', wave[160:164])[0]
    WAVEDESC['MAX_VALUE'] = unpack('<f', wave[164:168])[0]
    WAVEDESC['MIN_VALUE'] = unpack('<f', wave[168:172])[0]
    WAVEDESC['NOMINAL_BITS'] = unpack('<h', wave[172:174])[0]
    WAVEDESC['NOM_SUBARRAY_COUNT'] = unpack('<h', wave[174:176])[0]
    WAVEDESC['HORIZ_INTERVAL'] = unpack('<f', wave[176:180])[0]
    WAVEDESC['HORIZ_OFFSET'] = unpack('<d', wave[180:188])[0]
    WAVEDESC['PIXEL_OFFSET'] = unpack('<d', wave[188:196])[0]
    WAVEDESC['VERTUNIT'] = wave[196:244].strip(b'\x00')
    WAVEDESC['HORUNIT'] = wave[244:292].strip(b'\x00')
    WAVEDESC['HORIZ_UNCERTAINTY'] = unpack('<f', wave[292:296])[0]
    WAVEDESC['TRIGGER_TIME'] = wave[296:312]  # Format time_stamp not implemented
    WAVEDESC['ACQ_DURATION'] = unpack('<f', wave[312:316])[0]
    WAVEDESC['RECORD_TYPE'] = {
        0: 'single_sweep', 1: 'interleaved', 2: 'histogram', 3: 'graph',
        4: 'filter_coefficient', 5: 'complex', 6: 'extrema',
        7: 'sequence_obsolete', 8: 'centered_RIS', 9: 'peak_detect',
    }[unpack("<H", wave[316:318])[0]]
    WAVEDESC['PROCESSING_DONE'] = {
        0: 'no_processing', 1: 'fir_filter', 2: 'interpolated', 3: 'sparsed',
        4: 'autoscaled', 5: 'no_result', 6: 'rolling', 7: 'cumulative',
    }[unpack("<H", wave[318:320])[0]]
    WAVEDESC['RESERVED5'] = unpack('<h', wave[320:322])[0]
    WAVEDESC['RIS_SWEEPS'] = unpack('<h', wave[322:324])[0]
    WAVEDESC['TIMEBASE'] = {
        0: '1_ps/div', 1: '2_ps/div', 2: '5_ps/div', 3: '10_ps/div',
        4: '20_ps/div', 5: '50_ps/div', 6: '100_ps/div', 7: '200_ps/div',
        8: '500_ps/div', 9: '1_ns/div', 10: '2_ns/div', 11: '5_ns/div',
        12: '10_ns/div', 13: '20_ns/div', 14: '50_ns/div', 15: '100_ns/div',
        16: '200_ns/div', 17: '500_ns/div', 18: '1_us/div', 19: '2_us/div',
        20: '5_us/div', 21: '10_us/div', 22: '20_us/div', 23: '50_us/div',
        24: '100_us/div', 25: '200_us/div', 26: '500_us/div', 27: '1_ms/div',
        28: '2_ms/div', 29: '5_ms/div', 30: '10_ms/div', 31: '20_ms/div',
        32: '50_ms/div', 33: '100_ms/div', 34: '200_ms/div', 35: '500_ms/div',
        36: '1_s/div', 37: '2_s/div', 38: '5_s/div', 39: '10_s/div',
        40: '20_s/div', 41: '50_s/div', 42: '100_s/div', 43: '200_s/div',
        44: '500_s/div', 45: '1_ks/div', 46: '2_ks/div', 47: '5_ks/div',
        100: 'EXTERNAL',
    }[unpack("<H", wave[324:326])[0]]
    WAVEDESC['VERT_COUPLING'] = {
        0: 'DC_50_Ohms', 1: 'ground', 2: 'DC_1MOhm', 3: 'ground', 4: 'AC_1MOhm',
    }[unpack("<H", wave[326:328])[0]]
    WAVEDESC['PROBE_ATT'] = unpack('<f', wave[328:332])[0]
    WAVEDESC['FIXED_VERT_GAIN'] = {
        0: '1_uV/div', 1: '2_uV/div', 2: '5_uV/div', 3: '10_uV/div',
        4: '20_uV/div', 5: '50_uV/div', 6: '100_uV/div', 7: '200_uV/div',
        8: '500_uV/div', 9: '1_mV/div', 10: '2_mV/div', 11: '5_mV/div',
        12: '10_mV/div', 13: '20_mV/div', 14: '50_mV/div', 15: '100_mV/div',
        16: '200_mV/div', 17: '500_mV/div', 18: '1_V/div', 19: '2_V/div',
        20: '5_V/div', 21: '10_V/div', 22: '20_V/div', 23: '50_V/div',
        24: '100_V/div', 25: '200_V/div', 26: '500_V/div', 27: '1_kV/div',
    }[unpack("<H", wave[332:334])[0]]
    WAVEDESC['BANDWIDTH_LIMIT'] = {0: 'off', 1: 'on'}[unpack("<H", wave[334:336])[0]]
    WAVEDESC['VERTICAL_VERNIER'] = unpack('<f', wave[336:340])[0]
    WAVEDESC['ACQ_VERT_OFFSET'] = unpack('<f', wave[340:344])[0]
    WAVEDESC['WAVE_SOURCE'] = {
        0: 'CHANNEL_1', 1: 'CHANNEL_2', 2: 'CHANNEL_3', 3: 'CHANNEL_4',
        9: 'UNKNOWN',
    }[unpack("<H", wave[344:346])[0]]

    if len(wave[346:]) != WAVEDESC['WAVE_ARRAY_1']:
        warnings.warn('Binary data not the expected length, time trace may be missing data')
        MissingData = True

    if headersOnly:
        return WAVEDESC, MissingData
    else:
        from numpy import fromstring, int16, arange
        if MissingData != True:
            integers = fromstring(wave[346:], dtype=int16)
        else:
            integers = fromstring(wave[346:][:-1], dtype=int16)
        if integersOnly:
            return (WAVEDESC, integers, MissingData)
        elif noTimeArray:
            y = integers * WAVEDESC['VERTICAL_GAIN'] - WAVEDESC['VERTICAL_OFFSET']
            x = arange(len(integers)) * WAVEDESC['HORIZ_INTERVAL'] + WAVEDESC['HORIZ_OFFSET']
            timeStart = x[0]
            timeStop = x[-1]
            timeStep = x[1] - x[0]
            return (WAVEDESC, (timeStart, timeStop, timeStep), y, integers, MissingData)
        else:
            y = integers * WAVEDESC['VERTICAL_GAIN'] - WAVEDESC['VERTICAL_OFFSET']
            x = arange(len(integers)) * WAVEDESC['HORIZ_INTERVAL'] + WAVEDESC['HORIZ_OFFSET']
            return (WAVEDESC, x, y, integers, MissingData)
def speed(self):
    """
    Returns the current motor speed in tacho counts per second. Note, this is
    not necessarily degrees (although it is for LEGO motors). Use the
    `count_per_rot` attribute to convert this value to RPM or deg/sec.
    """
    self._speed, value = self.get_attr_int(self._speed, 'speed')
    return value
def copy_config_input_source_config_source_candidate_candidate(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    copy_config = ET.Element("copy_config")
    config = copy_config
    input = ET.SubElement(copy_config, "input")
    source = ET.SubElement(input, "source")
    config_source = ET.SubElement(source, "config-source")
    candidate = ET.SubElement(config_source, "candidate")
    candidate = ET.SubElement(candidate, "candidate")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def turn_physical_on(self, ro=None, vo=None):
    """
    NAME:

       turn_physical_on

    PURPOSE:

       turn on automatic returning of outputs in physical units

    INPUT:

       ro= reference distance (kpc; can be Quantity)

       vo= reference velocity (km/s; can be Quantity)

    OUTPUT:

       (none)

    HISTORY:

       2016-01-19 - Written - Bovy (UofT)

    """
    self._roSet = True
    self._voSet = True
    if ro is not None:
        if _APY_LOADED and isinstance(ro, units.Quantity):
            ro = ro.to(units.kpc).value
        self._ro = ro
    if vo is not None:
        if _APY_LOADED and isinstance(vo, units.Quantity):
            vo = vo.to(units.km/units.s).value
        self._vo = vo
    self._orb.turn_physical_on(ro=ro, vo=vo)
def list_blobs(
    self,
    max_results=None,
    page_token=None,
    prefix=None,
    delimiter=None,
    versions=None,
    projection="noAcl",
    fields=None,
    client=None,
):
    """Return an iterator used to find blobs in the bucket.

    If :attr:`user_project` is set, bills the API request to that project.

    :type max_results: int
    :param max_results:
        (Optional) The maximum number of blobs in each page of results
        from this request. Non-positive values are ignored. Defaults to
        a sensible value set by the API.

    :type page_token: str
    :param page_token:
        (Optional) If present, return the next batch of blobs, using the
        value, which must correspond to the ``nextPageToken`` value
        returned in the previous response. Deprecated: use the ``pages``
        property of the returned iterator instead of manually passing the
        token.

    :type prefix: str
    :param prefix: (Optional) prefix used to filter blobs.

    :type delimiter: str
    :param delimiter: (Optional) Delimiter, used with ``prefix`` to
                      emulate hierarchy.

    :type versions: bool
    :param versions: (Optional) Whether object versions should be returned
                     as separate blobs.

    :type projection: str
    :param projection: (Optional) If used, must be 'full' or 'noAcl'.
                       Defaults to ``'noAcl'``. Specifies the set of
                       properties to return.

    :type fields: str
    :param fields:
        (Optional) Selector specifying which fields to include
        in a partial response. Must be a list of fields. For
        example to get a partial response with just the next
        page token and the language of each blob returned:
        ``'items/contentLanguage,nextPageToken'``.

    :type client: :class:`~google.cloud.storage.client.Client`
    :param client: (Optional) The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob`
              in this bucket matching the arguments.
    """
    extra_params = {"projection": projection}

    if prefix is not None:
        extra_params["prefix"] = prefix

    if delimiter is not None:
        extra_params["delimiter"] = delimiter

    if versions is not None:
        extra_params["versions"] = versions

    if fields is not None:
        extra_params["fields"] = fields

    if self.user_project is not None:
        extra_params["userProject"] = self.user_project

    client = self._require_client(client)
    path = self.path + "/o"
    iterator = page_iterator.HTTPIterator(
        client=client,
        api_request=client._connection.api_request,
        path=path,
        item_to_value=_item_to_blob,
        page_token=page_token,
        max_results=max_results,
        extra_params=extra_params,
        page_start=_blobs_page_start,
    )
    iterator.bucket = self
    iterator.prefixes = set()
    return iterator
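A brief usage sketch (bucket is assumed to be an existing google-cloud-storage Bucket instance; the 'logs/' prefix is hypothetical):

    # List up to 100 blobs under the 'logs/' prefix.
    for blob in bucket.list_blobs(prefix='logs/', max_results=100):
        print(blob.name)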
def _get_rows(self, options):
    """Return only those data rows that should be printed, based on slicing
    and sorting.

    Arguments:

    options - dictionary of option settings."""
    if options["oldsortslice"]:
        rows = copy.deepcopy(self._rows[options["start"]:options["end"]])
    else:
        rows = copy.deepcopy(self._rows)

    # Sort
    if options["sortby"]:
        sortindex = self._field_names.index(options["sortby"])
        # Decorate
        rows = [[row[sortindex]] + row for row in rows]
        # Sort
        rows.sort(reverse=options["reversesort"], key=options["sort_key"])
        # Undecorate
        rows = [row[1:] for row in rows]

    # Slice if necessary
    if not options["oldsortslice"]:
        rows = rows[options["start"]:options["end"]]

    return rows
def salt_extend(extension, name, description, salt_dir, merge):
    '''
    Quickstart for developing on the saltstack installation

    .. versionadded:: 2016.11.0
    '''
    import salt.utils.extend
    salt.utils.extend.run(extension=extension,
                          name=name,
                          description=description,
                          salt_dir=salt_dir,
                          merge=merge)
def _check_with_label(self, selector, checked, locator=None,
                      allow_label_click=None, visible=None, wait=None, **kwargs):
    """
    Args:
        selector (str): The selector for the type of element that should be
            checked/unchecked.
        checked (bool): Whether the element should be checked.
        locator (str, optional): Which element to check.
        allow_label_click (bool, optional): Attempt to click the label to
            toggle state if element is non-visible. Defaults to
            :data:`capybara.automatic_label_click`.
        visible (bool | str, optional): The desired element visibility.
            Defaults to :data:`capybara.ignore_hidden_elements`.
        wait (int | float, optional): The number of seconds to wait to check
            the element. Defaults to :data:`capybara.default_max_wait_time`.
        **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
    """
    if allow_label_click is None:
        allow_label_click = capybara.automatic_label_click

    @self.synchronize(wait=BaseQuery.normalize_wait(wait))
    def check_with_label():
        element = None
        try:
            element = self.find(selector, locator, visible=visible, **kwargs)
            element.set(checked)
        except Exception as e:
            if not allow_label_click or not self._should_catch_error(e):
                raise
            try:
                if not element:
                    element = self.find(selector, locator, visible="all", **kwargs)
                label = self.find("label", field=element, visible=True)
                if element.checked != checked:
                    label.click()
            except Exception:
                raise e

    check_with_label()
def _escape(s):
    """Escape backslashes, newlines, tabs and commas in a string

    Commas are encoded as tabs
    """
    # The original assertion compared against type(str), which is always
    # <class 'type'>; compare against str itself instead.
    assert isinstance(s, str), \
        "expected %s but got %s; value=%s" % (str, type(s), s)
    s = s.replace("\\", "\\\\")
    s = s.replace("\n", "\\n")
    s = s.replace("\t", "\\t")
    s = s.replace(",", "\t")
    return s
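A quick example of the encoding (commas become tabs, literal tabs become the two characters backslash-t):

    assert _escape("a,b") == "a\tb"
    assert _escape("x\ty") == "x\\ty"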
def get_queryset(self, request):
    """Limit to TenantGroups that this user can access."""
    qs = super(TenantGroupAdmin, self).get_queryset(request)
    if not request.user.is_superuser:
        qs = qs.filter(tenantrole__user=request.user,
                       tenantrole__role=TenantRole.ROLE_GROUP_MANAGER)
    return qs
def _tarjan(self, function, order, stack, data):
    """Tarjan's strongly connected components algorithm.

    See also:
    - http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm
    """
    try:
        func_data = data[function.id]
        return order
    except KeyError:
        func_data = self._TarjanData(order)
        data[function.id] = func_data
    order += 1
    pos = len(stack)
    stack.append(function)
    func_data.onstack = True
    for call in compat_itervalues(function.calls):
        try:
            callee_data = data[call.callee_id]
            if callee_data.onstack:
                func_data.lowlink = min(func_data.lowlink, callee_data.order)
        except KeyError:
            callee = self.functions[call.callee_id]
            order = self._tarjan(callee, order, stack, data)
            callee_data = data[call.callee_id]
            func_data.lowlink = min(func_data.lowlink, callee_data.lowlink)
    if func_data.lowlink == func_data.order:
        # Strongly connected component found
        members = stack[pos:]
        del stack[pos:]
        if len(members) > 1:
            cycle = Cycle()
            for member in members:
                cycle.add_function(member)
                data[member.id].onstack = False
        else:
            for member in members:
                data[member.id].onstack = False
    return order
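For reference, a minimal standalone version of the same algorithm over a plain adjacency-dict graph (not the profile data structures used above):

    def tarjan_scc(graph):
        # graph: dict mapping node -> iterable of neighbour nodes.
        index, lowlink, onstack = {}, {}, set()
        stack, sccs, counter = [], [], [0]

        def strongconnect(v):
            index[v] = lowlink[v] = counter[0]
            counter[0] += 1
            stack.append(v)
            onstack.add(v)
            for w in graph.get(v, ()):
                if w not in index:
                    strongconnect(w)
                    lowlink[v] = min(lowlink[v], lowlink[w])
                elif w in onstack:
                    lowlink[v] = min(lowlink[v], index[w])
            if lowlink[v] == index[v]:
                # v is the root of a strongly connected component.
                scc = []
                while True:
                    w = stack.pop()
                    onstack.discard(w)
                    scc.append(w)
                    if w == v:
                        break
                sccs.append(scc)

        for v in graph:
            if v not in index:
                strongconnect(v)
        return sccs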
def reverted(name, snapshot=None, cleanup=False):  # pylint: disable=redefined-outer-name
    '''
    .. deprecated:: 2016.3.0

    Reverts to the particular snapshot.

    .. versionadded:: 2016.3.0

    .. code-block:: yaml

        domain_name:
          virt.reverted:
            - cleanup: True

        domain_name_1:
          virt.reverted:
            - snapshot: snapshot_name
            - cleanup: False
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    try:
        domains = fnmatch.filter(__salt__['virt.list_domains'](), name)
        if not domains:
            ret['comment'] = 'No domains found for criteria "{0}"'.format(name)
        else:
            ignored_domains = list()
            if len(domains) > 1:
                ret['changes'] = {'reverted': list()}
            for domain in domains:
                result = {}
                try:
                    result = __salt__['virt.revert_snapshot'](domain,
                                                              snapshot=snapshot,
                                                              cleanup=cleanup)
                    result = {'domain': domain,
                              'current': result['reverted'],
                              'deleted': result['deleted']}
                except CommandExecutionError as err:
                    if len(domains) > 1:
                        ignored_domains.append({'domain': domain,
                                                'issue': six.text_type(err)})
                if len(domains) > 1:
                    if result:
                        ret['changes']['reverted'].append(result)
                else:
                    ret['changes'] = result
                    break

            ret['result'] = len(domains) != len(ignored_domains)
            if ret['result']:
                ret['comment'] = 'Domain{0} has been reverted'.format(
                    len(domains) > 1 and "s" or "")
            if ignored_domains:
                ret['changes']['ignored'] = ignored_domains
                if not ret['changes']['reverted']:
                    ret['changes'].pop('reverted')
    except libvirt.libvirtError as err:
        ret['comment'] = six.text_type(err)
    except CommandExecutionError as err:
        ret['comment'] = six.text_type(err)

    return ret
def _get_sortgo(self):
    """Get function for sorting GO terms in a list of namedtuples."""
    if 'sortgo' in self.datobj.kws:
        return self.datobj.kws['sortgo']
    return self.datobj.grprdflt.gosubdag.prt_attr['sort'] + "\n"
async def reply(self, *args, **kwargs):
    """
    Replies to the message (as a reply). Shorthand for
    `telethon.client.messages.MessageMethods.send_message`
    with both ``entity`` and ``reply_to`` already set.
    """
    kwargs['reply_to'] = self.id
    return await self._client.send_message(
        await self.get_input_chat(), *args, **kwargs)
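A usage sketch (assuming a Telethon event handler where event.message is a Message instance):

    # Inside an async Telethon event handler (hypothetical):
    await event.message.reply('Thanks!')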
def extract_flash(prihdr, scihdu):
    """Extract postflash data from ``FLSHFILE``.

    Parameters
    ----------
    prihdr : obj
        FITS primary header HDU.

    scihdu : obj
        Extension HDU of the science image.
        This is only used to extract subarray data.

    Returns
    -------
    flash : ndarray or `None`
        Postflash, if any. Subtract this to apply ``FLSHCORR``.

    """
    flshfile = prihdr.get('FLSHFILE', 'N/A')
    flashsta = prihdr.get('FLASHSTA', 'N/A')
    flashdur = prihdr.get('FLASHDUR', 0.0)

    if flshfile == 'N/A' or flashdur <= 0:
        return None

    if flashsta != 'SUCCESSFUL':
        warnings.warn('Flash status is {0}'.format(flashsta),
                      AstropyUserWarning)

    flshfile = from_irafpath(flshfile)
    ampstring = prihdr['CCDAMP']

    with fits.open(flshfile) as hduflash:
        if ampstring == 'ABCD':
            flash = np.concatenate(
                (hduflash['sci', 1].data,
                 hduflash['sci', 2].data[::-1, :]), axis=1)
        elif ampstring in ('A', 'B', 'AB'):
            flash = extract_ref(scihdu, hduflash['sci', 2])
        else:
            flash = extract_ref(scihdu, hduflash['sci', 1])

    flash = flash * flashdur

    return flash
def validate(self, validator, preserve_errors=False, copy=False,
             section=None):
    """
    Test the ConfigObj against a configspec.

    It uses the ``validator`` object from *validate.py*.

    To run ``validate`` on the current ConfigObj, call: ::

        test = config.validate(validator)

    (Normally having previously passed in the configspec when the ConfigObj
    was created - you can dynamically assign a dictionary of checks to the
    ``configspec`` attribute of a section though).

    It returns ``True`` if everything passes, or a dictionary of
    pass/fails (True/False). If every member of a subsection passes, it
    will just have the value ``True``. (It also returns ``False`` if all
    members fail).

    In addition, it converts the values from strings to their native
    types if their checks pass (and ``stringify`` is set).

    If ``preserve_errors`` is ``True`` (``False`` is default) then instead
    of marking a fail with a ``False``, it will preserve the actual
    exception object. This can contain info about the reason for failure.
    For example the ``VdtValueTooSmallError`` indicates that the value
    supplied was too small.

    If a value (or section) is missing it will still be marked as
    ``False``.

    You must have the validate module to use ``preserve_errors=True``.

    You can then use the ``flatten_errors`` function to turn your nested
    results dictionary into a flattened list of failures - useful for
    displaying meaningful error messages.
    """
    if section is None:
        if self.configspec is None:
            raise ValueError('No configspec supplied.')
        if preserve_errors:
            # We do this once to remove a top level dependency on the validate module
            # Which makes importing configobj faster
            from .validate import VdtMissingValue
            self._vdtMissingValue = VdtMissingValue

        section = self

        if copy:
            section.initial_comment = section.configspec.initial_comment
            section.final_comment = section.configspec.final_comment
            section.encoding = section.configspec.encoding
            section.BOM = section.configspec.BOM
            section.newlines = section.configspec.newlines
            section.indent_type = section.configspec.indent_type

    #
    # section.default_values.clear() #??
    configspec = section.configspec
    self._set_configspec(section, copy)

    def validate_entry(entry, spec, val, missing, ret_true, ret_false):
        section.default_values.pop(entry, None)

        try:
            section.default_values[entry] = validator.get_default_value(configspec[entry])
        except (KeyError, AttributeError, validator.baseErrorClass):
            # No default, bad default or validator has no 'get_default_value'
            # (e.g. SimpleVal)
            pass

        try:
            check = validator.check(spec,
                                    val,
                                    missing=missing
                                    )
        except validator.baseErrorClass as e:
            if not preserve_errors or isinstance(e, self._vdtMissingValue):
                out[entry] = False
            else:
                # preserve the error
                out[entry] = e
                ret_false = False
            ret_true = False
        else:
            ret_false = False
            out[entry] = True
            if self.stringify or missing:
                # if we are doing type conversion
                # or the value is a supplied default
                if not self.stringify:
                    if isinstance(check, (list, tuple)):
                        # preserve lists
                        check = [self._str(item) for item in check]
                    elif missing and check is None:
                        # convert the None from a default to a ''
                        check = ''
                    else:
                        check = self._str(check)
                if (check != val) or missing:
                    section[entry] = check
            if not copy and missing and entry not in section.defaults:
                section.defaults.append(entry)
        return ret_true, ret_false

    #
    out = {}
    ret_true = True
    ret_false = True

    unvalidated = [k for k in section.scalars if k not in configspec]
    incorrect_sections = [k for k in configspec.sections if k in section.scalars]
    incorrect_scalars = [k for k in configspec.scalars if k in section.sections]

    for entry in configspec.scalars:
        if entry in ('__many__', '___many___'):
            # reserved names
            continue
        if (not entry in section.scalars) or (entry in section.defaults):
            # missing entries
            # or entries from defaults
            missing = True
            val = None
            if copy and entry not in section.scalars:
                # copy comments
                section.comments[entry] = (
                    configspec.comments.get(entry, []))
                section.inline_comments[entry] = (
                    configspec.inline_comments.get(entry, ''))
            #
        else:
            missing = False
            val = section[entry]

        ret_true, ret_false = validate_entry(entry, configspec[entry], val,
                                             missing, ret_true, ret_false)

    many = None
    if '__many__' in configspec.scalars:
        many = configspec['__many__']
    elif '___many___' in configspec.scalars:
        many = configspec['___many___']

    if many is not None:
        for entry in unvalidated:
            val = section[entry]
            ret_true, ret_false = validate_entry(entry, many, val, False,
                                                 ret_true, ret_false)
        unvalidated = []

    for entry in incorrect_scalars:
        ret_true = False
        if not preserve_errors:
            out[entry] = False
        else:
            ret_false = False
            msg = 'Value %r was provided as a section' % entry
            out[entry] = validator.baseErrorClass(msg)
    for entry in incorrect_sections:
        ret_true = False
        if not preserve_errors:
            out[entry] = False
        else:
            ret_false = False
            msg = 'Section %r was provided as a single value' % entry
            out[entry] = validator.baseErrorClass(msg)

    # Missing sections will have been created as empty ones when the
    # configspec was read.
    for entry in section.sections:
        # FIXME: this means DEFAULT is not copied in copy mode
        if section is self and entry == 'DEFAULT':
            continue
        if section[entry].configspec is None:
            unvalidated.append(entry)
            continue
        if copy:
            section.comments[entry] = configspec.comments.get(entry, [])
            section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
        check = self.validate(validator, preserve_errors=preserve_errors,
                              copy=copy, section=section[entry])
        out[entry] = check
        if check == False:
            ret_true = False
        elif check == True:
            ret_false = False
        else:
            ret_true = False

    section.extra_values = unvalidated
    if preserve_errors and not section._created:
        # If the section wasn't created (i.e. it wasn't missing)
        # then we can't return False, we need to preserve errors
        ret_false = False
    #
    if ret_false and preserve_errors and out:
        # If we are preserving errors, but all
        # the failures are from missing sections / values
        # then we can return False. Otherwise there is a
        # real failure that we need to preserve.
        ret_false = not any(out.values())
    if ret_true:
        return True
    elif ret_false:
        return False
    return out
def prepare_metadata_for_build_wheel(metadata_directory, config_settings):
    """Invoke optional prepare_metadata_for_build_wheel

    Implements a fallback by building a wheel if the hook isn't defined.
    """
    backend = _build_backend()
    try:
        hook = backend.prepare_metadata_for_build_wheel
    except AttributeError:
        return _get_wheel_metadata_from_wheel(backend, metadata_directory,
                                              config_settings)
    else:
        return hook(metadata_directory, config_settings)
def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='',
            python2_bin='python2', python3_bin='python3'):
    '''
    Generate the salt-min tarball and print the location of the tarball
    Optional additional mods to include (e.g. mako) can be supplied as a comma
    delimited string.  Permits forcing an overwrite of the output file as well.

    CLI Example:

    .. code-block:: bash

        salt-run min.generate
        salt-run min.generate mako
        salt-run min.generate mako,wempy 1
        salt-run min.generate overwrite=1
    '''
    mindir = os.path.join(cachedir, 'min')
    if not os.path.isdir(mindir):
        os.makedirs(mindir)
    mintar = os.path.join(mindir, 'min.tgz')
    minver = os.path.join(mindir, 'version')
    pyminver = os.path.join(mindir, '.min-gen-py-version')
    salt_call = os.path.join(mindir, 'salt-call')
    with salt.utils.files.fopen(salt_call, 'wb') as fp_:
        fp_.write(_get_salt_call())
    if os.path.isfile(mintar):
        if not overwrite:
            if os.path.isfile(minver):
                with salt.utils.files.fopen(minver) as fh_:
                    overwrite = fh_.read() != salt.version.__version__
                if overwrite is False and os.path.isfile(pyminver):
                    with salt.utils.files.fopen(pyminver) as fh_:
                        overwrite = fh_.read() != str(sys.version_info[0])  # future lint: disable=blacklisted-function
            else:
                overwrite = True

        if overwrite:
            try:
                os.remove(mintar)
            except OSError:
                pass
        else:
            return mintar

    if _six.PY3:
        # Let's check for the minimum python 2 version requirement, 2.6
        py_shell_cmd = (
            python2_bin + ' -c \'from __future__ import print_function; import sys; '
            'print("{0}.{1}".format(*(sys.version_info[:2])));\''
        )
        cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True)
        stdout, _ = cmd.communicate()
        if cmd.returncode == 0:
            py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.'))
            if py2_version < (2, 6):
                # Bail!
                raise salt.exceptions.SaltSystemExit(
                    'The minimum required python version to run salt-ssh is "2.6".'
                    'The version reported by "{0}" is "{1}". Please try "salt-ssh '
                    '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(
                        python2_bin, stdout.strip())
                )
    elif sys.version_info < (2, 6):
        # Bail! Though, how did we reach this far in the first place.
        raise salt.exceptions.SaltSystemExit(
            'The minimum required python version to run salt-ssh is "2.6".'
        )

    tops_py_version_mapping = {}
    tops = get_tops(extra_mods=extra_mods, so_mods=so_mods)
    if _six.PY2:
        tops_py_version_mapping['2'] = tops
    else:
        tops_py_version_mapping['3'] = tops

    # TODO: Consider putting known py2 and py3 compatible libs in its own
    #       sharable directory. This would reduce the min size.
    if _six.PY2 and sys.version_info[0] == 2:
        # Get python 3 tops
        py_shell_cmd = (
            python3_bin + ' -c \'import sys; import json; import salt.utils.thin; '
            'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' '
            '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))
        )
        cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = cmd.communicate()
        if cmd.returncode == 0:
            try:
                tops = salt.utils.json.loads(stdout)
                tops_py_version_mapping['3'] = tops
            except ValueError:
                pass
    if _six.PY3 and sys.version_info[0] == 3:
        # Get python 2 tops
        py_shell_cmd = (
            python2_bin + ' -c \'from __future__ import print_function; '
            'import sys; import json; import salt.utils.thin; '
            'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' '
            '\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))
        )
        cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = cmd.communicate()
        if cmd.returncode == 0:
            try:
                tops = salt.utils.json.loads(stdout.decode('utf-8'))
                tops_py_version_mapping['2'] = tops
            except ValueError:
                pass

    tfp = tarfile.open(mintar, 'w:gz', dereference=True)
    try:  # cwd may not exist if it was removed but salt was run from it
        start_dir = os.getcwd()
    except OSError:
        start_dir = None
    tempdir = None

    # This is the absolute minimum set of files required to run salt-call
    min_files = (
        'salt/__init__.py', 'salt/utils', 'salt/utils/__init__.py',
        'salt/utils/atomicfile.py', 'salt/utils/validate',
        'salt/utils/validate/__init__.py', 'salt/utils/validate/path.py',
        'salt/utils/decorators', 'salt/utils/decorators/__init__.py',
        'salt/utils/cache.py', 'salt/utils/xdg.py', 'salt/utils/odict.py',
        'salt/utils/minions.py', 'salt/utils/dicttrim.py', 'salt/utils/sdb.py',
        'salt/utils/migrations.py', 'salt/utils/files.py',
        'salt/utils/parsers.py', 'salt/utils/locales.py', 'salt/utils/lazy.py',
        'salt/utils/s3.py', 'salt/utils/dictupdate.py', 'salt/utils/verify.py',
        'salt/utils/args.py', 'salt/utils/kinds.py', 'salt/utils/xmlutil.py',
        'salt/utils/debug.py', 'salt/utils/jid.py', 'salt/utils/openstack',
        'salt/utils/openstack/__init__.py', 'salt/utils/openstack/swift.py',
        'salt/utils/asynchronous.py', 'salt/utils/process.py',
        'salt/utils/jinja.py', 'salt/utils/rsax931.py', 'salt/utils/context.py',
        'salt/utils/minion.py', 'salt/utils/error.py', 'salt/utils/aws.py',
        'salt/utils/timed_subprocess.py', 'salt/utils/zeromq.py',
        'salt/utils/schedule.py', 'salt/utils/url.py',
        'salt/utils/yamlencoding.py', 'salt/utils/network.py',
        'salt/utils/http.py', 'salt/utils/gzip_util.py', 'salt/utils/vt.py',
        'salt/utils/templates.py', 'salt/utils/aggregation.py',
        'salt/utils/yaml.py', 'salt/utils/yamldumper.py',
        'salt/utils/yamlloader.py', 'salt/utils/event.py',
        'salt/utils/state.py', 'salt/serializers',
        'salt/serializers/__init__.py', 'salt/serializers/yamlex.py',
        'salt/template.py', 'salt/_compat.py', 'salt/loader.py',
        'salt/client', 'salt/client/__init__.py', 'salt/ext',
        'salt/ext/__init__.py', 'salt/ext/six.py', 'salt/ext/ipaddress.py',
        'salt/version.py', 'salt/syspaths.py', 'salt/defaults',
        'salt/defaults/__init__.py', 'salt/defaults/exitcodes.py',
        'salt/renderers', 'salt/renderers/__init__.py',
        'salt/renderers/jinja.py', 'salt/renderers/yaml.py', 'salt/modules',
        'salt/modules/__init__.py', 'salt/modules/test.py',
        'salt/modules/selinux.py', 'salt/modules/cmdmod.py',
        'salt/modules/saltutil.py', 'salt/minion.py', 'salt/pillar',
        'salt/pillar/__init__.py', 'salt/textformat.py', 'salt/log',
        'salt/log/__init__.py', 'salt/log/handlers',
        'salt/log/handlers/__init__.py', 'salt/log/mixins.py',
        'salt/log/setup.py', 'salt/cli', 'salt/cli/__init__.py',
        'salt/cli/caller.py', 'salt/cli/daemons.py', 'salt/cli/salt.py',
        'salt/cli/call.py', 'salt/fileserver', 'salt/fileserver/__init__.py',
        'salt/transport', 'salt/transport/__init__.py',
        'salt/transport/client.py', 'salt/exceptions.py', 'salt/grains',
        'salt/grains/__init__.py', 'salt/grains/extra.py', 'salt/scripts.py',
        'salt/state.py', 'salt/fileclient.py', 'salt/crypt.py',
        'salt/config.py', 'salt/beacons', 'salt/beacons/__init__.py',
        'salt/payload.py', 'salt/output', 'salt/output/__init__.py',
        'salt/output/nested.py',
    )

    for py_ver, tops in _six.iteritems(tops_py_version_mapping):
        for top in tops:
            base = os.path.basename(top)
            top_dirname = os.path.dirname(top)
            if os.path.isdir(top_dirname):
                os.chdir(top_dirname)
            else:
                # This is likely a compressed python .egg
                tempdir = tempfile.mkdtemp()
                egg = zipfile.ZipFile(top_dirname)
                egg.extractall(tempdir)
                top = os.path.join(tempdir, base)
                os.chdir(tempdir)
            if not os.path.isdir(top):
                # top is a single file module
                tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base))
                continue
            for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True):
                for name in files:
                    if name.endswith(('.pyc', '.pyo')):
                        continue
                    if root.startswith('salt') and os.path.join(root, name) not in min_files:
                        continue
                    tfp.add(os.path.join(root, name),
                            arcname=os.path.join('py{0}'.format(py_ver), root, name))
            if tempdir is not None:
                shutil.rmtree(tempdir)
                tempdir = None

    os.chdir(mindir)
    tfp.add('salt-call')
    with salt.utils.files.fopen(minver, 'w+') as fp_:
        fp_.write(salt.version.__version__)
    with salt.utils.files.fopen(pyminver, 'w+') as fp_:
        fp_.write(str(sys.version_info[0]))  # future lint: disable=blacklisted-function
    os.chdir(os.path.dirname(minver))
    tfp.add('version')
    tfp.add('.min-gen-py-version')
    if start_dir:
        os.chdir(start_dir)
    tfp.close()
    return mintar
def get_remote_peer_list(self):
    '''
    listPeer returns a list like:

    {
        "rtn": 0,
        "peerList": [{
            "category": "",
            "status": 0,
            "name": "GUNNER_HOME",
            "vodPort": 43566,
            "company": "XUNLEI_MIPS_BE_MIPS32",
            "pid": "8498352EB4F5208X0001",
            "lastLoginTime": 1412053233,
            "accesscode": "",
            "localIP": "",
            "location": "\u6d59\u6c5f\u7701 \u8054\u901a",
            "online": 1,
            "path_list": "C:/",
            "type": 30,
            "deviceVersion": 22083310
        }]
    }
    '''
    params = {
        'type': 0,
        'v': DEFAULT_V,
        'ct': 2
    }
    res = self._get('listPeer', params=params)
    return res['peerList']
def get_grade_entries(self):
    """Gets the grade entry list resulting from the search.

    return: (osid.grading.GradeEntryList) - the grade entry list
    raise:  IllegalState - list already retrieved
    *compliance: mandatory -- This method must be implemented.*

    """
    if self.retrieved:
        raise errors.IllegalState('List has already been retrieved.')
    self.retrieved = True
    return objects.GradeEntryList(self._results, runtime=self._runtime)
def _set_child_joined_alias_using_join_map(child, join_map, alias_map):
    """
    Set the joined alias on the child, for Django <= 1.7.x.

    :param child:
    :param join_map:
    :param alias_map:
    """
    for lhs, table, join_cols in join_map:
        if lhs is None:
            continue
        if lhs == child.alias:
            relevant_alias = child.related_alias
        elif lhs == child.related_alias:
            relevant_alias = child.alias
        else:
            continue

        join_info = alias_map[relevant_alias]
        if join_info.join_type is None:
            continue

        if join_info.lhs_alias in [child.alias, child.related_alias]:
            child.set_joined_alias(relevant_alias)
            break
def expire_token(self, token):
    """
    Given a token, makes a request to the authentication server to expire it
    immediately. This is considered a responsible way to log out a user. If
    you simply remove the session your application has for the user without
    expiring their token, the user is not _really_ logged out.

    :param token: The OAuth token you wish to expire
    :type token: str

    :returns: If the expiration attempt succeeded.
    :rtype: bool

    :raises ApiError: If the expiration attempt failed.
    """
    r = requests.post(self._login_uri("/oauth/token/expire"), data={
        "client_id": self.client_id,
        "client_secret": self.client_secret,
        "token": token,
    })
    if r.status_code != 200:
        raise ApiError("Failed to expire token!", r)
    return True
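A usage sketch for a logout flow (client and session are hypothetical names for an instance of this class and a web session store):

    # Expire the user's token server-side before dropping the local session.
    if client.expire_token(session["oauth_token"]):
        session.clear()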
def process_text(self, text, format='json'):
    """Return a mentions JSON object given text.

    Parameters
    ----------
    text : str
        Text to be processed.
    format : str
        The format of the output to produce, one of "json" or "json_ld".
        Default: "json"

    Returns
    -------
    json_dict : dict
        A JSON object of mentions extracted from text.
    """
    if self.eidos_reader is None:
        self.initialize_reader()
    default_arg = lambda x: autoclass('scala.Some')(x)
    today = datetime.date.today().strftime("%Y-%m-%d")
    fname = 'default_file_name'

    annot_doc = self.eidos_reader.extractFromText(
        text,
        True,                # keep text
        False,               # CAG-relevant only
        default_arg(today),  # doc creation time
        default_arg(fname)   # file name
        )
    if format == 'json':
        mentions = annot_doc.odinMentions()
        ser = autoclass(eidos_package +
                        '.serialization.json.WMJSONSerializer')
        mentions_json = ser.toJsonStr(mentions)
    elif format == 'json_ld':
        # We need to get a Scala Seq of annot docs here
        ml = _list_to_seq([annot_doc])
        # We currently do not need to instantiate the adjective grounder;
        # if we want to reinstate it, we would need to do the following
        # ag = EidosAdjectiveGrounder.fromConfig(
        #     EidosSystem.defaultConfig.getConfig("adjectiveGrounder"))
        # We now create a JSON-LD corpus
        jc = autoclass(eidos_package + '.serialization.json.JLDCorpus')
        corpus = jc(ml)
        # Finally, serialize the corpus into JSON string
        mentions_json = corpus.toJsonStr()
    json_dict = json.loads(mentions_json)
    return json_dict
Return a mentions JSON object given text. Parameters ---------- text : str Text to be processed. format : str The format of the output to produce, one of "json" or "json_ld". Default: "json" Returns ------- json_dict : dict A JSON object of mentions extracted from text.
def print_details(self):
    """Print torrent details"""
    print("Title:", self.title)
    print("Category:", self.category)
    print("Page:", self.page)
    print("Size:", self.size)
    print("Files:", self.files)
    print("Age:", self.age)
    print("Seeds:", self.seeders)
    print("Leechers:", self.leechers)
    print("Magnet:", self.magnet)
    print("Download:", self.download)
    print("Verified:", self.isVerified)
Print torrent details
def integrate(self, coord, datetime_unit=None):
    """ integrate the array with the trapezoidal rule.

    .. note::
        This feature is limited to simple cartesian geometry, i.e. coord
        must be one dimensional.

    Parameters
    ----------
    coord: str, or a sequence of str
        Coordinate(s) used for the integration.
    datetime_unit
        Unit to use if a datetime coordinate is used. One of
        {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs',
         'as'}

    Returns
    -------
    integrated: Dataset

    See also
    --------
    DataArray.integrate
    numpy.trapz: corresponding numpy function
    """
    if not isinstance(coord, (list, tuple)):
        coord = (coord, )
    result = self
    for c in coord:
        result = result._integrate_one(c, datetime_unit=datetime_unit)
    return result
integrate the array with the trapezoidal rule.

.. note::
    This feature is limited to simple cartesian geometry, i.e. coord
    must be one dimensional.

Parameters
----------
coord: str, or a sequence of str
    Coordinate(s) used for the integration.
datetime_unit
    Unit to use if a datetime coordinate is used. One of
    {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs', 'as'}

Returns
-------
integrated: Dataset

See also
--------
DataArray.integrate
numpy.trapz: corresponding numpy function
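As a quick illustration of what each per-coordinate step amounts to, the following sketch reproduces the underlying trapezoidal rule on a simple one-dimensional example; the sample data is made up.

import numpy as np

# One integration step over a 1-D coordinate reduces to numpy.trapz:
x = np.linspace(0.0, np.pi, 101)
y = np.sin(x)
print(np.trapz(y, x))  # ~2.0, the analytic integral of sin over [0, pi]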
def d2Ibr_dV2(Ybr, V, lam): """ Computes 2nd derivatives of complex branch current w.r.t. voltage. """ nb = len(V) diaginvVm = spdiag(div(matrix(1.0, (nb, 1)), abs(V))) Haa = spdiag(mul(-(Ybr.T * lam), V)) Hva = -1j * Haa * diaginvVm Hav = Hva Hvv = spmatrix([], [], [], (nb, nb)) return Haa, Hav, Hva, Hvv
Computes 2nd derivatives of complex branch current w.r.t. voltage.
def piece_size(model_file=None, model_proto=None, name=None): """Returns the piece size (vocabulary size). Args: model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A scalar representing the vocabulary size. """ return _gen_sentencepiece_processor_op.sentencepiece_get_piece_size( model_file=model_file, model_proto=model_proto, name=name)
Returns the piece size (vocabulary size). Args: model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A scalar representing the vocabulary size.
def clear_thumbnails(self):
    '''clear all thumbnails from the map'''
    state = self.state

    for l in state.layers:
        # copy the keys so we can pop entries while iterating
        keys = list(state.layers[l].keys())
        for key in keys:
            if (isinstance(state.layers[l][key], SlipThumbnail)
                and not isinstance(state.layers[l][key], SlipIcon)):
                state.layers[l].pop(key)
clear all thumbnails from the map
def patText(s0): '''make text pattern''' arr = np.zeros((s0,s0), dtype=np.uint8) s = int(round(s0/100.)) p1 = 0 pp1 = int(round(s0/10.)) for pos0 in np.linspace(0,s0,10): cv2.putText(arr, 'helloworld', (p1,int(round(pos0))), cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale=s, color=255, thickness=s, lineType=cv2.LINE_AA ) if p1: p1 = 0 else: p1 = pp1 return arr.astype(float)
make text pattern
def render(template, context, partials={}, state=None):
    """
    Renders a given mustache template, with sane defaults.
    """
    # Create a new state by default
    state = state or State()

    # Add context to the state dict
    if isinstance(context, Context):
        state.context = context
    else:
        state.context = Context(context)

    # Add any partials to the state dict
    if partials:
        state.partials.push(partials)

    # Render the template with the assembled state
    return __render(make_unicode(template), state)
Renders a given mustache template, with sane defaults.
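A minimal usage sketch, assuming render above is importable from this module; the template and context values are illustrative only.

# Assuming `render` above is in scope:
template = u"Hello, {{name}}! You have {{count}} new messages."
print(render(template, {"name": "Ada", "count": 3}))
# -> Hello, Ada! You have 3 new messages.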
def is_not_none(self, a, message=None): "Check if a value is not None" if a is None: self.log_error("{} is None".format(str(a)), message) return False return True
Check if a value is not None
def sync_one(self, aws_syncr, amazon, bucket): """Make sure this bucket exists and has only attributes we want it to have""" if bucket.permission.statements: permission_document = bucket.permission.document else: permission_document = "" bucket_info = amazon.s3.bucket_info(bucket.name) if not bucket_info.creation_date: amazon.s3.create_bucket(bucket.name, permission_document, bucket) else: amazon.s3.modify_bucket(bucket_info, bucket.name, permission_document, bucket)
Make sure this bucket exists and has only attributes we want it to have
def apply_network(network, x, chunksize=None): """ Apply a pytorch network, potentially in chunks """ network_is_cuda = next(network.parameters()).is_cuda x = torch.from_numpy(x) with torch.no_grad(): if network_is_cuda: x = x.cuda() if chunksize is None: return from_var(network(x)) return np.concatenate( [from_var(network(x[i: i + chunksize])) for i in range(0, len(x), chunksize)])
Apply a pytorch network, potentially in chunks
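A usage sketch with a toy network, assuming apply_network and its from_var helper above are in scope; the chunking bounds peak memory to chunksize rows at a time.

import numpy as np
import torch

net = torch.nn.Linear(4, 2)                 # toy network for illustration
x = np.random.rand(1000, 4).astype(np.float32)
# Assuming apply_network (and its from_var helper) above are importable:
out = apply_network(net, x, chunksize=256)  # processed 256 rows at a time
print(out.shape)                            # (1000, 2)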
def get_assessment_part_item_design_session(self, *args, **kwargs): """Gets the ``OsidSession`` associated with the assessment part item design service. return: (osid.assessment.authoring.AssessmentPartItemDesignSession) - an ``AssessmentPartItemDesignSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_part_item_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_part_lookup()`` is ``true``.* """ if not self.supports_assessment_part_lookup(): # This is kludgy, but only until Tom fixes spec raise errors.Unimplemented() if self._proxy_in_args(*args, **kwargs): raise errors.InvalidArgument('A Proxy object was received but not expected.') # pylint: disable=no-member return sessions.AssessmentPartItemDesignSession(runtime=self._runtime)
Gets the ``OsidSession`` associated with the assessment part item design service. return: (osid.assessment.authoring.AssessmentPartItemDesignSession) - an ``AssessmentPartItemDesignSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_part_item_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_part_lookup()`` is ``true``.*
def UrlGet(url, timeout=10, retries=0): """ Retrieve content from the given URL. """ # in Python 2.6 we can pass timeout to urllib2.urlopen socket.setdefaulttimeout(timeout) attempts = 0 content = None while not content: try: content = urllib2.urlopen(url).read() except urllib2.URLError: attempts = attempts + 1 if attempts > retries: raise IOError('Failed to fetch url: %s' % url) return content
Retrieve content from the given URL.
def load_filter_plugins(entrypoint_group: str) -> Iterable[Filter]:
    """
    Load all filter plugins that are registered with pkg_resources

    Parameters
    ==========
    entrypoint_group: str
        The entrypoint group name to load plugins from

    Returns
    =======
    List of Filter:
        A list of objects derived from the Filter class
    """
    global loaded_filter_plugins
    enabled_plugins: List[str] = []
    config = BandersnatchConfig().config
    try:
        config_blacklist_plugins = config["blacklist"]["plugins"]
        split_plugins = config_blacklist_plugins.split("\n")
        if "all" in split_plugins:
            enabled_plugins = ["all"]
        else:
            for plugin in split_plugins:
                if not plugin:
                    continue
                enabled_plugins.append(plugin)
    except KeyError:
        pass

    # If the plugins for the entrypoint_group have been loaded, return them
    cached_plugins = loaded_filter_plugins.get(entrypoint_group)
    if cached_plugins:
        return cached_plugins

    plugins = set()
    for entry_point in pkg_resources.iter_entry_points(group=entrypoint_group):
        plugin_class = entry_point.load()
        plugin_instance = plugin_class()
        if "all" in enabled_plugins or plugin_instance.name in enabled_plugins:
            plugins.add(plugin_instance)

    loaded_filter_plugins[entrypoint_group] = list(plugins)

    return plugins
Load all filter plugins that are registered with pkg_resources

Parameters
==========
entrypoint_group: str
    The entrypoint group name to load plugins from

Returns
=======
List of Filter:
    A list of objects derived from the Filter class
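For context, a plugin becomes discoverable by this loader by registering under an entry-point group in its package's setup.py. The group, module, and class names below are illustrative assumptions, not taken from the source.

from setuptools import setup

# Sketch: registering a filter plugin under an entry-point group so
# pkg_resources.iter_entry_points can find it. All names are made up.
setup(
    name="my-filter-plugin",
    version="0.1",
    py_modules=["my_filters"],
    entry_points={
        "bandersnatch_filter_plugins.project": [
            "example_filter = my_filters:ExampleFilter",
        ],
    },
)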
def get_withdrawal_quotas(self, currency): """Get withdrawal quotas for a currency https://docs.kucoin.com/#get-withdrawal-quotas :param currency: Name of currency :type currency: string .. code:: python quotas = client.get_withdrawal_quotas('ETH') :returns: ApiResponse .. code:: python { "currency": "ETH", "availableAmount": 2.9719999, "remainAmount": 2.9719999, "withdrawMinSize": 0.1000000, "limitBTCAmount": 2.0, "innerWithdrawMinFee": 0.00001, "isWithdrawEnabled": true, "withdrawMinFee": 0.0100000, "precision": 7 } :raises: KucoinResponseException, KucoinAPIException """ data = { 'currency': currency } return self._get('withdrawals/quotas', True, data=data)
Get withdrawal quotas for a currency https://docs.kucoin.com/#get-withdrawal-quotas :param currency: Name of currency :type currency: string .. code:: python quotas = client.get_withdrawal_quotas('ETH') :returns: ApiResponse .. code:: python { "currency": "ETH", "availableAmount": 2.9719999, "remainAmount": 2.9719999, "withdrawMinSize": 0.1000000, "limitBTCAmount": 2.0, "innerWithdrawMinFee": 0.00001, "isWithdrawEnabled": true, "withdrawMinFee": 0.0100000, "precision": 7 } :raises: KucoinResponseException, KucoinAPIException
def token(self, i, restrict=None):
    """
    Get the i'th token, and if i is one past the end, then scan for
    another token; restrict is a list of tokens that are allowed, or
    0 for any token.
    """
    tokens_len = len(self.tokens)
    if i == tokens_len:  # We are at the end, get the next...
        tokens_len += self.scan(restrict)

    if i < tokens_len:
        if restrict and self.restrictions[i] and restrict > self.restrictions[i]:
            raise NotImplementedError(
                "Unimplemented: restriction set changed")
        return self.tokens[i]
    raise NoMoreTokens()
Get the i'th token, and if i is one past the end, then scan for another token; restrict is a list of tokens that are allowed, or 0 for any token.
def save_object(collection, obj): """Save an object ``obj`` to the given ``collection``. ``obj.id`` must be unique across all other existing objects in the given collection. If ``id`` is not present in the object, a *UUID* is assigned as the object's ``id``. Indexes already defined on the ``collection`` are updated after the object is saved. Returns the object. """ if 'id' not in obj: obj.id = uuid() id = obj.id path = object_path(collection, id) temp_path = '%s.temp' % path with open(temp_path, 'w') as f: data = _serialize(obj) f.write(data) shutil.move(temp_path, path) if id in _db[collection].cache: _db[collection].cache[id] = obj _update_indexes_for_mutated_object(collection, obj) return obj
Save an object ``obj`` to the given ``collection``. ``obj.id`` must be unique across all other existing objects in the given collection. If ``id`` is not present in the object, a *UUID* is assigned as the object's ``id``. Indexes already defined on the ``collection`` are updated after the object is saved. Returns the object.
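The write-to-temp-then-move step above is what makes the on-disk update safe; here is that pattern in isolation, with a made-up path and a plain JSON serializer standing in for the module's own _serialize.

import json
import shutil

def atomic_write(path, obj):
    # Write to a sibling temp file first, then rename over the target.
    # On POSIX a same-directory rename is atomic, so readers never
    # observe a half-written file.
    temp_path = '%s.temp' % path
    with open(temp_path, 'w') as f:
        json.dump(obj, f)
    shutil.move(temp_path, path)

atomic_write('/tmp/demo.json', {'id': 'abc', 'n': 1})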
def set_uri(self, uri): ''' Sets the uri to the public object @param uri: a uri @type uri: string ''' publicObj = self.get_public() if publicObj is not None: publicObj.set_uri(uri) else: publicObj = Cpublic() publicObj.set_uri(uri) self.set_public(publicObj)
Sets the uri to the public object @param uri: a uri @type uri: string
def t_NUMBER(self, t): r'\d+\.?\d*' if t.value.find(".") != -1: t.value = float(t.value) else: t.value = int(t.value) return t
\d+\.?\d*
def enable_category(self, category: str) -> None: """ Enable an entire category of commands :param category: the category to enable """ for cmd_name in list(self.disabled_commands): func = self.disabled_commands[cmd_name].command_function if hasattr(func, HELP_CATEGORY) and getattr(func, HELP_CATEGORY) == category: self.enable_command(cmd_name)
Enable an entire category of commands :param category: the category to enable
def meanApprox(self, timeout, confidence=0.95): """ .. note:: Experimental Approximate operation to return the mean within a timeout or meet the confidence. >>> rdd = sc.parallelize(range(1000), 10) >>> r = sum(range(1000)) / 1000.0 >>> abs(rdd.meanApprox(1000) - r) / r < 0.05 True """ jrdd = self.map(float)._to_java_object_rdd() jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd()) r = jdrdd.meanApprox(timeout, confidence).getFinalValue() return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
.. note:: Experimental Approximate operation to return the mean within a timeout or meet the confidence. >>> rdd = sc.parallelize(range(1000), 10) >>> r = sum(range(1000)) / 1000.0 >>> abs(rdd.meanApprox(1000) - r) / r < 0.05 True
def add_get(self, *args, **kwargs): """ Shortcut for add_route with method GET """ return self.add_route(hdrs.METH_GET, *args, **kwargs)
Shortcut for add_route with method GET
def save_csv(self, csv_location): # type: (str) -> None """ Save the csv to a file """ with open(csv_location, 'w') as csv_handle: writer = csv.writer(csv_handle) for row in self.csv_data: writer.writerow(row)
Save the csv to a file
def remove_ticks(ax, x=False, y=False):
    """
    Remove ticks from axis.
    Parameters:
    ax: axes to work on
    x: if True, remove xticks. Default False.
    y: if True, remove yticks. Default False.
    Examples:
    remove_ticks(ax, x=True)
    remove_ticks(ax, x=True, y=True)
    """
    if x:
        ax.xaxis.set_ticks_position("none")
    if y:
        ax.yaxis.set_ticks_position("none")
    return ax
Remove ticks from axis.
Parameters:
ax: axes to work on
x: if True, remove xticks. Default False.
y: if True, remove yticks. Default False.
Examples:
remove_ticks(ax, x=True)
remove_ticks(ax, x=True, y=True)
def neighbor_graph(X, metric='euclidean', k=None, epsilon=None, weighting='none', precomputed=False): '''Build a neighbor graph from pairwise distance information. X : two-dimensional array-like Shape must either be (num_pts, num_dims) or (num_pts, num_pts). k : int, maximum number of nearest neighbors epsilon : float, maximum distance to a neighbor metric : str, type of distance metric (see sklearn.metrics) When metric='precomputed', X is a symmetric distance matrix. weighting : str, one of {'binary', 'none'} When weighting='binary', all edge weights == 1. ''' if k is None and epsilon is None: raise ValueError('Must provide `k` or `epsilon`.') if weighting not in ('binary', 'none'): raise ValueError('Invalid weighting param: %r' % weighting) # TODO: deprecate the precomputed kwarg precomputed = precomputed or (metric == 'precomputed') binary = weighting == 'binary' # Try the fast path, if possible. if not precomputed and epsilon is None: return _sparse_neighbor_graph(X, k, binary, metric) if precomputed: D = X else: D = pairwise_distances(X, metric=metric) return _slow_neighbor_graph(D, k, epsilon, binary)
Build a neighbor graph from pairwise distance information. X : two-dimensional array-like Shape must either be (num_pts, num_dims) or (num_pts, num_pts). k : int, maximum number of nearest neighbors epsilon : float, maximum distance to a neighbor metric : str, type of distance metric (see sklearn.metrics) When metric='precomputed', X is a symmetric distance matrix. weighting : str, one of {'binary', 'none'} When weighting='binary', all edge weights == 1.
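A usage sketch, assuming neighbor_graph above is importable; the data is random and the parameter values are arbitrary.

import numpy as np

X = np.random.rand(50, 3)
# Assuming neighbor_graph above is in scope:
G_knn = neighbor_graph(X, k=5)                              # 5-nearest-neighbor graph
G_eps = neighbor_graph(X, epsilon=0.3, weighting='binary')  # radius graph, 0/1 weights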
def update_profiles(self): """List all profiles in the ipython_dir and cwd. """ for path in [get_ipython_dir(), os.getcwdu()]: for profile in list_profiles_in(path): pd = self.get_profile_dir(profile, path) if profile not in self.profiles: self.log.debug("Adding cluster profile '%s'" % profile) self.profiles[profile] = { 'profile': profile, 'profile_dir': pd, 'status': 'stopped' }
List all profiles in the ipython_dir and cwd.
def gps2_raw_send(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible, dgps_numch, dgps_age, force_mavlink1=False): ''' Second GPS data. Coordinate frame is right-handed, Z-axis up (GPS frame). time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) fix_type : See the GPS_FIX_TYPE enum. (uint8_t) lat : Latitude (WGS84), in degrees * 1E7 (int32_t) lon : Longitude (WGS84), in degrees * 1E7 (int32_t) alt : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t) eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: UINT16_MAX (uint16_t) epv : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: UINT16_MAX (uint16_t) vel : GPS ground speed (m/s * 100). If unknown, set to: UINT16_MAX (uint16_t) cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: UINT16_MAX (uint16_t) satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t) dgps_numch : Number of DGPS satellites (uint8_t) dgps_age : Age of DGPS info (uint32_t) ''' return self.send(self.gps2_raw_encode(time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible, dgps_numch, dgps_age), force_mavlink1=force_mavlink1)
Second GPS data. Coordinate frame is right-handed, Z-axis up (GPS frame). time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) fix_type : See the GPS_FIX_TYPE enum. (uint8_t) lat : Latitude (WGS84), in degrees * 1E7 (int32_t) lon : Longitude (WGS84), in degrees * 1E7 (int32_t) alt : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t) eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: UINT16_MAX (uint16_t) epv : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: UINT16_MAX (uint16_t) vel : GPS ground speed (m/s * 100). If unknown, set to: UINT16_MAX (uint16_t) cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: UINT16_MAX (uint16_t) satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t) dgps_numch : Number of DGPS satellites (uint8_t) dgps_age : Age of DGPS info (uint32_t)
def build_js(): ''' Build BokehJS files (CSS, JS, etc) under the ``bokehjs`` source subdirectory. Also prints a table of statistics about the generated assets (file sizes, etc.) or any error messages if the build fails. Note this function only builds BokehJS assets, it does not install them into the python source tree. ''' print("Building BokehJS... ", end="") sys.stdout.flush() os.chdir('bokehjs') cmd = ["node", "make", 'build', '--emit-error'] t0 = time.time() try: proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: print(BUILD_EXEC_FAIL_MSG % (cmd, e)) sys.exit(1) finally: os.chdir('..') result = proc.wait() t1 = time.time() if result != 0: indented_msg = "" outmsg = proc.stdout.read().decode('ascii', errors='ignore') outmsg = "\n".join(" " + x for x in outmsg.split("\n")) errmsg = proc.stderr.read().decode('ascii', errors='ignore') errmsg = "\n".join(" " + x for x in errmsg.split("\n")) print(BUILD_FAIL_MSG % (red(outmsg), red(errmsg))) sys.exit(1) indented_msg = "" msg = proc.stdout.read().decode('ascii', errors='ignore') pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL) for line in msg.strip().split("\n"): m = pat.match(line) if not m: continue # skip generate.py output lines stamp, txt = m.groups() indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n" msg = "\n".join(" " + x for x in msg.split("\n")) print(BUILD_SUCCESS_MSG % indented_msg) print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0)))) print() print("Build artifact sizes:") try: def size(*path): return os.stat(join("bokehjs", "build", *path)).st_size / 2**10 print(" - bokeh.js : %6.1f KB" % size("js", "bokeh.js")) print(" - bokeh.css : %6.1f KB" % size("css", "bokeh.css")) print(" - bokeh.min.js : %6.1f KB" % size("js", "bokeh.min.js")) print(" - bokeh.min.css : %6.1f KB" % size("css", "bokeh.min.css")) print(" - bokeh-widgets.js : %6.1f KB" % size("js", "bokeh-widgets.js")) print(" - bokeh-widgets.css : %6.1f KB" % size("css", "bokeh-widgets.css")) print(" - bokeh-widgets.min.js : %6.1f KB" % size("js", "bokeh-widgets.min.js")) print(" - bokeh-widgets.min.css : %6.1f KB" % size("css", "bokeh-widgets.min.css")) print(" - bokeh-tables.js : %6.1f KB" % size("js", "bokeh-tables.js")) print(" - bokeh-tables.css : %6.1f KB" % size("css", "bokeh-tables.css")) print(" - bokeh-tables.min.js : %6.1f KB" % size("js", "bokeh-tables.min.js")) print(" - bokeh-tables.min.css : %6.1f KB" % size("css", "bokeh-tables.min.css")) print(" - bokeh-api.js : %6.1f KB" % size("js", "bokeh-api.js")) print(" - bokeh-api.min.js : %6.1f KB" % size("js", "bokeh-api.min.js")) except Exception as e: print(BUILD_SIZE_FAIL_MSG % e) sys.exit(1)
Build BokehJS files (CSS, JS, etc) under the ``bokehjs`` source subdirectory. Also prints a table of statistics about the generated assets (file sizes, etc.) or any error messages if the build fails. Note this function only builds BokehJS assets, it does not install them into the python source tree.
def get_playcount(self): """Returns the user's playcount so far.""" doc = self._request(self.ws_prefix + ".getInfo", True) return _number(_extract(doc, "playcount"))
Returns the user's playcount so far.
def _read_file(self, filename): """Return the lines from the given file, ignoring lines that start with comments""" result = [] with open(filename, 'r') as f: lines = f.read().split('\n') for line in lines: nocomment = line.strip().split('#')[0].strip() if nocomment: result.append(nocomment) return result
Return the lines from the given file, ignoring lines that start with comments
def initialize(self, request): """Store the data we'll need to make the postback from the request object.""" if request.method == 'GET': # PDT only - this data is currently unused self.query = request.META.get('QUERY_STRING', '') elif request.method == 'POST': # The following works if paypal sends an ASCII bytestring, which it does. self.query = request.body.decode('ascii') self.ipaddress = request.META.get('REMOTE_ADDR', '')
Store the data we'll need to make the postback from the request object.
def capture_insert(self, *, exclude_fields=()): """Apply :meth:`.TriggerLogAbstract.capture_insert_from_model` for this log.""" return self.capture_insert_from_model(self.table_name, self.record_id, exclude_fields=exclude_fields)
Apply :meth:`.TriggerLogAbstract.capture_insert_from_model` for this log.
def _progress(bytes_received, bytes_total, worker):
    """Emit download progress."""
    worker.sig_download_progress.emit(
        worker.url, worker.path, bytes_received, bytes_total)
Emit download progress.
def save(self, to_save, manipulate=True, check_keys=True, **kwargs): """Save a document in this collection. **DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead. .. versionchanged:: 3.0 Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write operations. """ warnings.warn("save is deprecated. Use insert_one or replace_one " "instead", DeprecationWarning, stacklevel=2) common.validate_is_document_type("to_save", to_save) write_concern = None collation = validate_collation_or_none(kwargs.pop('collation', None)) if kwargs: write_concern = WriteConcern(**kwargs) with self._socket_for_writes() as sock_info: if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save): return self._insert(sock_info, to_save, True, check_keys, manipulate, write_concern) else: self._update(sock_info, {"_id": to_save["_id"]}, to_save, True, check_keys, False, manipulate, write_concern, collation=collation) return to_save.get("_id")
Save a document in this collection. **DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead. .. versionchanged:: 3.0 Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write operations.
def urlfetch_async(self, url, method='GET', headers=None, payload=None, deadline=None, callback=None, follow_redirects=False): """Make an async urlfetch() call. This is an async wrapper around urlfetch(). It adds an authentication header. Args: url: the url to fetch. method: the method in which to fetch. headers: the http headers. payload: the data to submit in the fetch. deadline: the deadline in which to make the call. callback: the call to make once completed. follow_redirects: whether or not to follow redirects. Yields: This returns a Future despite not being decorated with @ndb.tasklet! """ headers = {} if headers is None else dict(headers) headers.update(self.user_agent) try: self.token = yield self.get_token_async() except app_identity.InternalError, e: if os.environ.get('DATACENTER', '').endswith('sandman'): self.token = None logging.warning('Could not fetch an authentication token in sandman ' 'based Appengine devel setup; proceeding without one.') else: raise e if self.token: headers['authorization'] = 'OAuth ' + self.token deadline = deadline or self.retry_params.urlfetch_timeout ctx = ndb.get_context() resp = yield ctx.urlfetch( url, payload=payload, method=method, headers=headers, follow_redirects=follow_redirects, deadline=deadline, callback=callback) raise ndb.Return(resp)
Make an async urlfetch() call. This is an async wrapper around urlfetch(). It adds an authentication header. Args: url: the url to fetch. method: the method in which to fetch. headers: the http headers. payload: the data to submit in the fetch. deadline: the deadline in which to make the call. callback: the call to make once completed. follow_redirects: whether or not to follow redirects. Yields: This returns a Future despite not being decorated with @ndb.tasklet!
def select_peer(peer_addrs, service, routing_id, method): '''Choose a target from the available peers for a singular message :param peer_addrs: the ``(host, port)``s of the peers eligible to handle the RPC, and possibly a ``None`` entry if this hub can handle it locally :type peer_addrs: list :param service: the service of the message :type service: anything hash-able :param routing_id: the routing_id of the message :type routing_id: int :param method: the message method name :type method: string :returns: one of the provided peer_addrs There is no reason to call this method directly, but it may be useful to override it in a Hub subclass. This default implementation uses ``None`` if it is available (prefer local handling), then falls back to a random selection. ''' if any(p is None for p in peer_addrs): return None return random.choice(peer_addrs)
Choose a target from the available peers for a singular message :param peer_addrs: the ``(host, port)``s of the peers eligible to handle the RPC, and possibly a ``None`` entry if this hub can handle it locally :type peer_addrs: list :param service: the service of the message :type service: anything hash-able :param routing_id: the routing_id of the message :type routing_id: int :param method: the message method name :type method: string :returns: one of the provided peer_addrs There is no reason to call this method directly, but it may be useful to override it in a Hub subclass. This default implementation uses ``None`` if it is available (prefer local handling), then falls back to a random selection.
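The policy is easy to see on a small example, assuming select_peer above is in scope; the addresses below are made up.

peers = [("10.0.0.1", 9000), None, ("10.0.0.2", 9000)]
# A None entry means this hub can handle the message locally, and local wins:
assert select_peer(peers, "svc", 1, "ping") is None
# With no None entry, the choice is uniformly random among the peers:
print(select_peer([("10.0.0.1", 9000), ("10.0.0.2", 9000)], "svc", 1, "ping"))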
def find_minimum_spanning_forest(graph): """Calculates the minimum spanning forest of a disconnected graph. Returns a list of lists, each containing the edges that define that tree. Returns an empty list for an empty graph. """ msf = [] if graph.num_nodes() == 0: return msf if graph.num_edges() == 0: return msf connected_components = get_connected_components_as_subgraphs(graph) for subgraph in connected_components: edge_list = kruskal_mst(subgraph) msf.append(edge_list) return msf
Calculates the minimum spanning forest of a disconnected graph. Returns a list of lists, each containing the edges that define that tree. Returns an empty list for an empty graph.
def list_models(self, limit=-1, offset=-1): """List models in the database. Takes optional parameters limit and offset for pagination. Parameters ---------- limit : int Limit number of models in the result set offset : int Set offset in list (order as defined by object store) Returns ------- ObjectListing """ return self.list_objects(limit=limit, offset=offset)
List models in the database. Takes optional parameters limit and offset for pagination. Parameters ---------- limit : int Limit number of models in the result set offset : int Set offset in list (order as defined by object store) Returns ------- ObjectListing
def unwrap_arguments(xml_response):
    """Extract arguments and their values from a SOAP response.

    Args:
        xml_response (str): SOAP/xml response text (unicode, not utf-8).
    Returns:
        dict: a dict of ``{argument_name: value}`` items.
    """
    # A UPnP SOAP response (including headers) looks like this:

    # HTTP/1.1 200 OK
    # CONTENT-LENGTH: bytes in body
    # CONTENT-TYPE: text/xml; charset="utf-8" DATE: when response was
    # generated
    # EXT:
    # SERVER: OS/version UPnP/1.0 product/version
    #
    # <?xml version="1.0"?>
    # <s:Envelope
    #   xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
    #   s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
    #   <s:Body>
    #     <u:actionNameResponse
    #       xmlns:u="urn:schemas-upnp-org:service:serviceType:v">
    #       <argumentName>out arg value</argumentName>
    #       ... other out args and their values go here, if any
    #     </u:actionNameResponse>
    #   </s:Body>
    # </s:Envelope>

    # Get all tags in order. ElementTree (in python 2.x) seems to prefer to
    # be fed bytes, rather than unicode
    xml_response = xml_response.encode('utf-8')
    try:
        tree = XML.fromstring(xml_response)
    except XML.ParseError:
        # Try to filter illegal xml chars (as unicode), in case that is
        # the reason for the parse error
        filtered = illegal_xml_re.sub('', xml_response.decode('utf-8'))\
            .encode('utf-8')
        tree = XML.fromstring(filtered)

    # Get the first child of the <Body> tag which will be
    # <{actionNameResponse}> (depends on what actionName is). Turn the
    # children of this into a {tagname, content} dict. XML unescaping
    # is carried out for us by ElementTree.
    action_response = tree.find(
        "{http://schemas.xmlsoap.org/soap/envelope/}Body")[0]
    return dict((i.tag, i.text or "") for i in action_response)
Extract arguments and their values from a SOAP response. Args: xml_response (str): SOAP/xml response text (unicode, not utf-8). Returns: dict: a dict of ``{argument_name: value}`` items.
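A usage sketch with a hand-written response, assuming unwrap_arguments above is in scope; the service and argument names are invented for illustration.

soap = (
    u'<?xml version="1.0"?>'
    u'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">'
    u'<s:Body>'
    u'<u:GetVolumeResponse'
    u' xmlns:u="urn:schemas-upnp-org:service:RenderingControl:1">'
    u'<CurrentVolume>42</CurrentVolume>'
    u'</u:GetVolumeResponse>'
    u'</s:Body>'
    u'</s:Envelope>'
)
# Assuming unwrap_arguments above is importable:
print(unwrap_arguments(soap))  # {'CurrentVolume': '42'}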
def delete_handle_value(self, handle, key):
    '''
    Delete a key-value pair from a handle record. If the key exists more
    than once, all key-value pairs with this key are deleted.

    :param handle: Handle from whose record the entry should be deleted.
    :param key: Key to be deleted. Also accepts a list of keys.
    :raises: :exc:`~b2handle.handleexceptions.HandleAuthenticationError`
    :raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
    :raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError`
    '''
    LOGGER.debug('delete_handle_value...')

    # read handle record:
    handlerecord_json = self.retrieve_handle_record_json(handle)
    if handlerecord_json is None:
        msg = 'Cannot modify a nonexistent handle'
        raise HandleNotFoundException(handle=handle, msg=msg)
    list_of_entries = handlerecord_json['values']

    # find indices to delete:
    keys = None
    indices = []
    if not isinstance(key, list):
        keys = [key]
    else:
        keys = key
    keys_done = []
    for key in keys:

        # filter HS_ADMIN
        if key == 'HS_ADMIN':
            op = 'deleting "HS_ADMIN"'
            raise IllegalOperationException(operation=op, handle=handle)

        if key not in keys_done:
            indices_onekey = self.get_handlerecord_indices_for_key(key, list_of_entries)
            indices = indices + indices_onekey
            keys_done.append(key)

    # Important: If key not found, do not continue, as deleting without
    # indices would delete the entire handle!!
    if not len(indices) > 0:
        LOGGER.debug('delete_handle_value: No values for key(s) ' + str(keys))
        return None
    else:
        # delete and process response:
        op = 'deleting "' + str(keys) + '"'
        resp = self.__send_handle_delete_request(handle, indices=indices, op=op)
        if hsresponses.handle_success(resp):
            LOGGER.debug("delete_handle_value: Deleted handle values " + str(keys) + " of handle " + handle)
        elif hsresponses.values_not_found(resp):
            pass
        else:
            raise GenericHandleError(
                operation=op,
                handle=handle,
                response=resp
            )
Delete a key-value pair from a handle record. If the key exists more than once, all key-value pairs with this key are deleted. :param handle: Handle from whose record the entry should be deleted. :param key: Key to be deleted. Also accepts a list of keys. :raises: :exc:`~b2handle.handleexceptions.HandleAuthenticationError` :raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException` :raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError`
async def api_call(self, endpoint): """Call the API.""" data = None if self.session is None: self.session = aiohttp.ClientSession() try: async with async_timeout.timeout(5, loop=self.loop): response = await self.session.get(endpoint, headers=HEADERS) data = await response.json() except aiohttp.ClientError as error: LOGGER.error('Error connecting to Ruter, %s', error) except asyncio.TimeoutError as error: LOGGER.debug('Timeout connecting to Ruter, %s', error) return data
Call the API.
def growSynapses(self, segments, activeInputsBySource, initialPermanence): """ Grow synapses to each of the specified inputs on each specified segment. @param segments (numpy array) The segments that should add synapses @param activeInputsBySource (dict) The active cells in each source. Example: {"customInputName1": np.array([42, 69])} @param initialPermanence (float) """ for source, connections in self.connectionsBySource.iteritems(): connections.growSynapses(segments, activeInputsBySource[source], initialPermanence)
Grow synapses to each of the specified inputs on each specified segment. @param segments (numpy array) The segments that should add synapses @param activeInputsBySource (dict) The active cells in each source. Example: {"customInputName1": np.array([42, 69])} @param initialPermanence (float)
def place(vertices_resources, nets, machine, constraints, breadth_first=True): """Places vertices in breadth-first order along a hilbert-curve path through the chips in the machine. This is a thin wrapper around the :py:func:`sequential <rig.place_and_route.place.sequential.place>` placement algorithm which optionally uses the :py:func:`breadth_first_vertex_order` vertex ordering (if the breadth_first argument is True, the default) and :py:func:`hilbert_chip_order` for chip ordering. Parameters ---------- breadth_first : bool Should vertices be placed in breadth first order rather than the iteration order of vertices_resources. True by default. """ return sequential_place(vertices_resources, nets, machine, constraints, (None if not breadth_first else breadth_first_vertex_order(vertices_resources, nets)), hilbert_chip_order(machine))
Places vertices in breadth-first order along a hilbert-curve path through the chips in the machine. This is a thin wrapper around the :py:func:`sequential <rig.place_and_route.place.sequential.place>` placement algorithm which optionally uses the :py:func:`breadth_first_vertex_order` vertex ordering (if the breadth_first argument is True, the default) and :py:func:`hilbert_chip_order` for chip ordering. Parameters ---------- breadth_first : bool Should vertices be placed in breadth first order rather than the iteration order of vertices_resources. True by default.
def render(self): """ Returns the rendered release notes from all parsers as a string """ release_notes = [] for parser in self.parsers: parser_content = parser.render() if parser_content is not None: release_notes.append(parser_content) return u"\r\n\r\n".join(release_notes)
Returns the rendered release notes from all parsers as a string
def parameter_names_flat(self, include_fixed=False):
    """
    Return the flattened parameter names for all subsequent parameters
    of this parameter. We do not include the name for self here!

    If you want the names for fixed parameters as well in this list,
    set include_fixed to True.

    :param bool include_fixed: whether to include fixed names here.
    """
    name_list = []
    for p in self.flattened_parameters:
        name = p.hierarchy_name()
        if p.size > 1:
            name_list.extend(["{}[{!s}]".format(name, i) for i in p._indices()])
        else:
            name_list.append(name)
    name_list = np.array(name_list)

    if not include_fixed and self._has_fixes():
        return name_list[self._fixes_]
    return name_list
Return the flattened parameter names for all subsequent parameters
of this parameter. We do not include the name for self here!

If you want the names for fixed parameters as well in this list,
set include_fixed to True.

:param bool include_fixed: whether to include fixed names here.
def route(self, path=None, method='GET', callback=None, name=None, apply=None, skip=None, **config): """ A decorator to bind a function to a request URL. Example:: @app.route('/hello/:name') def hello(name): return 'Hello %s' % name The ``:name`` part is a wildcard. See :class:`Router` for syntax details. :param path: Request path or a list of paths to listen to. If no path is specified, it is automatically generated from the signature of the function. :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of methods to listen to. (default: `GET`) :param callback: An optional shortcut to avoid the decorator syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` :param name: The name for this route. (default: None) :param apply: A decorator or plugin or a list of plugins. These are applied to the route callback in addition to installed plugins. :param skip: A list of plugins, plugin classes or names. Matching plugins are not installed to this route. ``True`` skips all. Any additional keyword arguments are stored as route-specific configuration and passed to plugins (see :meth:`Plugin.apply`). """ if callable(path): path, callback = None, path plugins = makelist(apply) skiplist = makelist(skip) def decorator(callback): # TODO: Documentation and tests if isinstance(callback, basestring): callback = load(callback) for rule in makelist(path) or yieldroutes(callback): for verb in makelist(method): verb = verb.upper() route = Route(self, rule, verb, callback, name=name, plugins=plugins, skiplist=skiplist, **config) self.routes.append(route) self.router.add(rule, verb, route, name=name) if DEBUG: route.prepare() return callback return decorator(callback) if callback else decorator
A decorator to bind a function to a request URL. Example:: @app.route('/hello/:name') def hello(name): return 'Hello %s' % name The ``:name`` part is a wildcard. See :class:`Router` for syntax details. :param path: Request path or a list of paths to listen to. If no path is specified, it is automatically generated from the signature of the function. :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of methods to listen to. (default: `GET`) :param callback: An optional shortcut to avoid the decorator syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` :param name: The name for this route. (default: None) :param apply: A decorator or plugin or a list of plugins. These are applied to the route callback in addition to installed plugins. :param skip: A list of plugins, plugin classes or names. Matching plugins are not installed to this route. ``True`` skips all. Any additional keyword arguments are stored as route-specific configuration and passed to plugins (see :meth:`Plugin.apply`).
def get_consistent_edges(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity]]:
    """Yield pairs of (source node, target node) for which all of their edges have the same type of relation.

    :return: An iterator over (source, target) node pairs whose edges all share a consistent relation
    """
    for u, v in graph.edges():
        if pair_is_consistent(graph, u, v):
            yield u, v
Yield pairs of (source node, target node) for which all of their edges have the same type of relation.

:return: An iterator over (source, target) node pairs whose edges all share a consistent relation
def get_cumulative_data(self):
    """Get the data as it will be charted. The first set will be the
    actual first data set. The second will be the sum of the first and
    the second, etc."""
    sets = list(map(itemgetter('data'), self.data))
    if not sets:
        return
    total = sets.pop(0)
    yield total
    while sets:
        # element-wise running total of the data sets seen so far
        total = list(map(add, total, sets.pop(0)))
        yield total
Get the data as it will be charted. The first set will be the actual first data set. The second will be the sum of the first and the second, etc.
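With the element-wise fix in place, each yielded list is the running element-wise total; a short worked example with made-up data:

from operator import add

sets = [[1, 2, 3], [10, 20, 30]]
running = sets[0]
print(running)                              # [1, 2, 3]  - first data set as-is
running = list(map(add, running, sets[1]))
print(running)                              # [11, 22, 33] - first plus second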
def _findRedundantProteins(protToPeps, pepToProts, proteins=None): """Returns a set of proteins with redundant peptide evidence. After removing the redundant proteins from the "protToPeps" and "pepToProts" mapping, all remaining proteins have at least one unique peptide. The remaining proteins are a "minimal" set of proteins that are able to explain all peptides. However, this is not guaranteed to be the optimal solution with the least number of proteins. In addition it is possible that multiple solutions with the same number of "minimal" proteins exist. Procedure for finding the redundant proteins: 1. Generate a list of proteins that do not contain any unique peptides, a unique peptide has exactly one protein entry in "pepToProts". 2. Proteins are first sorted in ascending order of the number of peptides. Proteins with an equal number of peptides are sorted in descending order of their sorted peptide frequencies (= proteins per peptide). If two proteins are still equal, they are sorted alpha numerical in descending order according to their protein names. For example in the case of a tie between proteins "A" and "B", protein "B" would be removed. 3. Parse this list of sorted non unique proteins; If all its peptides have a frequency value of greater 1; mark the protein as redundant; remove its peptides from the peptide frequency count, continue with the next entry. 4. Return the set of proteins marked as redundant. :param pepToProts: dict, for each peptide (=key) contains a set of parent proteins (=value). For Example {peptide: {protein, ...}, ...} :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :param proteins: iterable, proteins that are tested for being redundant. If None all proteins in "protToPeps" are parsed. :returns: a set of redundant proteins, i.e. proteins that are not necessary to explain all peptides """ if proteins is None: proteins = viewkeys(protToPeps) pepFrequency = _getValueCounts(pepToProts) protPepCounts = _getValueCounts(protToPeps) getCount = operator.itemgetter(1) getProt = operator.itemgetter(0) #TODO: quick and dirty solution #NOTE: add a test for merged proteins proteinTuples = list() for protein in proteins: if isinstance(protein, tuple): proteinTuples.append(protein) else: proteinTuples.append(tuple([protein])) sort = list() for protein in sorted(proteinTuples, reverse=True): if len(protein) == 1: protein = protein[0] protPepFreq = [pepFrequency[pep] for pep in protToPeps[protein]] if min(protPepFreq) > 1: sortValue = (len(protPepFreq)*-1, sorted(protPepFreq, reverse=True)) sort.append((protein, sortValue)) sortedProteins = map(getProt, sorted(sort, key=getCount, reverse=True)) redundantProteins = set() for protein in sortedProteins: for pep in protToPeps[protein]: if pepFrequency[pep] <= 1: break else: protPepFrequency = Counter(protToPeps[protein]) pepFrequency.subtract(protPepFrequency) redundantProteins.add(protein) return redundantProteins
Returns a set of proteins with redundant peptide evidence. After removing the redundant proteins from the "protToPeps" and "pepToProts" mapping, all remaining proteins have at least one unique peptide. The remaining proteins are a "minimal" set of proteins that are able to explain all peptides. However, this is not guaranteed to be the optimal solution with the least number of proteins. In addition it is possible that multiple solutions with the same number of "minimal" proteins exist. Procedure for finding the redundant proteins: 1. Generate a list of proteins that do not contain any unique peptides, a unique peptide has exactly one protein entry in "pepToProts". 2. Proteins are first sorted in ascending order of the number of peptides. Proteins with an equal number of peptides are sorted in descending order of their sorted peptide frequencies (= proteins per peptide). If two proteins are still equal, they are sorted alpha numerical in descending order according to their protein names. For example in the case of a tie between proteins "A" and "B", protein "B" would be removed. 3. Parse this list of sorted non unique proteins; If all its peptides have a frequency value of greater 1; mark the protein as redundant; remove its peptides from the peptide frequency count, continue with the next entry. 4. Return the set of proteins marked as redundant. :param pepToProts: dict, for each peptide (=key) contains a set of parent proteins (=value). For Example {peptide: {protein, ...}, ...} :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :param proteins: iterable, proteins that are tested for being redundant. If None all proteins in "protToPeps" are parsed. :returns: a set of redundant proteins, i.e. proteins that are not necessary to explain all peptides
def _mb_model(self, beta, mini_batch): """ Creates the structure of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- lambda : np.array Contains the values for the conditional volatility series Y : np.array Contains the length-adjusted time series (accounting for lags) scores : np.array Contains the score terms for the time series """ # Transform latent variables parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) rand_int = np.random.randint(low=0, high=self.data_length-mini_batch+1) sample = np.arange(start=rand_int, stop=rand_int+mini_batch) sampled_data = self.data[sample] Y = np.array(sampled_data[self.max_lag:]) X = np.ones(Y.shape[0]) scores = np.zeros(Y.shape[0]) lmda = np.ones(Y.shape[0])*parm[0] theta = np.ones(Y.shape[0])*parm[-1] # Loop over time series for t in range(0,Y.shape[0]): if t < self.max_lag: lmda[t] = parm[0]/(1-np.sum(parm[1:(self.p+1)])) theta[t] += (parm[-3] - (1.0/parm[-3]))*np.exp(lmda[t])*(np.sqrt(parm[-2])*sp.gamma((parm[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(parm[-2]/2.0)) else: # Loop over GARCH terms for p_term in range(0,self.p): lmda[t] += parm[1+p_term]*lmda[t-p_term-1] # Loop over Score terms for q_term in range(0,self.q): lmda[t] += parm[1+self.p+q_term]*scores[t-q_term-1] if self.leverage is True: lmda[t] += parm[-4]*np.sign(-(Y[t-1]-theta[t-1]))*(scores[t-1]+1) theta[t] += (parm[-3] - (1.0/parm[-3]))*np.exp(lmda[t]/2.0)*(np.sqrt(parm[-2])*sp.gamma((parm[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(parm[-2]/2.0)) if (Y[t]-theta[t])>=0: scores[t] = (((parm[-2]+1.0)*np.power(Y[t]-theta[t],2))/float(np.power(parm[-3], 2)*parm[-2]*np.exp(lmda[t]) + np.power(Y[t]-theta[t],2))) - 1.0 else: scores[t] = (((parm[-2]+1.0)*np.power(Y[t]-theta[t],2))/float(np.power(parm[-3],-2)*parm[-2]*np.exp(lmda[t]) + np.power(Y[t]-theta[t],2))) - 1.0 return lmda, Y, scores, theta
Creates the structure of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- lambda : np.array Contains the values for the conditional volatility series Y : np.array Contains the length-adjusted time series (accounting for lags) scores : np.array Contains the score terms for the time series
def enable_zones(self, zones):
    """
    Enable availability zones for this Access Point.
    All zones must be in the same region as the Access Point.

    :type zones: string or List of strings
    :param zones: The name of the zone(s) to add.

    """
    if isinstance(zones, str) or isinstance(zones, unicode):
        zones = [zones]
    new_zones = self.connection.enable_availability_zones(self.name, zones)
    self.availability_zones = new_zones
Enable availability zones for this Access Point.
All zones must be in the same region as the Access Point.

:type zones: string or List of strings
:param zones: The name of the zone(s) to add.
def _psi(self, x, y, q, s): """ expression after equation (8) in Keeton&Kochanek 1998 :param x: :param y: :param q: :param s: :return: """ return np.sqrt(q**2 * (s**2 + x**2) + y**2)
expression after equation (8) in Keeton&Kochanek 1998 :param x: :param y: :param q: :param s: :return:
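Written out, the expression the helper evaluates (the term after equation (8) in Keeton & Kochanek 1998, exactly as the code computes it) is:

\psi(x, y) = \sqrt{q^{2}\left(s^{2} + x^{2}\right) + y^{2}}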
def modflow_read_hydmod_file(hydmod_file, hydmod_outfile=None):
    """ read in a binary hydmod file and return a dataframe of the results

    Parameters
    ----------
    hydmod_file : str
        modflow hydmod binary file
    hydmod_outfile : str
        output file to write.  If None, use <hydmod_file>.dat.
        Default is None

    Returns
    -------
    df : pandas.DataFrame
        pandas DataFrame with hydmod_file values

    Note
    ----
    requires flopy
    """
    try:
        import flopy.utils as fu
    except Exception as e:
        print('flopy is not installed - cannot read {0}\n{1}'.format(hydmod_file, e))
        return
    #print('Starting to read HYDMOD data from {0}'.format(hydmod_file))
    obs = fu.HydmodObs(hydmod_file)
    hyd_df = obs.get_dataframe()

    hyd_df.columns = [i[2:] if i.lower() != 'totim' else i for i in hyd_df.columns]
    #hyd_df.loc[:,"datetime"] = hyd_df.index
    hyd_df['totim'] = hyd_df.index.map(lambda x: x.strftime("%Y%m%d"))

    hyd_df.rename(columns={'totim': 'datestamp'}, inplace=True)

    # reshape into a single column
    hyd_df = pd.melt(hyd_df, id_vars='datestamp')
    hyd_df.rename(columns={'value': 'obsval'}, inplace=True)

    hyd_df['obsnme'] = [i.lower() + '_' + j.lower() for i, j in
                        zip(hyd_df.variable, hyd_df.datestamp)]

    vc = hyd_df.obsnme.value_counts().sort_values()
    vc = list(vc.loc[vc > 1].index.values)
    if len(vc) > 0:
        hyd_df.to_csv("hyd_df.duplicates.csv")
        obs.get_dataframe().to_csv("hyd_org.duplicates.csv")
        raise Exception("duplicates in obsnme:{0}".format(vc))
    #assert hyd_df.obsnme.value_counts().max() == 1,"duplicates in obsnme"

    if not hydmod_outfile:
        hydmod_outfile = hydmod_file + '.dat'
    hyd_df.to_csv(hydmod_outfile, columns=['obsnme', 'obsval'], sep=' ', index=False)
    #hyd_df = hyd_df[['obsnme','obsval']]
    return hyd_df[['obsnme', 'obsval']], hydmod_outfile
read in a binary hydmod file and return a dataframe of the results

Parameters
----------
hydmod_file : str
    modflow hydmod binary file
hydmod_outfile : str
    output file to write.  If None, use <hydmod_file>.dat.
    Default is None

Returns
-------
df : pandas.DataFrame
    pandas DataFrame with hydmod_file values

Note
----
requires flopy
def makeSubDir(dirName):
    """Makes a given subdirectory if it doesn't already exist,
    making sure it is public.
    """
    if not os.path.exists(dirName):
        os.mkdir(dirName)
        os.chmod(dirName, 0o777)
    return dirName
Makes a given subdirectory if it doesn't already exist, making sure it is public.
def integrate_auto_switch(odes, kw, x, y0, params=(), **kwargs):
    """ Auto-switching between formulations of ODE system.

    In case one has a formulation of a system of ODEs which is preferential
    in the beginning of the integration, this function allows the user to
    run the integration with this system where it takes a user-specified
    maximum number of steps before switching to another formulation (unless
    the final value of the independent variable has been reached). The
    number of systems used is returned as ``nsys`` in the info dict.

    Parameters
    ----------
    odes : iterable of :class:`ODESys` instances
    kw : dict mapping kwarg to iterables of same length as ``odes``
    x : array_like
    y0 : array_like
    params : array_like
    \*\*kwargs:
        See :meth:`ODESys.integrate`

    Notes
    -----
    Plays particularly well with :class:`symbolic.TransformedSys`.

    """
    x_arr = np.asarray(x)
    if x_arr.shape[-1] > 2:
        raise NotImplementedError("Only adaptive support return_on_error for now")
    multimode = False if x_arr.ndim < 2 else x_arr.shape[0]
    nfo_keys = ('nfev', 'njev', 'time_cpu', 'time_wall')

    next_autonomous = getattr(odes[0], 'autonomous_interface', False) == True  # noqa (np.True_)
    if multimode:
        tot_x = [np.array([0] if next_autonomous else [x[_][0]]) for _ in range(multimode)]
        tot_y = [np.asarray([y0[_]]) for _ in range(multimode)]
        tot_nfo = [defaultdict(int) for _ in range(multimode)]
        glob_x = [_[0] for _ in x] if next_autonomous else [0.0]*multimode
    else:
        tot_x, tot_y, tot_nfo = np.array([0 if next_autonomous else x[0]]), np.asarray([y0]), defaultdict(int)
        glob_x = x[0] if next_autonomous else 0.0

    for oi in range(len(odes)):
        if oi < len(odes) - 1:
            next_autonomous = getattr(odes[oi+1], 'autonomous_interface', False) == True  # noqa (np.True_)
        _int_kw = kwargs.copy()
        for k, v in kw.items():
            _int_kw[k] = v[oi]
        res = odes[oi].integrate(x, y0, params, **_int_kw)

        if multimode:
            for idx in range(multimode):
                tot_x[idx] = np.concatenate((tot_x[idx], res[idx].xout[1:] + glob_x[idx]))
                tot_y[idx] = np.concatenate((tot_y[idx], res[idx].yout[1:, :]))
                for k in nfo_keys:
                    if k in res[idx].info:
                        tot_nfo[idx][k] += res[idx].info[k]
                tot_nfo[idx]['success'] = res[idx].info['success']
        else:
            tot_x = np.concatenate((tot_x, res.xout[1:] + glob_x))
            tot_y = np.concatenate((tot_y, res.yout[1:, :]))
            for k in nfo_keys:
                if k in res.info:
                    tot_nfo[k] += res.info[k]
            tot_nfo['success'] = res.info['success']

        if multimode:
            if all([r.info['success'] for r in res]):
                break
        else:
            if res.info['success']:
                break
        if oi < len(odes) - 1:
            if multimode:
                _x, y0 = [], []
                for idx in range(multimode):
                    _x.append(_new_x(res[idx].xout, x[idx], next_autonomous))
                    y0.append(res[idx].yout[-1, :])
                    if next_autonomous:
                        glob_x[idx] += res[idx].xout[-1]
                x = _x
            else:
                x = _new_x(res.xout, x, next_autonomous)
                y0 = res.yout[-1, :]
                if next_autonomous:
                    glob_x += res.xout[-1]
    if multimode:  # don't return defaultdict
        tot_nfo = [dict(nsys=oi+1, **_nfo) for _nfo in tot_nfo]
        return [Result(tot_x[idx], tot_y[idx], res[idx].params, tot_nfo[idx], odes[0])
                for idx in range(len(res))]
    else:
        tot_nfo = dict(nsys=oi+1, **tot_nfo)
        return Result(tot_x, tot_y, res.params, tot_nfo, odes[0])
Auto-switching between formulations of ODE system.

In case one has a formulation of a system of ODEs which is preferential in
the beginning of the integration, this function allows the user to run the
integration with this system where it takes a user-specified maximum number
of steps before switching to another formulation (unless the final value of
the independent variable has been reached). The number of systems used is
returned as ``nsys`` in the info dict.

Parameters
----------
odes : iterable of :class:`ODESys` instances
kw : dict mapping kwarg to iterables of same length as ``odes``
x : array_like
y0 : array_like
params : array_like
\*\*kwargs:
    See :meth:`ODESys.integrate`

Notes
-----
Plays particularly well with :class:`symbolic.TransformedSys`.
def _initialize(self, *args, **kwargs):
    """Initialize the mapping matcher with constructor arguments."""
    self.items = None
    self.keys = None
    self.values = None

    if args:
        if len(args) != 2:
            raise TypeError("expected exactly two positional arguments, "
                            "got %s" % len(args))
        if kwargs:
            raise TypeError(
                "expected positional or keyword arguments, not both")

        # got positional arguments only
        self.keys, self.values = map(self._validate_argument, args)

    elif kwargs:
        has_kv = 'keys' in kwargs and 'values' in kwargs
        has_of = 'of' in kwargs
        if not (has_kv or has_of):
            raise TypeError("expected keys/values or items matchers, "
                            "but got: %s" % list(kwargs.keys()))
        if has_kv and has_of:
            raise TypeError(
                "expected keys & values, or items matchers, not both")

        if has_kv:
            # got keys= and values= matchers
            self.keys = self._validate_argument(kwargs['keys'])
            self.values = self._validate_argument(kwargs['values'])
        else:
            # got of= matcher, which can be a tuple of matchers,
            # or a single matcher for dictionary items
            of = kwargs['of']
            if isinstance(of, tuple):
                try:
                    # got of= as tuple of matchers
                    self.keys, self.values = \
                        map(self._validate_argument, of)
                except ValueError:
                    raise TypeError(
                        "of= tuple has to be a pair of matchers/types")
            else:
                # got of= as a single matcher
                self.items = self._validate_argument(of)
Initialize the mapping matcher with constructor arguments.
def get_stack_frame(self, max_size=None):
    """
    Reads the contents of the current stack frame.
    Only works for functions with standard prologue and epilogue.

    @type  max_size: int
    @param max_size: (Optional) Maximum amount of bytes to read.

    @rtype:  str
    @return: Stack frame data.
        May not be accurate, depending on the compiler used.
        May return an empty string.

    @raise RuntimeError: The stack frame is invalid,
        or the function doesn't have a standard prologue
        and epilogue.
    @raise WindowsError: An error occurred when getting the thread
        context or reading data from the process memory.
    """
    sp, fp = self.get_stack_frame_range()
    size = fp - sp
    if max_size and size > max_size:
        size = max_size
    return self.get_process().peek(sp, size)
Reads the contents of the current stack frame.
Only works for functions with standard prologue and epilogue.

@type  max_size: int
@param max_size: (Optional) Maximum amount of bytes to read.

@rtype:  str
@return: Stack frame data.
    May not be accurate, depending on the compiler used.
    May return an empty string.

@raise RuntimeError: The stack frame is invalid,
    or the function doesn't have a standard prologue
    and epilogue.

@raise WindowsError: An error occurred when getting the thread
    context or reading data from the process memory.
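A hedged usage sketch, assuming a WinAppDbg-style debugger event handler where `event.get_thread()` returns an object exposing this method; the handler name and the 256-byte cap are illustrative choices, not part of the code above.

# Hypothetical sketch: dump up to 256 bytes of the faulting thread's
# stack frame, tolerating functions without a standard prologue/epilogue.
def on_access_violation(event):
    thread = event.get_thread()
    try:
        frame = thread.get_stack_frame(max_size=256)
    except RuntimeError:
        frame = ''   # no standard prologue/epilogue to walk
    print(repr(frame) if frame else '<no frame data>')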
def initialize_memory(basic_block): """ Initializes global memory array with the given one """ global MEMORY MEMORY = basic_block.mem get_labels(MEMORY, basic_block) basic_block.mem = MEMORY
Initializes global memory array with the given one
def create_where(): """ Create a grammar for the 'where' clause used by 'select' """ conjunction = Forward().setResultsName("conjunction") nested = Group(Suppress("(") + conjunction + Suppress(")")).setResultsName( "conjunction" ) maybe_nested = nested | constraint inverted = Group(not_ + maybe_nested).setResultsName("not") full_constraint = maybe_nested | inverted conjunction <<= full_constraint + OneOrMore(and_or + full_constraint) return upkey("where") + Group(conjunction | full_constraint).setResultsName("where")
Create a grammar for the 'where' clause used by 'select'
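A sketch of exercising the grammar, assuming `constraint`, `not_`, `and_or`, and `upkey` are defined elsewhere in the same module, and that `constraint` accepts simple comparisons of this shape; the input string is illustrative only.

# Hypothetical sketch: build the grammar and parse a nested clause.
where = create_where()
parsed = where.parseString("WHERE (foo = 1 AND bar > 2) OR NOT baz = 3")
print(parsed.asDict())   # inspect the named 'where'/'conjunction' groups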
async def jsk_vc_youtube_dl(self, ctx: commands.Context, *, url: str):
    """
    Plays audio from youtube_dl-compatible sources.
    """

    if not youtube_dl:
        return await ctx.send("youtube_dl is not installed.")

    voice = ctx.guild.voice_client

    if not voice:
        return await ctx.send("Not connected to a voice channel in this guild.")

    if voice.is_playing():
        voice.stop()

    # remove embed maskers if present
    url = url.lstrip("<").rstrip(">")

    voice.play(discord.PCMVolumeTransformer(BasicYouTubeDLSource(url)))
    await ctx.send(f"Playing in {voice.channel.name}.")
Plays audio from youtube_dl-compatible sources.
def show_updates(self):
    """
    Check installed packages for available updates on PyPI

    @param project_name: optional package name to check; checks every
                         installed package if none specified
    @type project_name: string

    @returns: None
    """
    dists = Distributions()
    if self.project_name:
        # Check for a single package
        pkg_list = [self.project_name]
    else:
        # Check for every installed package
        pkg_list = get_pkglist()
    found = None
    for pkg in pkg_list:
        for (dist, active) in dists.get_distributions("all", pkg,
                dists.get_highest_installed(pkg)):
            (project_name, versions) = \
                    self.pypi.query_versions_pypi(dist.project_name)

            if versions:
                # PyPI returns them in chronological order,
                # but who knows if it's guaranteed in the API?
                # Make sure we grab the highest version:
                newest = get_highest_version(versions)
                if newest != dist.version:
                    # We may have newer than what PyPI knows about
                    if pkg_resources.parse_version(dist.version) < \
                            pkg_resources.parse_version(newest):
                        found = True
                        print(" %s %s (%s)" % (project_name, dist.version,
                                               newest))
    if not found and self.project_name:
        self.logger.info("You have the latest version installed.")
    elif not found:
        self.logger.info("No newer packages found at The Cheese Shop")
    return 0
Check installed packages for available updates on PyPI

@param project_name: optional package name to check; checks every
                     installed package if none specified
@type project_name: string

@returns: None
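The update check hinges on comparing parsed versions rather than raw strings; a minimal standalone illustration of why:

import pkg_resources

# '1.10.0' sorts below '1.2.0' lexically, but parse_version orders
# versions semantically, which is what the check above relies on:
assert '1.10.0' < '1.2.0'
assert pkg_resources.parse_version('1.2.0') < \
       pkg_resources.parse_version('1.10.0')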
async def set_volume(self, volume: int, *, device: Optional[SomeDevice] = None):
    """Set the volume for the user’s current playback device.

    Parameters
    ----------
    volume : int
        The volume to set. Must be a value from 0 to 100 inclusive.
    device : Optional[:obj:`SomeDevice`]
        The Device object or id of the device this command is targeting.
        If not supplied, the user’s currently active device is the target.
    """
    device_id = str(device) if device is not None else None
    await self._user.http.set_playback_volume(volume, device_id=device_id)
Set the volume for the user’s current playback device. Parameters ---------- volume : int The volume to set. Must be a value from 0 to 100 inclusive. device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
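A usage sketch, assuming `player` is an instance of the class above with an authenticated user that has an active playback device, and `kitchen_speaker` is an assumed Device object:

# Hypothetical sketch: set volume on the active device, then target a
# specific one explicitly.
await player.set_volume(35)
await player.set_volume(35, device=kitchen_speaker)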
def save_feature(self, cat, img, feature, data):
    """Saves a new feature."""
    filename = self.path(cat, img, feature)
    mkdir(filename)
    savemat(filename, {'output': data})
Saves a new feature.
def cmd_cammsg(self, args):
    '''cammsg'''
    print("Sending DIGICAM_CONTROL CMD_LONG")
    self.master.mav.command_long_send(
        self.settings.target_system,  # target_system
        0,  # target_component
        mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL,  # command
        0,  # confirmation
        10,  # param1
        20,  # param2
        30,  # param3
        40,  # param4
        50,  # param5
        60,  # param6
        70)  # param7
cammsg
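Outside MAVProxy, the same COMMAND_LONG pattern can be driven directly from pymavlink; a sketch in which the connection string and parameter values are assumptions (param5=1 triggers the shutter per the MAVLink DIGICAM_CONTROL definition):

from pymavlink import mavutil

# Hypothetical sketch: open a GCS-side link and trigger the camera once.
conn = mavutil.mavlink_connection('udpin:0.0.0.0:14550')  # assumed endpoint
conn.wait_heartbeat()                                     # learn target_system
conn.mav.command_long_send(
    conn.target_system, 0,                                # target sys/comp
    mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL,
    0,                                                    # confirmation
    0, 0, 0, 0,                                           # param1-4 unused here
    1,                                                    # param5: shoot command
    0, 0)                                                 # param6-7 unused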