def cancel_inquiry(self):
    """
    Call this method to cancel an inquiry in process. inquiry_complete
    will still be called.
    """
    self.names_to_find = {}
    if self.is_inquiring:
        try:
            _bt.hci_send_cmd(self.sock, _bt.OGF_LINK_CTL,
                             _bt.OCF_INQUIRY_CANCEL)
        except _bt.error as e:
            self.sock.close()
            self.sock = None
            raise BluetoothError(e.args[0],
                                 "error canceling inquiry: " + e.args[1])
        self.is_inquiring = False

def all_my_hosts_and_services(self):
    """Create an iterator for all my known hosts and services

    :return: generator over host and service items
    """
    for what in (self.hosts, self.services):
        for item in what:
            yield item

def _get_objects_with_same_attribute(self,
                                     objects: Set[Object],
                                     attribute_function: Callable[[Object], str]) -> Set[Object]:
    """
    Returns the set of objects for which the attribute function returns an
    attribute value that is most frequent in the initial set, if the frequency
    is greater than 1. If not, all objects have different attribute values,
    and this method returns an empty set.
    """
    objects_of_attribute: Dict[str, Set[Object]] = defaultdict(set)
    for entity in objects:
        objects_of_attribute[attribute_function(entity)].add(entity)
    if not objects_of_attribute:
        return set()
    most_frequent_attribute = max(objects_of_attribute,
                                  key=lambda x: len(objects_of_attribute[x]))
    if len(objects_of_attribute[most_frequent_attribute]) <= 1:
        return set()
    return objects_of_attribute[most_frequent_attribute]

async def parse_update(self, bot):
    """
    Read update from stream and deserialize it.

    :param bot: bot instance. You can get it from Dispatcher
    :return: :class:`aiogram.types.Update`
    """
    data = await self.request.json()
    update = types.Update(**data)
    return update

def _apply_to_array(self, yd, y, weights, off_slices, ref_slice, dim):
    """Applies the finite differences only to slices along a given axis"""
    ndims = len(y.shape)
    all = slice(None, None, 1)
    ref_multi_slice = [all] * ndims
    ref_multi_slice[dim] = ref_slice
    for w, s in zip(weights, off_slices):
        off_multi_slice = [all] * ndims
        off_multi_slice[dim] = s
        if abs(1 - w) < 1.E-14:
            yd[ref_multi_slice] += y[off_multi_slice]
        else:
            yd[ref_multi_slice] += w * y[off_multi_slice]

def _get_dvs_capability(dvs_name, dvs_capability):
    '''
    Returns the dict representation of the DVS capability

    dvs_name
        The name of the DVS

    dvs_capability
        The DVS capability
    '''
    log.trace('Building the dict of the DVS \'%s\' capability', dvs_name)
    return {'operation_supported': dvs_capability.dvsOperationSupported,
            'portgroup_operation_supported':
                dvs_capability.dvPortGroupOperationSupported,
            'port_operation_supported': dvs_capability.dvPortOperationSupported}

def _create_identifier(rdtype, name, content):
    """
    Creates a hashed identifier based on the fully qualified record type,
    name & content, and returns the hash.
    """
    sha256 = hashlib.sha256()
    sha256.update((rdtype + '/').encode('UTF-8'))
    sha256.update((name + '/').encode('UTF-8'))
    sha256.update(content.encode('UTF-8'))
    return sha256.hexdigest()[0:7]

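A quick sanity check of the identifier scheme, assuming the function is importable from its module (the record values below are purely illustrative):

    # Deterministic: the same record always maps to the same 7-character id.
    ident = _create_identifier('TXT', 'example.com', 'challenge-token')
    assert len(ident) == 7
    assert ident == _create_identifier('TXT', 'example.com', 'challenge-token')
    # Content-sensitive: changing any field produces a different id.
    assert ident != _create_identifier('TXT', 'example.com', 'other-content')
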
def dependencies(self, task, params={}, **options):
    """Returns the compact representations of all of the dependencies of a task.

    Parameters
    ----------
    task : {Id} The task to get dependencies on.
    [params] : {Object} Parameters for the request
    """
    path = "/tasks/%s/dependencies" % (task)
    return self.client.get(path, params, **options)

def get_rupdict(self):
    """
    :returns: a dictionary with the parameters of the rupture
    """
    assert len(self.rup_array) == 1, 'Please specify a slice of length 1'
    dic = {'trt': self.trt, 'samples': self.samples}
    with datastore.read(self.filename) as dstore:
        rupgeoms = dstore['rupgeoms']
        source_ids = dstore['source_info']['source_id']
        rec = self.rup_array[0]
        geom = rupgeoms[rec['gidx1']:rec['gidx2']].reshape(
            rec['sy'], rec['sz'])
        dic['lons'] = geom['lon']
        dic['lats'] = geom['lat']
        dic['deps'] = geom['depth']
        rupclass, surclass = self.code2cls[rec['code']]
        dic['rupture_class'] = rupclass.__name__
        dic['surface_class'] = surclass.__name__
        dic['hypo'] = rec['hypo']
        dic['occurrence_rate'] = rec['occurrence_rate']
        dic['grp_id'] = rec['grp_id']
        dic['n_occ'] = rec['n_occ']
        dic['serial'] = rec['serial']
        dic['mag'] = rec['mag']
        dic['srcid'] = source_ids[rec['srcidx']]
    return dic

def _set_vcs(self, v, load=False):
    """
    Setter method for vcs, mapped from YANG variable
    /event_handler/event_handler_list/trigger/vcs (enumeration)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_vcs is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_vcs()
    directly.

    YANG Description: VCS event type.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=unicode,
                restriction_type="dict_key",
                restriction_arg={
                    u'switch-bootup': {'value': 1},
                    u'switch-ready-for-configuration': {'value': 2}},),
            is_leaf=True, yang_name="vcs", rest_name="vcs", parent=self,
            choice=(u'trigger-choice', u'vcs'),
            path_helper=self._path_helper, extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': u'VCS event type.'}},
            namespace='urn:brocade.com:mgmt:brocade-event-handler',
            defining_module='brocade-event-handler',
            yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """vcs must be of a type compatible with enumeration""",
            'defined-type': "brocade-event-handler:enumeration",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'switch-bootup': {'value': 1}, u'switch-ready-for-configuration': {'value': 2}},), is_leaf=True, yang_name="vcs", rest_name="vcs", parent=self, choice=(u'trigger-choice', u'vcs'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VCS event type.'}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='enumeration', is_config=True)""",
        })

    self.__vcs = t
    if hasattr(self, '_set'):
        self._set()

def _learnFeatureLocationPair(self, newLocation, featureLocationInput,
                              featureLocationGrowthCandidates):
    """
    Grow / reinforce synapses between the location layer's dendrites and the
    input layer's active cells.
    """
    potentialOverlaps = self.featureLocationConnections.computeActivity(
        featureLocationInput)
    matchingSegments = np.where(potentialOverlaps > self.learningThreshold)[0]

    # Cells with an active segment pair: reinforce the segment
    cellsForActiveSegments = self.featureLocationConnections.mapSegmentsToCells(
        self.activeFeatureLocationSegments)
    learningActiveSegments = self.activeFeatureLocationSegments[
        np.in1d(cellsForActiveSegments, newLocation)]
    remainingCells = np.setdiff1d(newLocation, cellsForActiveSegments)

    # Remaining cells with a matching segment pair: reinforce the best matching
    # segment pair.
    candidateSegments = self.featureLocationConnections.filterSegmentsByCell(
        matchingSegments, remainingCells)
    cellsForCandidateSegments = (
        self.featureLocationConnections.mapSegmentsToCells(
            candidateSegments))
    candidateSegments = candidateSegments[
        np.in1d(cellsForCandidateSegments, remainingCells)]
    onePerCellFilter = np2.argmaxMulti(potentialOverlaps[candidateSegments],
                                       cellsForCandidateSegments)
    learningMatchingSegments = candidateSegments[onePerCellFilter]

    newSegmentCells = np.setdiff1d(remainingCells, cellsForCandidateSegments)

    for learningSegments in (learningActiveSegments,
                             learningMatchingSegments):
        self._learn(self.featureLocationConnections, self.rng,
                    learningSegments, featureLocationInput,
                    featureLocationGrowthCandidates, potentialOverlaps,
                    self.initialPermanence, self.sampleSize,
                    self.permanenceIncrement, self.permanenceDecrement,
                    self.maxSynapsesPerSegment)

    numNewSynapses = len(featureLocationInput)

    if self.sampleSize != -1:
        numNewSynapses = min(numNewSynapses, self.sampleSize)

    if self.maxSynapsesPerSegment != -1:
        numNewSynapses = min(numNewSynapses, self.maxSynapsesPerSegment)

    newSegments = self.featureLocationConnections.createSegments(
        newSegmentCells)

    self.featureLocationConnections.growSynapsesToSample(
        newSegments, featureLocationGrowthCandidates,
        numNewSynapses, self.initialPermanence,
        self.rng)

def _check_for_pi_nodes(self, list, inheader):
    '''Raise an exception if any of the list descendants are PI nodes.
    '''
    list = list[:]
    while list:
        elt = list.pop()
        t = elt.nodeType
        if t == _Node.PROCESSING_INSTRUCTION_NODE:
            raise ParseException('Found processing instruction "<?' +
                                 elt.nodeName + '...>"',
                                 inheader, elt.parentNode, self.dom)
        elif t == _Node.DOCUMENT_TYPE_NODE:
            raise ParseException('Found DTD', inheader,
                                 elt.parentNode, self.dom)
        list += _children(elt)

def _validate_query(query):
    """Validate and clean up a query to be sent to Search.
    Cleans the query string, removes unneeded parameters,
    and validates for correctness.
    Does not modify the original argument.
    Raises an Exception on invalid input.

    Arguments:
        query (dict): The query to validate.

    Returns:
        dict: The validated query.
    """
    query = deepcopy(query)
    # q is always required
    if query["q"] == BLANK_QUERY["q"]:
        raise ValueError("No query specified.")
    query["q"] = _clean_query_string(query["q"])

    # limit should be set to appropriate default if not specified
    if query["limit"] is None:
        query["limit"] = SEARCH_LIMIT if query["advanced"] else NONADVANCED_LIMIT
    # If specified, the limit should not be greater than the Search maximum
    elif query["limit"] > SEARCH_LIMIT:
        warnings.warn('Reduced result limit from {} to the Search maximum: {}'
                      .format(query["limit"], SEARCH_LIMIT), RuntimeWarning)
        query["limit"] = SEARCH_LIMIT

    # Remove all blank/default values
    for key, val in BLANK_QUERY.items():
        # Default for get is NaN so comparison is always False
        if query.get(key, float('nan')) == val:
            query.pop(key)

    # Remove unsupported fields
    to_remove = [field for field in query.keys()
                 if field not in BLANK_QUERY.keys()]
    for field in to_remove:
        query.pop(field)

    return query

def convert_entrez_to_uniprot(self, entrez):
    """Convert Entrez Id to Uniprot Id"""
    server = "http://www.uniprot.org/uniprot/?query=%22GENEID+{0}%22&format=xml".format(entrez)
    r = requests.get(server, headers={"Content-Type": "text/xml"})
    if not r.ok:
        r.raise_for_status()
        sys.exit()
    response = r.text
    info = xmltodict.parse(response)
    try:
        data = info['uniprot']['entry']['accession'][0]
        return data
    except TypeError:
        data = info['uniprot']['entry'][0]['accession'][0]
        return data

def getPDF(self):
    '''Function that gets vectors of the pdf and target at the last design
    evaluated.

    :return: tuple of q values, pdf values, target values
    '''
    if hasattr(self, '_qplot'):
        return self._qplot, self._hplot, self._tplot
    else:
        raise ValueError('The metric has not been evaluated at any design '
                         'point so the PDF cannot be obtained')

def read_config(cls, configparser):
    """Read configuration file options."""
    config = dict()
    section = cls.__name__
    option = "warningregex"
    if configparser.has_option(section, option):
        value = configparser.get(section, option)
    else:
        value = None
    config[option] = value
    return config

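A minimal sketch of the configparser lookup this hook relies on; the section is named after the checker class, so the class name used below is hypothetical:

    import configparser

    parser = configparser.ConfigParser()
    parser.read_string("[WarningChecker]\nwarningregex = TODO|FIXME\n")

    # read_config looks up the option under a section named after the class.
    section, option = "WarningChecker", "warningregex"
    value = parser.get(section, option) if parser.has_option(section, option) else None
    print(value)  # TODO|FIXME
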
def job_step_error(self, job_request_payload, message):
    """
    Send message that the job step failed using payload data.

    :param job_request_payload: StageJobPayload|RunJobPayload|StoreJobOutputPayload
        payload from job with error
    :param message: description of the error
    """
    payload = JobStepErrorPayload(job_request_payload, message)
    self.send(job_request_payload.error_command, payload)

def new_socket():
    """
    Create a new socket with OS-specific parameters

    Try to set SO_REUSEPORT for BSD-flavored systems if it's an option.
    Catches errors if not.
    """
    new_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    new_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        # noinspection PyUnresolvedReferences
        reuseport = socket.SO_REUSEPORT
    except AttributeError:
        pass
    else:
        try:
            new_sock.setsockopt(socket.SOL_SOCKET, reuseport, 1)
        except (OSError, socket.error) as err:
            # OSError on python 3, socket.error on python 2
            if err.errno != errno.ENOPROTOOPT:
                raise
    return new_sock

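Hypothetical usage, e.g. binding a listener that tolerates quick restarts thanks to SO_REUSEADDR (the address and port are illustrative):

    sock = new_socket()
    sock.bind(('127.0.0.1', 8000))
    sock.listen(5)
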
def validate_replicas(self, data):
    """Validate distributed experiment"""
    environment = data.get('environment')
    if environment and environment.replicas:
        validate_replicas(data.get('framework'), environment.replicas)

def _call(self, endpoint, data=None):
    """
    Make an authorized API call to specified endpoint.

    :param str endpoint: API endpoint's relative URL, eg. `/account`.
    :param dict data: POST request data.
    :return: A dictionary or a string with response data.
    """
    data = {} if data is None else data
    try:
        data['access_token'] = self.access_token()
        return self._request(endpoint, data)
    except AccessTokenExpired:
        self._cached_access_token = None
        data['access_token'] = self.access_token()
        return self._request(endpoint, data)

def run(self):
    """Build the Fortran library, all python extensions and the docs."""
    print('---- BUILDING ----')
    _build.run(self)

    # build documentation
    print('---- BUILDING DOCS ----')
    docdir = os.path.join(self.build_lib, 'pyshtools', 'doc')
    self.mkpath(docdir)
    doc_builder = os.path.join(self.build_lib, 'pyshtools', 'make_docs.py')
    doc_source = '.'
    check_call([sys.executable, doc_builder, doc_source, self.build_lib])

    print('---- ALL DONE ----')

def _read_data(path):
    """Read Rdump output and transform to Python dictionary.

    Parameters
    ----------
    path : str

    Returns
    -------
    Dict
        key, values pairs from Rdump formatted data.
    """
    data = {}
    with open(path, "r") as f_obj:
        var = ""
        for line in f_obj:
            if "<-" in line:
                if len(var):
                    key, var = _process_data_var(var)
                    data[key] = var
                var = ""
            var += " " + line.strip()
        if len(var):
            key, var = _process_data_var(var)
            data[key] = var
    return data

def is_handler_subclass(cls, classnames=("ViewHandler", "APIHandler")):
    """Determines if ``cls`` is indeed a subclass of ``classnames``"""
    if isinstance(cls, list):
        return any(is_handler_subclass(c) for c in cls)
    elif isinstance(cls, type):
        return any(c.__name__ in classnames for c in inspect.getmro(cls))
    else:
        raise TypeError(
            "Unexpected type `{}` for class `{}`".format(type(cls), cls)
        )

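A small self-contained check of the dispatch behaviour, using stand-in classes (the real ViewHandler/APIHandler live elsewhere):

    class ViewHandler:              # stand-in for the real base class
        pass

    class MyHandler(ViewHandler):
        pass

    assert is_handler_subclass(MyHandler)                     # single class
    assert is_handler_subclass([MyHandler, ViewHandler])      # list of classes
    assert not is_handler_subclass(type("Unrelated", (), {}))
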
def analyze_cluster_size_per_scan_parameter(input_file_hits, output_file_cluster_size,
                                            parameter='GDAC', max_chunk_size=10000000,
                                            overwrite_output_files=False, output_pdf=None):
    ''' This method takes a hit file and determines the cluster size for
    different scan parameter values.

    Parameters
    ----------
    input_file_hits: string
    output_file_cluster_size: string
        The data file with the results
    parameter: string
        The name of the parameter to separate the data into (e.g.: PlsrDAC)
    max_chunk_size: int
        the maximum chunk size used during read; if too big, a memory error
        occurs; if too small, analysis takes longer
    overwrite_output_files: bool
        Set to true to overwrite the output file if it already exists
    output_pdf: PdfPages
        PdfPages file object; if None the plot is printed to screen,
        if False nothing is printed
    '''
    logging.info('Analyze the cluster sizes for different ' + parameter + ' settings for ' + input_file_hits)
    if os.path.isfile(output_file_cluster_size) and not overwrite_output_files:  # skip analysis if already done
        logging.info('Analyzed cluster size file ' + output_file_cluster_size + ' already exists. Skip cluster size analysis.')
    else:
        with tb.open_file(output_file_cluster_size, mode="w") as out_file_h5:  # file to write the data into
            filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)  # compression of the written data
            parameter_group = out_file_h5.create_group(out_file_h5.root, parameter, title=parameter)  # node to store the data
            cluster_size_total = None  # final array for the cluster size per GDAC
            with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5:  # open the actual hit file
                meta_data_array = in_hit_file_h5.root.meta_data[:]
                scan_parameter = analysis_utils.get_scan_parameter(meta_data_array)  # get the scan parameters
                if scan_parameter:  # if a GDAC scan parameter was used analyze the cluster size per GDAC setting
                    scan_parameter_values = scan_parameter[parameter]  # scan parameter settings used
                    if len(scan_parameter_values) == 1:  # only analyze per scan step if there is more than one scan step
                        logging.warning('The file ' + str(input_file_hits) + ' has no different ' + str(parameter) + ' parameter values. Omit analysis.')
                    else:
                        logging.info('Analyze ' + input_file_hits + ' per scan parameter ' + parameter + ' for ' + str(len(scan_parameter_values)) + ' values from ' + str(np.amin(scan_parameter_values)) + ' to ' + str(np.amax(scan_parameter_values)))
                        event_numbers = analysis_utils.get_meta_data_at_scan_parameter(meta_data_array, parameter)['event_number']  # event numbers in meta_data where the scan parameter changes
                        parameter_ranges = np.column_stack((scan_parameter_values, analysis_utils.get_ranges_from_array(event_numbers)))
                        hit_table = in_hit_file_h5.root.Hits
                        analysis_utils.index_event_number(hit_table)
                        total_hits, total_hits_2, index = 0, 0, 0
                        chunk_size = max_chunk_size

                        # initialize the analysis and set settings
                        analyze_data = AnalyzeRawData()
                        analyze_data.create_cluster_size_hist = True
                        analyze_data.create_cluster_tot_hist = True
                        analyze_data.histogram.set_no_scan_parameter()  # one has to tell histogram the # of scan parameters for correct occupancy hist allocation

                        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80)
                        progress_bar.start()

                        for parameter_index, parameter_range in enumerate(parameter_ranges):  # loop over the selected events
                            analyze_data.reset()  # resets the data of the last analysis
                            logging.debug('Analyze GDAC = ' + str(parameter_range[0]) + ' ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%')
                            start_event_number = parameter_range[1]
                            stop_event_number = parameter_range[2]
                            logging.debug('Data from events = [' + str(start_event_number) + ',' + str(stop_event_number) + '[')
                            actual_parameter_group = out_file_h5.create_group(parameter_group, name=parameter + '_' + str(parameter_range[0]), title=parameter + '_' + str(parameter_range[0]))

                            # loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given
                            readout_hit_len = 0  # variable to calculate an optimal chunk size value from the number of hits, for speed up
                            for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=chunk_size):
                                total_hits += hits.shape[0]
                                analyze_data.analyze_hits(hits)  # analyze the selected hits in chunks
                                readout_hit_len += hits.shape[0]
                                progress_bar.update(index)
                            chunk_size = int(1.05 * readout_hit_len) if int(1.05 * readout_hit_len) < max_chunk_size else max_chunk_size  # to increase the readout speed, estimate the number of hits for one read instruction
                            if chunk_size < 50:  # limit the lower chunk size, there can always be a crazy event with more than 20 hits
                                chunk_size = 50

                            # get occupancy hist
                            occupancy = analyze_data.histogram.get_occupancy()  # just check here if histogram is consistent

                            # store and plot cluster size hist
                            cluster_size_hist = analyze_data.clusterizer.get_cluster_size_hist()
                            cluster_size_hist_table = out_file_h5.create_carray(actual_parameter_group, name='HistClusterSize', title='Cluster Size Histogram', atom=tb.Atom.from_dtype(cluster_size_hist.dtype), shape=cluster_size_hist.shape, filters=filter_table)
                            cluster_size_hist_table[:] = cluster_size_hist
                            if output_pdf is not False:
                                plotting.plot_cluster_size(hist=cluster_size_hist, title='Cluster size (' + str(np.sum(cluster_size_hist)) + ' entries) for ' + parameter + ' = ' + str(scan_parameter_values[parameter_index]), filename=output_pdf)
                            if cluster_size_total is None:  # true if no data was appended to the array yet
                                cluster_size_total = cluster_size_hist
                            else:
                                cluster_size_total = np.vstack([cluster_size_total, cluster_size_hist])

                            total_hits_2 += np.sum(occupancy)
                        progress_bar.finish()
                        if total_hits != total_hits_2:
                            logging.warning('Analysis shows inconsistent number of hits. Check needed!')
                        logging.info('Analyzed %d hits!', total_hits)
            cluster_size_total_out = out_file_h5.create_carray(out_file_h5.root, name='AllHistClusterSize', title='All Cluster Size Histograms', atom=tb.Atom.from_dtype(cluster_size_total.dtype), shape=cluster_size_total.shape, filters=filter_table)
            cluster_size_total_out[:] = cluster_size_total

def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.

    The list will contain an entry for each distinct line event call,
    so it is possible (and possibly useful) for a line number to appear
    more than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname:
        return None
    e = file_cache[filename]
    if not e.line_numbers:
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
    return e.line_numbers

def combine_hex(data):
    ''' Combine list of integer values to one big integer '''
    output = 0x00
    for i, value in enumerate(reversed(data)):
        output |= (value << i * 8)
    return output

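Worked example: the first byte in the list ends up as the most significant byte of the result:

    assert combine_hex([0x01, 0x02]) == 0x0102
    assert combine_hex([0xDE, 0xAD, 0xBE, 0xEF]) == 0xDEADBEEF
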
def dist_to_deg(self, distance, latitude):
    """
    distance = distance in meters
    latitude = latitude in degrees

    At the equator, the distance of one degree is equal in latitude and
    longitude. At higher latitudes, a degree longitude is shorter in length,
    proportional to cos(latitude).
    http://en.wikipedia.org/wiki/Decimal_degrees

    This function is part of a distance filter where the database 'distance'
    is in degrees. There's no good single-valued answer to this problem.
    The distance/degree is quite constant N/S around the earth (latitude),
    but varies over a huge range E/W (longitude).

    Split the difference: I'm going to average the degrees latitude and
    degrees longitude corresponding to the given distance. At high latitudes,
    this will be too short N/S and too long E/W. It splits the errors between
    the two axes. Errors are < 25 percent for latitudes < 60 degrees N/S.
    """
    # d * (180 / pi) / earthRadius  ==> degrees longitude
    # (degrees longitude) / cos(latitude)  ==> degrees latitude
    lat = latitude if latitude >= 0 else -1 * latitude
    rad2deg = 180 / pi
    earthRadius = 6378160.0
    latitudeCorrection = 0.5 * (1 + cos(lat * pi / 180))
    return (distance / (earthRadius * latitudeCorrection) * rad2deg)

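Rough numbers to make the averaging concrete (one degree of longitude at the equator is about 111.3 km; `geo_filter` stands in for whatever object carries this method):

    one_degree_m = 111319.0                     # ~1 degree of longitude at the equator
    geo_filter.dist_to_deg(one_degree_m, 0.0)   # -> ~1.0  (correction factor 1.0)
    geo_filter.dist_to_deg(one_degree_m, 60.0)  # -> ~1.33 (correction 0.5*(1+cos 60) = 0.75)
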
def avg_receive_rate(self):
    """Average receiving rate in MB/s over the entire run.

    This data may not exist if iperf was interrupted. If the result is not
    from a successful run, this property is None.
    """
    if not self._has_data or 'sum_received' not in self.result['end']:
        return None
    bps = self.result['end']['sum_received']['bits_per_second']
    return bps / 8 / 1024 / 1024

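The conversion is plain arithmetic on iperf's bits-per-second figure (MB here is really MiB):

    bps = 83886080                      # 80 Mibit/s reported by iperf
    mb_per_s = bps / 8 / 1024 / 1024    # -> 10.0
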
def _prepare_for_submission(self, tempfolder, inputdict):
    """
    This is the routine to be called when you want to create
    the input files and related stuff with a plugin.

    :param tempfolder: an aiida.common.folders.Folder subclass where
        the plugin should put all its files.
    :param inputdict: a dictionary with the input nodes, as they would
        be returned by get_inputdata_dict (without the Code!)
    """
    try:
        code = inputdict.pop(self.get_linkname('code'))
    except KeyError:
        raise InputValidationError("No code specified for this "
                                   "calculation")
    try:
        parameters = inputdict.pop(self.get_linkname('parameters'))
    except KeyError:
        raise InputValidationError("No parameters specified for this "
                                   "calculation")
    if not isinstance(parameters, ParameterData):
        raise InputValidationError("parameters is not of type "
                                   "ParameterData")
    try:
        structure = inputdict.pop(self.get_linkname('structure'))
    except KeyError:
        raise InputValidationError("No structure specified for this "
                                   "calculation")
    if not isinstance(structure, StructureData):
        raise InputValidationError("structure node is not of type "
                                   "StructureData")

    try:
        settings = inputdict.pop(self.get_linkname('settings'), None)
    except KeyError:
        pass
    if settings is not None:
        # note: the original checked `parameters` here again, which was a
        # copy-paste bug; the intent is clearly to validate `settings`
        if not isinstance(settings, ParameterData):
            raise InputValidationError("settings is not of type "
                                       "ParameterData")

    try:
        kpoints = inputdict.pop(self.get_linkname('kpoints'), None)
    except KeyError:
        pass
    if kpoints is not None:
        if not isinstance(kpoints, KpointsData):
            raise InputValidationError("kpoints is not of type KpointsData")

    ##############################
    # END OF INITIAL INPUT CHECK #
    ##############################

    # default atom getter: I will always retrieve the total energy at least
    default_atoms_getters = [["total_energy", ""]]

    # ================================
    # save the structure in ase format
    atoms = structure.get_ase()
    atoms.write(tempfolder.get_abs_path(self._input_aseatoms))

    # ================== prepare the arguments of functions ================
    parameters_dict = parameters.get_dict()
    settings_dict = settings.get_dict() if settings is not None else {}

    # ==================== fix the args of the optimizer
    optimizer = parameters_dict.pop("optimizer", None)
    if optimizer is not None:
        # Validation
        if not isinstance(optimizer, dict):
            raise InputValidationError("optimizer key must contain a dictionary")
        # get the name of the optimizer
        optimizer_name = optimizer.pop("name", None)
        if optimizer_name is None:
            raise InputValidationError("Don't have access to the optimizer name")
        # prepare the arguments to be passed to the optimizer class
        optimizer_argsstr = "atoms, " + convert_the_args(optimizer.pop("args", []))
        # prepare the arguments to be passed to optimizer.run()
        optimizer_runargsstr = convert_the_args(optimizer.pop("run_args", []))
        # prepare the import string
        optimizer_import_string = get_optimizer_impstr(optimizer_name)

    # ================= determine the calculator name and its import ====
    calculator = parameters_dict.pop("calculator", {})
    calculator_import_string = get_calculator_impstr(calculator.pop("name", None))

    # =================== prepare the arguments for the calculator call
    read_calc_args = calculator.pop("args", [])
    if read_calc_args is None:
        calc_argsstr = ""
    else:
        # transform a in "a" if a is a string (needed for formatting)
        calc_args = {}
        for k, v in read_calc_args.iteritems():
            if isinstance(v, basestring):
                the_v = '"{}"'.format(v)
            else:
                the_v = v
            calc_args[k] = the_v

        def return_a_function(v):
            try:
                has_magic = "@function" in v.keys()
            except AttributeError:
                has_magic = False

            if has_magic:
                args_dict = {}
                for k2, v2 in v['args'].iteritems():
                    if isinstance(v2, basestring):
                        the_v = '"{}"'.format(v2)
                    else:
                        the_v = v2
                    args_dict[k2] = the_v
                v2 = "{}({})".format(v['@function'],
                                     ", ".join(["{}={}".format(k_, v_)
                                                for k_, v_ in args_dict.iteritems()]))
                return v2
            else:
                return v

        tmp_list = ["{}={}".format(k, return_a_function(v))
                    for k, v in calc_args.iteritems()]
        calc_argsstr = ", ".join(tmp_list)

    # add kpoints if present
    if kpoints:
        # TODO: here only the mesh is supported
        # maybe kpoint lists are supported as well in ASE calculators
        try:
            mesh = kpoints.get_kpoints_mesh()[0]
        except AttributeError:
            raise InputValidationError("Coudn't find a mesh of kpoints"
                                       " in the KpointsData")
        calc_argsstr = ", ".join([calc_argsstr] +
                                 ["kpts=({},{},{})".format(*mesh)])

    # =============== prepare the methods of atoms.get(), to save results
    atoms_getters = default_atoms_getters + convert_the_getters(
        parameters_dict.pop("atoms_getters", []))

    # =============== prepare the methods of calculator.get(), to save results
    calculator_getters = convert_the_getters(
        parameters_dict.pop("calculator_getters", []))

    # ===================== build the strings with the module imports
    all_imports = ["import ase", 'import ase.io', "import json",
                   "import numpy", calculator_import_string]

    if optimizer is not None:
        all_imports.append(optimizer_import_string)

    try:
        if "PW" in calc_args['mode'].values():
            all_imports.append("from gpaw import PW")
    except KeyError:
        pass

    extra_imports = parameters_dict.pop("extra_imports", [])
    for i in extra_imports:
        if isinstance(i, basestring):
            all_imports.append("import {}".format(i))
        elif isinstance(i, (list, tuple)):
            if not all([isinstance(j, basestring) for j in i]):
                raise ValueError("extra import must contain strings")
            if len(i) == 2:
                all_imports.append("from {} import {}".format(*i))
            elif len(i) == 3:
                all_imports.append("from {} import {} as {}".format(*i))
            else:
                raise ValueError("format for extra imports not recognized")
        else:
            raise ValueError("format for extra imports not recognized")

    if self.get_withmpi():
        all_imports.append("from ase.parallel import paropen")

    all_imports_string = "\n".join(all_imports) + "\n"

    # =================== prepare the python script ========================
    input_txt = ""
    input_txt += get_file_header()
    input_txt += "# calculation pk: {}\n".format(self.pk)
    input_txt += "\n"
    input_txt += all_imports_string
    input_txt += "\n"

    pre_lines = parameters_dict.pop("pre_lines", None)
    if pre_lines is not None:
        if not isinstance(pre_lines, (list, tuple)):
            raise ValueError("Prelines must be a list of strings")
        if not all([isinstance(_, basestring) for _ in pre_lines]):
            raise ValueError("Prelines must be a list of strings")
        input_txt += "\n".join(pre_lines) + "\n\n"

    input_txt += "atoms = ase.io.read('{}')\n".format(self._input_aseatoms)
    input_txt += "\n"
    input_txt += "calculator = custom_calculator({})\n".format(calc_argsstr)
    input_txt += "atoms.set_calculator(calculator)\n"
    input_txt += "\n"

    if optimizer is not None:
        # here block the trajectory file name: trajectory = 'aiida.traj'
        input_txt += "optimizer = custom_optimizer({})\n".format(optimizer_argsstr)
        input_txt += "optimizer.run({})\n".format(optimizer_runargsstr)
        input_txt += "\n"

    # now dump / calculate the results
    input_txt += "results = {}\n"
    for getter, getter_args in atoms_getters:
        input_txt += "results['{}'] = atoms.get_{}({})\n".format(getter,
                                                                 getter,
                                                                 getter_args)
    input_txt += "\n"

    for getter, getter_args in calculator_getters:
        input_txt += "results['{}'] = calculator.get_{}({})\n".format(getter,
                                                                      getter,
                                                                      getter_args)
    input_txt += "\n"

    # Convert to lists
    input_txt += "for k,v in results.iteritems():\n"
    input_txt += "    if isinstance(results[k],(numpy.matrix,numpy.ndarray)):\n"
    input_txt += "        results[k] = results[k].tolist()\n"
    input_txt += "\n"

    post_lines = parameters_dict.pop("post_lines", None)
    if post_lines is not None:
        if not isinstance(post_lines, (list, tuple)):
            raise ValueError("Postlines must be a list of strings")
        if not all([isinstance(_, basestring) for _ in post_lines]):
            raise ValueError("Postlines must be a list of strings")
        input_txt += "\n".join(post_lines) + "\n\n"

    # Dump results to file
    right_open = "paropen" if self.get_withmpi() else "open"
    input_txt += "with {}('{}', 'w') as f:\n".format(right_open,
                                                     self._OUTPUT_FILE_NAME)
    input_txt += "    json.dump(results,f)"
    input_txt += "\n"

    # Dump trajectory if present
    if optimizer is not None:
        input_txt += "atoms.write('{}')\n".format(self._output_aseatoms)
        input_txt += "\n"

    # write all the input script to a file
    input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
    with open(input_filename, 'w') as infile:
        infile.write(input_txt)

    # ============================ calcinfo ================================
    # TODO: look at the qmmm infoL: it might be necessary to put
    # some singlefiles in the directory.
    # right now it has to be taken care in the pre_lines
    local_copy_list = []
    remote_copy_list = []
    additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST", [])

    calcinfo = CalcInfo()
    calcinfo.uuid = self.uuid
    # Empty command line by default
    # calcinfo.cmdline_params = settings_dict.pop('CMDLINE', [])
    calcinfo.local_copy_list = local_copy_list
    calcinfo.remote_copy_list = remote_copy_list

    codeinfo = CodeInfo()
    codeinfo.cmdline_params = [self._INPUT_FILE_NAME]
    # calcinfo.stdin_name = self._INPUT_FILE_NAME
    codeinfo.stdout_name = self._TXT_OUTPUT_FILE_NAME
    codeinfo.code_uuid = code.uuid
    calcinfo.codes_info = [codeinfo]

    # Retrieve files
    calcinfo.retrieve_list = []
    calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME)
    calcinfo.retrieve_list.append(self._output_aseatoms)
    calcinfo.retrieve_list += additional_retrieve_list

    # TODO: I should have two ways of running it: with gpaw-python in parallel
    # and executing python if in serial
    return calcinfo

def load_average(self):
    """
    Returns the current load average.
    """
    with io.open(self.load_average_file, 'r') as f:
        file_columns = f.readline().strip().split()
        return float(file_columns[self._load_average_file_column])

def _create_package_hierarchy(prefix=settings.TEMP_DIR, book_id=None):
    """
    Create a hierarchy of directories, as required by the specification.

    `root_dir` is the root of the package, generated using
    :attr:`settings.TEMP_DIR` and :func:`_get_package_name`.

    `orig_dir` is the path to the directory where the data files are stored.

    `metadata_dir` is the path to the directory with the MODS metadata.

    Args:
        book_id (str, default None): UUID of the book.
        prefix (str, default settings.TEMP_DIR): Where the package will be
            stored. Default :attr:`settings.TEMP_DIR`.

    Warning:
        If the `root_dir` exists, it is REMOVED!

    Returns:
        list of str: root_dir, orig_dir, metadata_dir
    """
    root_dir = _get_package_name(book_id=book_id, prefix=prefix)

    if os.path.exists(root_dir):
        shutil.rmtree(root_dir)
    os.mkdir(root_dir)

    original_dir = os.path.join(root_dir, "original")
    metadata_dir = os.path.join(root_dir, "metadata")

    os.mkdir(original_dir)
    os.mkdir(metadata_dir)

    return root_dir, original_dir, metadata_dir

def all(self, query=None, **kwargs):
    """
    Gets all organizations.
    """
    return super(OrganizationsProxy, self).all(query=query)

def SetPercentageView(self, percentageView):
    """Set whether to display percentage or absolute values"""
    self.percentageView = percentageView
    self.percentageMenuItem.Check(self.percentageView)
    self.percentageViewTool.SetValue(self.percentageView)
    total = self.adapter.value(self.loader.get_root(self.viewType))
    for control in self.ProfileListControls:
        control.SetPercentage(self.percentageView, total)
    self.adapter.SetPercentage(self.percentageView, total)

def get(self,
        variable_path: str,
        default: t.Optional[t.Any] = None,
        coerce_type: t.Optional[t.Type] = None,
        coercer: t.Optional[t.Callable] = None,
        required: bool = False,
        **kwargs):
    """
    Tries to read a ``variable_path`` from each of the passed parsers.
    It stops if read was successful and returns a retrieved value.
    If none of the parsers contain a value for the specified path
    it returns ``default``.

    :param variable_path: a path to variable in config
    :param default: a default value if ``variable_path`` is not present anywhere
    :param coerce_type: cast a result to a specified type
    :param coercer: perform the type casting with specified callback
    :param required: raise ``RequiredValueIsEmpty`` if no ``default`` and no result
    :param kwargs: additional options to all parsers
    :return: **the first successfully read** value from the list of parser
        instances or ``default``

    :raises config.exceptions.RequiredValueIsEmpty: if nothing is read,
        ``required`` flag is set, and there's no ``default`` specified
    """
    for p in self.parsers:
        try:
            val = p.get(
                variable_path, default=self.sentinel,
                coerce_type=coerce_type, coercer=coercer,
                **kwargs
            )
            if val != self.sentinel:
                self.enqueue(variable_path, p, val)
                return val
        except Exception as e:
            if not self.silent:
                raise
            if self.suppress_logs:
                continue
            self.logger.error('Parser {0} cannot get key `{1}`: {2}'.format(
                p.__class__.__name__,
                variable_path,
                str(e)
            ))

    self.enqueue(variable_path, value=default)

    if not default and required:
        raise exceptions.RequiredValueIsEmpty(
            'No default provided and no value read for `{0}`'.format(variable_path))

    return default

def pre_build(local_root, versions):
    """Build docs for all versions to determine root directory and master_doc names.

    Need to build docs to (a) avoid filename collision with files from root_ref and
    branch/tag names and (b) determine master_doc config values for all versions
    (in case master_doc changes from e.g. contents.rst to index.rst between versions).

    Exports all commits into a temporary directory and returns the path to avoid
    re-exporting during the final build.

    :param str local_root: Local path to git root directory.
    :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.

    :return: Tempdir path with exported commits as subdirectories.
    :rtype: str
    """
    log = logging.getLogger(__name__)
    exported_root = TempDir(True).name

    # Extract all.
    for sha in {r['sha'] for r in versions.remotes}:
        target = os.path.join(exported_root, sha)
        log.debug('Exporting %s to temporary directory.', sha)
        export(local_root, sha, target)

    # Build root.
    remote = versions[Config.from_context().root_ref]
    with TempDir() as temp_dir:
        log.debug('Building root (before setting root_dirs) in temporary directory: %s', temp_dir)
        source = os.path.dirname(os.path.join(exported_root, remote['sha'], remote['conf_rel_path']))
        build(source, temp_dir, versions, remote['name'], True)
        existing = os.listdir(temp_dir)

    # Define root_dir for all versions to avoid file name collisions.
    for remote in versions.remotes:
        root_dir = RE_INVALID_FILENAME.sub('_', remote['name'])
        while root_dir in existing:
            root_dir += '_'
        remote['root_dir'] = root_dir
        log.debug('%s root directory is %s', remote['name'], root_dir)
        existing.append(root_dir)

    # Get found_docs and master_doc values for all versions.
    for remote in list(versions.remotes):
        log.debug('Partially running sphinx-build to read configuration for: %s', remote['name'])
        source = os.path.dirname(os.path.join(exported_root, remote['sha'], remote['conf_rel_path']))
        try:
            config = read_config(source, remote['name'])
        except HandledError:
            log.warning('Skipping. Will not be building: %s', remote['name'])
            versions.remotes.pop(versions.remotes.index(remote))
            continue
        remote['found_docs'] = config['found_docs']
        remote['master_doc'] = config['master_doc']

    return exported_root

def validate_json_schema(data, schema, name="task"):
    """Given data and a jsonschema, let's validate it.

    This happens for tasks and chain of trust artifacts.

    Args:
        data (dict): the json to validate.
        schema (dict): the jsonschema to validate against.
        name (str, optional): the name of the json, for exception messages.
            Defaults to "task".

    Raises:
        ScriptWorkerTaskException: on failure
    """
    try:
        jsonschema.validate(data, schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise ScriptWorkerTaskException(
            "Can't validate {} schema!\n{}".format(name, str(exc)),
            exit_code=STATUSES['malformed-payload']
        )

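A usage sketch with a toy schema (the schema contents are illustrative; ScriptWorkerTaskException and STATUSES come from the surrounding scriptworker module):

    schema = {
        "type": "object",
        "properties": {"taskId": {"type": "string"}},
        "required": ["taskId"],
    }
    validate_json_schema({"taskId": "abc123"}, schema)   # passes silently
    validate_json_schema({}, schema, name="payload")     # raises ScriptWorkerTaskException
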
def compress_flood_fill_regions(targets):
    """Generate a reduced set of flood fill parameters.

    Parameters
    ----------
    targets : {(x, y) : set([c, ...]), ...}
        For each used chip a set of core numbers onto which an application
        should be loaded. E.g., the output of
        :py:func:`~rig.place_and_route.util.build_application_map` when
        indexed by an application.

    Yields
    ------
    (region, core mask)
        Pair of integers which represent a region of a SpiNNaker machine and a
        core mask of selected cores within that region for use in
        flood-filling an application. `region` and `core_mask` are both
        integer representations of bit fields that are understood by SCAMP.

        The pairs are yielded in an order suitable for direct use with SCAMP's
        flood-fill core select (FFCS) method of loading.
    """
    t = RegionCoreTree()

    for (x, y), cores in iteritems(targets):
        for p in cores:
            t.add_core(x, y, p)

    return sorted(t.get_regions_and_coremasks())

def getTypeStr(_type):
    r"""Gets the string representation of the given type.
    """
    if isinstance(_type, CustomType):
        return str(_type)

    if hasattr(_type, '__name__'):
        return _type.__name__

    return ''

r"""Gets the string representation of the given type.
def set_state_view(self, request):
    """
    Changes the experiment state
    """
    if not request.user.has_perm('experiments.change_experiment'):
        return HttpResponseForbidden()
    try:
        state = int(request.POST.get("state", ""))
    except ValueError:
        return HttpResponseBadRequest()
    try:
        experiment = Experiment.objects.get(name=request.POST.get("experiment"))
    except Experiment.DoesNotExist:
        return HttpResponseBadRequest()
    experiment.state = state
    if state == 0:
        experiment.end_date = timezone.now()
    else:
        experiment.end_date = None
    experiment.save()
    return HttpResponse()

def make_dataset(self, dataset, raise_if_exists=False, body=None):
    """Creates a new dataset with the default permissions.

    :param dataset:
    :type dataset: BQDataset
    :param raise_if_exists: whether to raise an exception if the dataset
        already exists.
    :raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the
        dataset exists
    """
    if body is None:
        body = {}

    try:
        # Construct a message body in the format required by
        # https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.datasets.html#insert
        body['datasetReference'] = {
            'projectId': dataset.project_id,
            'datasetId': dataset.dataset_id
        }
        if dataset.location is not None:
            body['location'] = dataset.location
        self.client.datasets().insert(projectId=dataset.project_id,
                                      body=body).execute()
    except http.HttpError as ex:
        if ex.resp.status == 409:
            if raise_if_exists:
                raise luigi.target.FileAlreadyExists()
        else:
            raise

def setup_statemachine(self):
    """Setup and start state machine"""
    machine = QtCore.QStateMachine()

    #  _______________
    # |               |
    # |               |
    # |               |
    # |_______________|
    #
    group = util.QState("group", QtCore.QState.ParallelStates, machine)

    #  _______________
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________| - Parallel State
    #
    visibility = util.QState("visibility", group)

    hidden = util.QState("hidden", visibility)
    visible = util.QState("visible", visibility)

    #  _______________
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________| - Parallel State
    #
    operation = util.QState("operation", group)

    ready = util.QState("ready", operation)
    collecting = util.QState("collecting", operation)
    validating = util.QState("validating", operation)
    extracting = util.QState("extracting", operation)
    integrating = util.QState("integrating", operation)
    finished = util.QState("finished", operation)
    repairing = util.QState("repairing", operation)
    initialising = util.QState("initialising", operation)
    stopping = util.QState("stopping", operation)
    stopped = util.QState("stopped", operation)
    saving = util.QState("saving", operation)

    #  _______________
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________| - Parallel State
    #
    errored = util.QState("errored", group)

    clean = util.QState("clean", errored)
    dirty = util.QState("dirty", errored)

    #  _______________
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________| - Parallel State
    # States that block the underlying GUI
    suspended = util.QState("suspended", group)

    alive = util.QState("alive", suspended)
    acting = util.QState("acting", suspended)
    acted = QtCore.QHistoryState(operation)
    acted.setDefaultState(ready)

    #  _______________
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________|
    # | ____     ____ |
    # ||    |---|    ||
    # ||____|---|____||
    # |_______________|
    #
    hidden.addTransition(self.show, visible)
    visible.addTransition(self.hide, hidden)

    ready.addTransition(self.acting, acting)
    ready.addTransition(self.validating, validating)
    ready.addTransition(self.initialising, initialising)
    ready.addTransition(self.repairing, repairing)
    ready.addTransition(self.saving, saving)
    saving.addTransition(self.saved, ready)
    collecting.addTransition(self.initialised, ready)
    collecting.addTransition(self.stopping, stopping)
    validating.addTransition(self.stopping, stopping)
    validating.addTransition(self.finished, finished)
    validating.addTransition(self.extracting, extracting)
    extracting.addTransition(self.stopping, stopping)
    extracting.addTransition(self.finished, finished)
    extracting.addTransition(self.integrating, integrating)
    integrating.addTransition(self.stopping, stopping)
    integrating.addTransition(self.finished, finished)
    finished.addTransition(self.initialising, initialising)
    finished.addTransition(self.acting, acting)
    initialising.addTransition(self.collecting, collecting)
    stopping.addTransition(self.acted, acted)
    stopping.addTransition(self.finished, finished)

    dirty.addTransition(self.initialising, clean)
    clean.addTransition(self.changed, dirty)

    alive.addTransition(self.acting, acting)
    acting.addTransition(self.acted, acted)

    # Set initial states
    for compound, state in {machine: group,
                            visibility: hidden,
                            operation: ready,
                            errored: clean,
                            suspended: alive}.items():
        compound.setInitialState(state)

    # Make connections
    for state in (hidden,
                  visible,
                  ready,
                  collecting,
                  validating,
                  extracting,
                  integrating,
                  finished,
                  repairing,
                  initialising,
                  stopping,
                  saving,
                  stopped,
                  dirty,
                  clean,
                  acting,
                  alive,
                  acted):
        state.entered.connect(
            lambda state=state: self.state_changed.emit(state.name))

    machine.start()
    return machine

def reduce_claims(query_claims):
    """
    returns claims as reduced dict {P: [Q's or values]}
        P = property
        Q = item
    """
    claims = collections.defaultdict(list)
    for claim, entities in query_claims.items():
        for ent in entities:
            try:
                snak = ent.get('mainsnak')
                snaktype = snak.get('snaktype')
                value = snak.get('datavalue').get('value')
            except AttributeError:
                claims[claim] = []
            try:
                if snaktype != 'value':
                    val = snaktype
                elif value.get('id'):
                    val = value.get('id')
                elif value.get('text'):
                    val = value.get('text')
                elif value.get('time'):
                    val = value.get('time')
                else:
                    val = value
            except AttributeError:
                val = value
            if not val or not [x for x in val if x]:
                raise ValueError("%s %s" % (claim, ent))
            claims[claim].append(val)
    return dict(claims)

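A toy input showing the reduction; the snak layout mirrors Wikidata's API shape, with the property ids and values chosen for illustration:

    query_claims = {
        "P31": [{"mainsnak": {"snaktype": "value",
                              "datavalue": {"value": {"id": "Q5"}}}}],
        "P569": [{"mainsnak": {"snaktype": "somevalue"}}],
    }
    reduce_claims(query_claims)
    # -> {'P31': ['Q5'], 'P569': ['somevalue']}
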
def parse_bool(value):
    """
    Parse string to bool.

    :param str value: String value to parse as bool
    :return bool:
    """
    boolean = parse_str(value).capitalize()

    if boolean in ("True", "Yes", "On", "1"):
        return True
    elif boolean in ("False", "No", "Off", "0"):
        return False
    else:
        raise ValueError('Unable to parse boolean value "{}"'.format(value))

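Illustrative calls, assuming parse_str simply normalizes its argument to a string:

    parse_bool("yes")    # -> True  ("yes".capitalize() == "Yes")
    parse_bool("Off")    # -> False
    parse_bool("1")      # -> True
    parse_bool("maybe")  # raises ValueError
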
def render(self, message=None, css_class='alert', form_contents=None,
           status=200, title="Python OpenID Consumer Example",
           sreg_data=None, pape_data=None):
    """Render a page."""
    self.send_response(status)
    self.pageHeader(title)
    if message:
        self.wfile.write("<div class='%s'>" % (css_class,))
        self.wfile.write(message)
        self.wfile.write("</div>")
    if sreg_data is not None:
        self.renderSREG(sreg_data)
    if pape_data is not None:
        self.renderPAPE(pape_data)
    self.pageFooter(form_contents)

def covertype():
    """Builds the Covertype data set."""
    import sklearn.datasets  # pylint: disable=g-import-not-at-top
    data = sklearn.datasets.covtype.fetch_covtype()
    features = data.data
    labels = data.target

    # Normalize features and append a column of ones for the intercept.
    features -= features.mean(0)
    features /= features.std(0)
    features = np.hstack([features, np.ones([features.shape[0], 1])])
    features = tf.cast(features, dtype=tf.float32)

    # Binarize outcomes on whether it is a specific category.
    _, counts = np.unique(labels, return_counts=True)
    specific_category = np.argmax(counts)
    labels = (labels == specific_category)
    labels = tf.cast(labels, dtype=tf.int32)
    return features, labels

def t_fold_end(self, t):
    r'\n+\ *'
    column = find_column(t)
    indent = self.indent_stack[-1]
    if column < indent:
        rollback_lexpos(t)
    if column <= indent:
        t.lexer.pop_state()
        t.type = 'B_FOLD_END'
    if column > indent:
        t.type = 'SCALAR'
    return t

def targets(tgt, tgt_type='glob', **kwargs):
    '''
    Return the targets from the directory of flat yaml files,
    checks opts for location.
    '''
    roster_dir = __opts__.get('roster_dir', '/etc/salt/roster.d')
    # Match the targets before rendering to avoid opening files unnecessarily.
    raw = dict.fromkeys(os.listdir(roster_dir), '')
    log.debug('Filtering %d minions in %s', len(raw), roster_dir)
    matched_raw = __utils__['roster_matcher.targets'](raw, tgt, tgt_type, 'ipv4')
    rendered = {minion_id: _render(os.path.join(roster_dir, minion_id), **kwargs)
                for minion_id in matched_raw}
    pruned_rendered = {id_: data for id_, data in rendered.items() if data}
    log.debug('Matched %d minions with tgt=%s and tgt_type=%s.'
              ' Discarded %d matching filenames because they had rendering errors.',
              len(rendered), tgt, tgt_type, len(rendered) - len(pruned_rendered))
    return pruned_rendered

Return the targets from the directory of flat yaml files, checks opts for location.
def decode_body(headers: MutableMapping, body: bytes) -> dict:
    """
    Decode the response body

    For 'application/json' content-type load the body as a dictionary

    Args:
        headers: Response headers
        body: Response body

    Returns:
        decoded body
    """
    type_, encoding = parse_content_type(headers)
    decoded_body = body.decode(encoding)

    # One API endpoint returns the bare string `ok` instead of JSON; wrap
    # plain-text bodies in a dict so callers always get a consistent payload.
    if type_ == "application/json":
        payload = json.loads(decoded_body)
    else:
        if decoded_body == "ok":
            payload = {"ok": True}
        else:
            payload = {"ok": False, "data": decoded_body}

    return payload
Decode the response body For 'application/json' content-type load the body as a dictionary Args: headers: Response headers body: Response body Returns: decoded body
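A sketch of the plain-text fallback path, assuming the module's parse_content_type helper returns a (type, encoding) tuple such as ('text/plain', 'utf-8') for these headers:

headers = {'content-type': 'text/plain; charset=utf-8'}
print(decode_body(headers, b'ok'))            # {'ok': True}
print(decode_body(headers, b'rate_limited'))  # {'ok': False, 'data': 'rate_limited'}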
def getNextRecord(self, useCache=True):
    """
    Returns next available data record from the file.

    :returns: a data row (a list or tuple) if available; None, if no more
              records in the table (End of Stream - EOS); empty sequence
              (list or tuple) when timing out while waiting for the next
              record.
    """
    assert self._file is not None
    assert self._mode == self._FILE_READ_MODE

    # Read the line
    try:
        line = self._reader.next()
    except StopIteration:
        if self.rewindAtEOF:
            if self._recordCount == 0:
                raise Exception("The source is configured to reset at EOF but "
                                "'%s' appears to be empty" % self._filename)
            self.rewind()
            line = self._reader.next()
        else:
            return None

    # Keep score of how many records were read
    self._recordCount += 1

    # Split the line into text fields and convert each text field to a
    # Python object. If a value is missing (empty string), encode it
    # appropriately for upstream consumers: for numeric types this means
    # replacing missing data with a sentinel value; for string types the
    # empty string can be left in place.
    record = []
    for i, f in enumerate(line):
        #print "DEBUG: Evaluating field @ index %s: %r" % (i, f)
        #sys.stdout.flush()
        if f in self._missingValues:
            record.append(SENTINEL_VALUE_FOR_MISSING_DATA)
        else:
            # either there is valid data, or the field is string type,
            # in which case the adapter does the right thing by default
            record.append(self._adapters[i](f))

    return record
Returns next available data record from the file. :returns: a data row (a list or tuple) if available; None, if no more records in the table (End of Stream - EOS); empty sequence (list or tuple) when timing out while waiting for the next record.
def download_file(pk):
    """Download the file referenced in `models.ReleaseFile` with the given pk.
    """
    release_file = models.ReleaseFile.objects.get(pk=pk)
    logging.info("Downloading %s", release_file.url)

    proxies = None
    if settings.LOCALSHOP_HTTP_PROXY:
        proxies = settings.LOCALSHOP_HTTP_PROXY
    response = requests.get(release_file.url, stream=True, proxies=proxies)

    # Write the file to the django file field
    filename = os.path.basename(release_file.url)

    # Setting the size manually since Django can't figure it out from
    # the raw HTTPResponse
    if 'content-length' in response.headers:
        size = int(response.headers['content-length'])
    else:
        size = len(response.content)

    # Setting the content type by first looking at the response header
    # and falling back to guessing it from the filename
    default_content_type = 'application/octet-stream'
    content_type = response.headers.get('content-type')
    if content_type is None or content_type == default_content_type:
        content_type = mimetypes.guess_type(filename)[0] or default_content_type

    # Using Django's temporary file upload system to not risk memory
    # overflows
    with TemporaryUploadedFile(name=filename, size=size, charset='utf-8',
                               content_type=content_type) as temp_file:
        temp_file.write(response.content)
        temp_file.seek(0)

        # Validate the md5 hash of the downloaded file
        md5_hash = md5_hash_file(temp_file)
        if md5_hash != release_file.md5_digest:
            logging.error("MD5 hash mismatch: %s (expected: %s)" % (
                md5_hash, release_file.md5_digest))
            return

        release_file.distribution.save(filename, temp_file)
        release_file.save()
    logging.info("Complete")
Download the file referenced in `models.ReleaseFile` with the given pk.
def get_file_search(self, query): """Performs advanced search on samples, matching certain binary/ metadata/detection criteria. Possible queries: file size, file type, first or last submission to VT, number of positives, bynary content, etc. Args: query: dictionary with search arguments Example: 'query': 'type:peexe size:90kb+ positives:5+ behaviour:"taskkill"' Returns: A dict with the VT report. """ api_name = 'virustotal-file-search' (all_responses, query) = self._bulk_cache_lookup(api_name, query) response_chunks = self._request_reports("query", query, 'file/search') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
Performs advanced search on samples, matching certain binary/ metadata/detection criteria. Possible queries: file size, file type, first or last submission to VT, number of positives, bynary content, etc. Args: query: dictionary with search arguments Example: 'query': 'type:peexe size:90kb+ positives:5+ behaviour:"taskkill"' Returns: A dict with the VT report.
def geo(self): """ General image geo information. Returns ------- dict a dictionary with keys `xmin`, `xmax`, `xres`, `rotation_x`, `ymin`, `ymax`, `yres`, `rotation_y` """ out = dict(zip(['xmin', 'xres', 'rotation_x', 'ymax', 'rotation_y', 'yres'], self.raster.GetGeoTransform())) # note: yres is negative! out['xmax'] = out['xmin'] + out['xres'] * self.cols out['ymin'] = out['ymax'] + out['yres'] * self.rows return out
General image geo information. Returns ------- dict a dictionary with keys `xmin`, `xmax`, `xres`, `rotation_x`, `ymin`, `ymax`, `yres`, `rotation_y`
def set_mapper_index(self, index, mapper): """Set the mapper to the given index :param index: the index to set :type index: QtCore.QModelIndex :param mapper: the mapper to set :type mapper: QtGui.QDataWidgetMapper :returns: None :rtype: None :raises: None """ parent = index.parent() mapper.setRootIndex(parent) mapper.setCurrentModelIndex(index)
Set the mapper to the given index :param index: the index to set :type index: QtCore.QModelIndex :param mapper: the mapper to set :type mapper: QtGui.QDataWidgetMapper :returns: None :rtype: None :raises: None
def read_config(config):
    """Read config file and return the first uncommented line
    """
    for line in config.splitlines():
        line = line.lstrip()
        if line and not line.startswith("#"):
            return line
    return ""
Read config file and return the first uncommented line
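For example, comments and blank lines are skipped and the first real directive is returned (the config text is made up):

config = '# mirror list\n\n    # indented comment\nMIRROR=https://example.com/repo\n'
print(read_config(config))  # 'MIRROR=https://example.com/repo'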
def rename(self, path, raise_if_exists=False): """ Does not change self.path. Unlike ``move_dir()``, ``rename()`` might cause nested directories. See spotify/luigi#522 """ if isinstance(path, HdfsTarget): path = path.path if raise_if_exists and self.fs.exists(path): raise RuntimeError('Destination exists: %s' % path) self.fs.rename(self.path, path)
Does not change self.path. Unlike ``move_dir()``, ``rename()`` might cause nested directories. See spotify/luigi#522
def html_abstract(self): """HTML5-formatted document abstract (`str`).""" return self.format_abstract(format='html5', deparagraph=False, mathjax=False, smart=True)
HTML5-formatted document abstract (`str`).
def get_version(): """ Return tmux version. If tmux is built from git master, the version returned will be the latest version appended with -master, e.g. ``2.4-master``. If using OpenBSD's base system tmux, the version will have ``-openbsd`` appended to the latest version, e.g. ``2.4-openbsd``. Returns ------- :class:`distutils.version.LooseVersion` tmux version according to :func:`libtmux.common.which`'s tmux """ proc = tmux_cmd('-V') if proc.stderr: if proc.stderr[0] == 'tmux: unknown option -- V': if sys.platform.startswith("openbsd"): # openbsd has no tmux -V return LooseVersion('%s-openbsd' % TMUX_MAX_VERSION) raise exc.LibTmuxException( 'libtmux supports tmux %s and greater. This system' ' is running tmux 1.3 or earlier.' % TMUX_MIN_VERSION ) raise exc.VersionTooLow(proc.stderr) version = proc.stdout[0].split('tmux ')[1] # Allow latest tmux HEAD if version == 'master': return LooseVersion('%s-master' % TMUX_MAX_VERSION) version = re.sub(r'[a-z-]', '', version) return LooseVersion(version)
Return tmux version. If tmux is built from git master, the version returned will be the latest version appended with -master, e.g. ``2.4-master``. If using OpenBSD's base system tmux, the version will have ``-openbsd`` appended to the latest version, e.g. ``2.4-openbsd``. Returns ------- :class:`distutils.version.LooseVersion` tmux version according to :func:`libtmux.common.which`'s tmux
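The cleanup regex near the end can be checked in isolation; letters and dashes are stripped so a release-candidate string still parses numerically (the '2.9a' input is just an illustrative value):

import re
from distutils.version import LooseVersion

raw = '2.9a'
print(LooseVersion(re.sub(r'[a-z-]', '', raw)))  # 2.9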
def emitError(self, level): '''determine if a level should print to stderr, includes all levels but INFO and QUIET''' if level in [ABORT, ERROR, WARNING, VERBOSE, VERBOSE1, VERBOSE2, VERBOSE3, DEBUG]: return True return False
determine if a level should print to stderr, includes all levels but INFO and QUIET
def intersect(self, range_):
    """Remove variants whose versions fall outside of the given range."""
    self.solver.intersection_broad_tests_count += 1

    if range_.is_any():
        return self

    if self.solver.optimised:
        if range_ in self.been_intersected_with:
            return self

    if self.pr:
        self.pr.passive("intersecting %s wrt range '%s'...", self, range_)

    self.solver.intersection_tests_count += 1

    with self.solver.timed(self.solver.intersection_time):
        # this is faster than iter_intersecting :(
        entries = [x for x in self.entries if x.version in range_]

    if not entries:
        return None
    elif len(entries) < len(self.entries):
        copy_ = self._copy(entries)
        copy_.been_intersected_with.add(range_)
        return copy_
    else:
        self.been_intersected_with.add(range_)
        return self
Remove variants whose versions fall outside of the given range.
def handle_annotations_url(self, line: str, position: int, tokens: ParseResults) -> ParseResults: """Handle statements like ``DEFINE ANNOTATION X AS URL "Y"``. :raises: RedefinedAnnotationError """ keyword = tokens['name'] self.raise_for_redefined_annotation(line, position, keyword) url = tokens['url'] self.annotation_url_dict[keyword] = url if self.skip_validation: return tokens self.annotation_to_term[keyword] = self.manager.get_annotation_entry_names(url) return tokens
Handle statements like ``DEFINE ANNOTATION X AS URL "Y"``. :raises: RedefinedAnnotationError
def Suratman(L, rho, mu, sigma): r'''Calculates Suratman number, `Su`, for a fluid with the given characteristic length, density, viscosity, and surface tension. .. math:: \text{Su} = \frac{\rho\sigma L}{\mu^2} Parameters ---------- L : float Characteristic length [m] rho : float Density of fluid, [kg/m^3] mu : float Viscosity of fluid, [Pa*s] sigma : float Surface tension, [N/m] Returns ------- Su : float Suratman number [] Notes ----- Also known as Laplace number. Used in two-phase flow, especially the bubbly-slug regime. No confusion regarding the definition of this group has been observed. .. math:: \text{Su} = \frac{\text{Re}^2}{\text{We}} =\frac{\text{Inertia}\cdot \text{Surface tension} }{\text{(viscous forces)}^2} The oldest reference to this group found by the author is in 1963, from [2]_. Examples -------- >>> Suratman(1E-4, 1000., 1E-3, 1E-1) 10000.0 References ---------- .. [1] Sen, Nilava. "Suratman Number in Bubble-to-Slug Flow Pattern Transition under Microgravity." Acta Astronautica 65, no. 3-4 (August 2009): 423-28. doi:10.1016/j.actaastro.2009.02.013. .. [2] Catchpole, John P., and George. Fulford. "DIMENSIONLESS GROUPS." Industrial & Engineering Chemistry 58, no. 3 (March 1, 1966): 46-60. doi:10.1021/ie50675a012. ''' return rho*sigma*L/(mu*mu)
r'''Calculates Suratman number, `Su`, for a fluid with the given characteristic length, density, viscosity, and surface tension. .. math:: \text{Su} = \frac{\rho\sigma L}{\mu^2} Parameters ---------- L : float Characteristic length [m] rho : float Density of fluid, [kg/m^3] mu : float Viscosity of fluid, [Pa*s] sigma : float Surface tension, [N/m] Returns ------- Su : float Suratman number [] Notes ----- Also known as Laplace number. Used in two-phase flow, especially the bubbly-slug regime. No confusion regarding the definition of this group has been observed. .. math:: \text{Su} = \frac{\text{Re}^2}{\text{We}} =\frac{\text{Inertia}\cdot \text{Surface tension} }{\text{(viscous forces)}^2} The oldest reference to this group found by the author is in 1963, from [2]_. Examples -------- >>> Suratman(1E-4, 1000., 1E-3, 1E-1) 10000.0 References ---------- .. [1] Sen, Nilava. "Suratman Number in Bubble-to-Slug Flow Pattern Transition under Microgravity." Acta Astronautica 65, no. 3-4 (August 2009): 423-28. doi:10.1016/j.actaastro.2009.02.013. .. [2] Catchpole, John P., and George. Fulford. "DIMENSIONLESS GROUPS." Industrial & Engineering Chemistry 58, no. 3 (March 1, 1966): 46-60. doi:10.1021/ie50675a012.
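The Re^2/We identity from the Notes can be verified numerically, assuming the usual definitions Re = rho*v*L/mu and We = rho*v**2*L/sigma (the velocity v cancels):

rho, mu, sigma, L, v = 1000.0, 1E-3, 1E-1, 1E-4, 2.5
Re = rho * v * L / mu          # 250.0
We = rho * v ** 2 * L / sigma  # 6.25
print(Re ** 2 / We)                 # 10000.0
print(Suratman(L, rho, mu, sigma))  # 10000.0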
def get_all_metadata( self, bucket: str, key: str ) -> dict: """ Retrieves all the metadata for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: the metadata """ try: return self.s3_client.head_object( Bucket=bucket, Key=key ) except botocore.exceptions.ClientError as ex: if str(ex.response['Error']['Code']) == \ str(requests.codes.not_found): raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex raise BlobStoreUnknownError(ex)
Retrieves all the metadata for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: the metadata
def _get_future_tasks(self): """Assemble a list of future alerts""" self.alerts = {} now = std_now() for task in objectmodels['task'].find({'alert_time': {'$gt': now}}): self.alerts[task.alert_time] = task self.log('Found', len(self.alerts), 'future tasks')
Assemble a list of future alerts
def has_next_assessment_part(self, assessment_part_id):
    """This supports the basic simple sequence case. Can be overridden
    in a record for other cases"""
    if not self.supports_child_ordering or not self.supports_simple_child_sequencing:
        raise AttributeError()  # Only available through a record extension
    if 'childIds' in self._my_map and str(assessment_part_id) in self._my_map['childIds']:
        return self._my_map['childIds'][-1] != str(assessment_part_id)
    raise errors.NotFound('the Part with Id ' + str(assessment_part_id) + ' is not a child of this Part')
This supports the basic simple sequence case. Can be overridden in a record for other cases
def output_filename(output_dir, key_handle, public_id): """ Return an output filename for a generated AEAD. Creates a hashed directory structure using the last three bytes of the public id to get equal usage. """ parts = [output_dir, key_handle] + pyhsm.util.group(public_id, 2) path = os.path.join(*parts) if not os.path.isdir(path): os.makedirs(path) return os.path.join(path, public_id)
Return an output filename for a generated AEAD. Creates a hashed directory structure using the last three bytes of the public id to get equal usage.
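A sketch of the resulting layout; the stand-in group below assumes pyhsm.util.group chunks a string into fixed-size pieces, which is inferred from the call site rather than confirmed:

def group(s, n):  # hypothetical stand-in for pyhsm.util.group
    return [s[i:i + n] for i in range(0, len(s), n)]

parts = ['aeads', '0x2000'] + group('ftftftcccc', 2)
print('/'.join(parts + ['ftftftcccc']))
# aeads/0x2000/ft/ft/ft/cc/cc/ftftftcccc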
def deprecate(message): """Loudly prints warning.""" warnings.simplefilter('default') warnings.warn(message, category=DeprecationWarning) warnings.resetwarnings()
Loudly prints warning.
def _extract_apis_from_function(logical_id, function_resource, collector): """ Fetches a list of APIs configured for this SAM Function resource. Parameters ---------- logical_id : str Logical ID of the resource function_resource : dict Contents of the function resource including its properties collector : ApiCollector Instance of the API collector that where we will save the API information """ resource_properties = function_resource.get("Properties", {}) serverless_function_events = resource_properties.get(SamApiProvider._FUNCTION_EVENT, {}) SamApiProvider._extract_apis_from_events(logical_id, serverless_function_events, collector)
Fetches a list of APIs configured for this SAM Function resource. Parameters ---------- logical_id : str Logical ID of the resource function_resource : dict Contents of the function resource including its properties collector : ApiCollector Instance of the API collector that where we will save the API information
def getExtn(fimg, extn=None):
    """
    Returns the PyFITS extension corresponding to the extension specified in
    the filename. Defaults to returning the first extension with data or the
    primary extension, if none have data. If a non-existent extension has
    been specified, it raises a `KeyError` exception.
    """
    # If no extension is provided, search for first extension
    # in FITS file with data associated with it.
    if extn is None:
        # Set up default to point to PRIMARY extension.
        _extn = fimg[0]
        # then look for first extension with data.
        for _e in fimg:
            if _e.data is not None:
                _extn = _e
                break
    else:
        # An extension was provided, so parse it out...
        if repr(extn).find(',') > 1:
            if isinstance(extn, tuple):
                # We have a tuple possibly created by parseExtn(), so
                # turn it into a list for easier manipulation.
                _extns = list(extn)
                if '' in _extns:
                    _extns.remove('')
            else:
                _extns = extn.split(',')
            # Two values given for extension:
            #    for example, 'sci,1' or 'dq,1'
            try:
                _extn = fimg[_extns[0], int(_extns[1])]
            except KeyError:
                _extn = None
                for e in fimg:
                    hdr = e.header
                    if ('extname' in hdr and
                            hdr['extname'].lower() == _extns[0].lower() and
                            hdr['extver'] == int(_extns[1])):
                        _extn = e
                        break
        elif repr(extn).find('/') > 1:
            # We are working with GEIS group syntax
            _indx = str(extn[:extn.find('/')])
            _extn = fimg[int(_indx)]
        elif isinstance(extn, string_types):
            if extn.strip() == '':
                _extn = None  # force error since invalid name was provided
            else:
                # Only one extension value specified...
                if extn.isdigit():
                    # We only have an extension number specified as a string...
                    _nextn = int(extn)
                else:
                    # We only have EXTNAME specified...
                    _nextn = None
                    if extn.lower() == 'primary':
                        _nextn = 0
                    else:
                        i = 0
                        for hdu in fimg:
                            isimg = 'extname' in hdu.header
                            hdr = hdu.header
                            if isimg and extn.lower() == hdr['extname'].lower():
                                _nextn = i
                                break
                            i += 1
                # Guard against a missing EXTNAME match (_nextn is None),
                # which would raise a TypeError on comparison in Python 3.
                if _nextn is not None and _nextn < len(fimg):
                    _extn = fimg[_nextn]
                else:
                    _extn = None
        else:
            # Only integer extension number given, or default of 0 is used.
            if int(extn) < len(fimg):
                _extn = fimg[int(extn)]
            else:
                _extn = None

    if _extn is None:
        raise KeyError('Extension %s not found' % extn)

    return _extn
Returns the PyFITS extension corresponding to the extension specified in the filename. Defaults to returning the first extension with data or the primary extension, if none have data. If a non-existent extension has been specified, it raises a `KeyError` exception.
def crosscov(x, y, axis=-1, all_lags=False, debias=True, normalize=True): """Returns the crosscovariance sequence between two ndarrays. This is performed by calling fftconvolve on x, y[::-1] Parameters ---------- x : ndarray y : ndarray axis : time axis all_lags : {True/False} whether to return all nonzero lags, or to clip the length of s_xy to be the length of x and y. If False, then the zero lag covariance is at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2 debias : {True/False} Always removes an estimate of the mean along the axis, unless told not to (eg X and Y are known zero-mean) Returns ------- cxy : ndarray The crosscovariance function Notes ----- cross covariance of processes x and y is defined as .. math:: C_{xy}[k]=E\{(X(n+k)-E\{X\})(Y(n)-E\{Y\})^{*}\} where X and Y are discrete, stationary (or ergodic) random processes Also note that this routine is the workhorse for all auto/cross/cov/corr functions. """ if x.shape[axis] != y.shape[axis]: raise ValueError( 'crosscov() only works on same-length sequences for now' ) if debias: x = remove_bias(x, axis) y = remove_bias(y, axis) slicing = [slice(d) for d in x.shape] slicing[axis] = slice(None, None, -1) cxy = fftconvolve(x, y[tuple(slicing)].conj(), axis=axis, mode='full') N = x.shape[axis] if normalize: cxy /= N if all_lags: return cxy slicing[axis] = slice(N - 1, 2 * N - 1) return cxy[tuple(slicing)]
Returns the crosscovariance sequence between two ndarrays. This is performed by calling fftconvolve on x, y[::-1] Parameters ---------- x : ndarray y : ndarray axis : time axis all_lags : {True/False} whether to return all nonzero lags, or to clip the length of s_xy to be the length of x and y. If False, then the zero lag covariance is at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2 debias : {True/False} Always removes an estimate of the mean along the axis, unless told not to (eg X and Y are known zero-mean) Returns ------- cxy : ndarray The crosscovariance function Notes ----- cross covariance of processes x and y is defined as .. math:: C_{xy}[k]=E\{(X(n+k)-E\{X\})(Y(n)-E\{Y\})^{*}\} where X and Y are discrete, stationary (or ergodic) random processes Also note that this routine is the workhorse for all auto/cross/cov/corr functions.
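The all_lags=False indexing convention (zero lag at index 0 after the final slice) can be illustrated with plain NumPy, mirroring rather than calling the routine above:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([2.0, 1.0, 0.0, -1.0])
xc, yc = x - x.mean(), y - y.mean()

N = len(x)
full = np.correlate(xc, yc, mode='full') / N  # length 2N - 1
print(full[N - 1])       # zero-lag term, index 0 after the slice above
print(np.mean(xc * yc))  # same value, computed directly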
def parse(cls, gvid, exception=True):
    """
    Parse a string value into the geoid of this class.

    :param gvid: String value to parse.
    :param exception: If True (default), raise an exception on parse errors.
        If False, return a 'null' geoid.
    :return:
    """
    if gvid == 'invalid':
        return cls.get_class('null')(0)

    if not bool(gvid):
        return None

    if not isinstance(gvid, six.string_types):
        raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))

    try:
        if not cls.sl:
            # Civick and ACS include the SL, so can call from base type.
            if six.PY3:
                fn = cls.decode
            else:
                fn = cls.decode.__func__
            sl = fn(gvid[0:cls.sl_width])
        else:
            sl = cls.sl  # Otherwise must use derived class.
    except ValueError as e:
        if exception:
            raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
        else:
            return cls.get_class('null')(0)

    try:
        cls = cls.sl_map[sl]
    except KeyError:
        if exception:
            raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
        else:
            return cls.get_class('null')(0)

    m = cls.regex.match(gvid)

    if not m:
        raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))

    d = m.groupdict()

    if not d:
        return None

    if six.PY3:
        fn = cls.decode
    else:
        fn = cls.decode.__func__

    d = {k: fn(v) for k, v in d.items()}

    try:
        del d['sl']
    except KeyError:
        pass

    return cls(**d)
Parse a string value into the geoid of this class. :param gvid: String value to parse. :param exception: If True (default), raise an exception on parse errors. If False, return a 'null' geoid. :return:
def pull_requests(self):
    '''
    Looks for any of the following pull request formats in the description
    field: pr12345, pr 2345, PR2345, PR 2345
    '''
    pr_numbers = re.findall(r"[pP][rR]\s?[0-9]+", self.description)
    pr_numbers += re.findall(re.compile(r"pull\s?request\s?[0-9]+", re.IGNORECASE),
                             self.description)
    # Strip everything but the digits (note: no deduplication happens here)
    pr_numbers = [re.sub(r'[^0-9]', '', p) for p in pr_numbers]
    return pr_numbers
Looks for any of the following pull request formats in the description field: pr12345, pr 2345, PR2345, PR 2345
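The same patterns can be exercised standalone with just re (the sample description is made up):

import re

description = 'Fixed in PR 2345; see also pr12345 and Pull Request 99.'
found = re.findall(r"[pP][rR]\s?[0-9]+", description)
found += re.findall(re.compile(r"pull\s?request\s?[0-9]+", re.IGNORECASE),
                    description)
print([re.sub(r'[^0-9]', '', p) for p in found])
# ['2345', '12345', '99']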
def get_nexusvm_bindings(vlan_id, instance_id): """Lists nexusvm bindings.""" LOG.debug("get_nexusvm_bindings() called") return _lookup_all_nexus_bindings(instance_id=instance_id, vlan_id=vlan_id)
Lists nexusvm bindings.
def path(self, value): """Set path :param value: The value for path :type value: str :raises: None """ prepval = value.replace('\\', '/') self._path = posixpath.normpath(prepval)
Set path :param value: The value for path :type value: str :raises: None
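What the setter's normalisation does to a typical Windows-style value (the property stores the result in self._path):

import posixpath

value = 'C:\\projects\\plots\\..\\shots'
print(posixpath.normpath(value.replace('\\', '/')))
# C:/projects/shots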
def subcorpus(self, selector): """ Generates a new :class:`.Corpus` using the criteria in ``selector``. Accepts selector arguments just like :meth:`.Corpus.select`\. .. code-block:: python >>> corpus = Corpus(papers) >>> subcorpus = corpus.subcorpus(('date', 1995)) >>> subcorpus <tethne.classes.corpus.Corpus object at 0x10278ea10> """ subcorpus = self.__class__(self[selector], index_by=self.index_by, index_fields=self.indices.keys(), index_features=self.features.keys()) return subcorpus
Generates a new :class:`.Corpus` using the criteria in ``selector``. Accepts selector arguments just like :meth:`.Corpus.select`\. .. code-block:: python >>> corpus = Corpus(papers) >>> subcorpus = corpus.subcorpus(('date', 1995)) >>> subcorpus <tethne.classes.corpus.Corpus object at 0x10278ea10>
def visibility_changed(self, enable): """DockWidget visibility has changed""" super(SpyderPluginWidget, self).visibility_changed(enable) if enable and not self.pydocbrowser.is_server_running(): self.pydocbrowser.initialize()
DockWidget visibility has changed
def _create_spec_config(self, table_name, spec_documents): ''' Dynamo implementation of spec config creation Called by `create_archive_table()` in :py:class:`manager.BaseDataManager` Simply adds two rows to the spec table Parameters ---------- table_name : base table name (not including .spec suffix) spec_documents : list list of dictionary documents defining the manager spec ''' _spec_table = self._resource.Table(table_name + '.spec') for doc in spec_documents: _spec_table.put_item(Item=doc)
Dynamo implementation of spec config creation Called by `create_archive_table()` in :py:class:`manager.BaseDataManager` Simply adds two rows to the spec table Parameters ---------- table_name : base table name (not including .spec suffix) spec_documents : list list of dictionary documents defining the manager spec
def _message_hostgroup_parse(self, message): """ Parse given message and return list of group names and socket information. Socket information is parsed in :meth:`.WBeaconGouverneurMessenger._message_address_parse` method :param message: bytes :return: tuple of list of group names and WIPV4SocketInfo """ splitter_count = message.count(WHostgroupBeaconMessenger.__message_groups_splitter__) if splitter_count == 0: return [], WBeaconGouverneurMessenger._message_address_parse(self, message) elif splitter_count == 1: splitter_pos = message.find(WHostgroupBeaconMessenger.__message_groups_splitter__) groups = [] group_splitter = WHostgroupBeaconMessenger.__group_splitter__ for group_name in message[(splitter_pos + 1):].split(group_splitter): groups.append(group_name.strip()) address = WBeaconGouverneurMessenger._message_address_parse(self, message[:splitter_pos]) return groups, address else: raise ValueError('Invalid message. Too many separators')
Parse given message and return list of group names and socket information. Socket information is parsed in :meth:`.WBeaconGouverneurMessenger._message_address_parse` method :param message: bytes :return: tuple of list of group names and WIPV4SocketInfo
def _cmd_line_parser(): ''' return a command line parser. It is used when generating the documentation ''' parser = argparse.ArgumentParser() parser.add_argument('--path', help=('path to test files, ' 'if not provided the script folder is used')) parser.add_argument('--text_output', action='store_true', help='option to save the results to text file') parser.add_argument('--format', default='rst', nargs='?', choices=['rst', 'md'], help='text formatting') return parser
return a command line parser. It is used when generating the documentation
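A usage sketch, calling the factory directly with an explicit argv instead of the command line:

args = _cmd_line_parser().parse_args(['--text_output', '--format', 'md'])
print(args.path, args.text_output, args.format)
# None True md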
def init_environment(): """Set environment variables that are important for the pipeline. :returns: None :rtype: None :raises: None """ os.environ['DJANGO_SETTINGS_MODULE'] = 'jukeboxcore.djsettings' pluginpath = os.pathsep.join((os.environ.get('JUKEBOX_PLUGIN_PATH', ''), constants.BUILTIN_PLUGIN_PATH)) os.environ['JUKEBOX_PLUGIN_PATH'] = pluginpath
Set environment variables that are important for the pipeline. :returns: None :rtype: None :raises: None
def send_message(self, app_mxit_id, target_user_ids, message='', contains_markup=True, spool=None, spool_timeout=None, links=None, scope='message/send'): """ Send a message (from a Mxit app) to a list of Mxit users """ data = { 'From': app_mxit_id, 'To': ",".join(target_user_ids), 'Body': message, 'ContainsMarkup': contains_markup } if spool: data['Spool'] = spool if spool_timeout: data['SpoolTimeOut'] = spool_timeout if links: data['Links'] = links return _post( token=self.oauth.get_app_token(scope), uri='/message/send', data=data )
Send a message (from a Mxit app) to a list of Mxit users
def parentLayer(self): """ returns information about the parent """ if self._parentLayer is None: from ..agol.services import FeatureService self.__init() url = os.path.dirname(self._url) self._parentLayer = FeatureService(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) return self._parentLayer
returns information about the parent
def run(main=None, argv=None): """Runs the program with an optional 'main' function and 'argv' list.""" flags_obj = flags.FLAGS absl_flags_obj = absl_flags.FLAGS # Extract the args from the optional `argv` list. args = argv[1:] if argv else None # Parse the known flags from that list, or from the command # line otherwise. # pylint: disable=protected-access flags_passthrough = flags_obj._parse_flags(args=args) # pylint: enable=protected-access # Immediately after flags are parsed, bump verbosity to INFO if the flag has # not been set. if absl_flags_obj["verbosity"].using_default_value: absl_flags_obj.verbosity = 0 main = main or sys.modules['__main__'].main # Call the main function, passing through any arguments # to the final program. sys.exit(main(sys.argv[:1] + flags_passthrough))
Runs the program with an optional 'main' function and 'argv' list.
def get_events(self): """ Returns a list of all joystick events that have occurred since the last call to `get_events`. The list contains events in the order that they occurred. If no events have occurred in the intervening time, the result is an empty list. """ result = [] while self._wait(0): event = self._read() if event: result.append(event) return result
Returns a list of all joystick events that have occurred since the last call to `get_events`. The list contains events in the order that they occurred. If no events have occurred in the intervening time, the result is an empty list.
def _lei16(ins): ''' Compares & pops top 2 operands out of the stack, and checks if the 1st operand <= 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 16 bit signed version ''' output = _16bit_oper(ins.quad[2], ins.quad[3]) output.append('call __LEI16') output.append('push af') REQUIRES.add('lei16.asm') return output
Compares & pops top 2 operands out of the stack, and checks if the 1st operand <= 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 16 bit signed version
def fetch(self, key: object, default=None): """ Retrieves the related value from the stored user data. """ return self._user_data.get(key, default)
Retrieves the related value from the stored user data.
def generate(str, alg):
    """Generates a PIL image avatar based on the given input string.
    Acts as the main accessor to pagan."""
    img = Image.new(IMAGE_MODE, IMAGE_SIZE, BACKGROUND_COLOR)
    hashcode = hash_input(str, alg)
    pixelmap = setup_pixelmap(hashcode)
    draw_image(pixelmap, img)
    return img
Generates a PIL image avatar based on the given input string. Acts as the main accessor to pagan.
def _get_user_data(self): """ Base method for retrieving user data from a viz. """ url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/settings/' r = requests.get(url) if r.status_code == 200: content = r.json() else: raise Exception('Error retrieving user data from server') return content
Base method for retrieving user data from a viz.
def open(self, filename, mode='r', **kwargs):
    '''
    Open the file and return a file-like object.

    :param str filename: The storage root-relative filename
    :param str mode: The open mode (``(r|w)b?``)
    :raises FileNotFound: If trying to read a file that does not exist
    '''
    if 'r' in mode and not self.backend.exists(filename):
        raise FileNotFound(filename)
    return self.backend.open(filename, mode, **kwargs)
Open the file and return a file-like object. :param str filename: The storage root-relative filename :param str mode: The open mode (``(r|w)b?``) :raises FileNotFound: If trying to read a file that does not exist
def position(self): """Returns (line, col) of the current position in the stream.""" line, col = self._position(self.chunkOffset) return (line + 1, col)
Returns (line, col) of the current position in the stream.
def parseReaderConfig(self, confdict): """Parse a reader configuration dictionary. Examples: { Type: 23, Data: b'\x00' } { Type: 1023, Vendor: 25882, Subtype: 21, Data: b'\x00' } """ logger.debug('parseReaderConfig input: %s', confdict) conf = {} for k, v in confdict.items(): if not k.startswith('Parameter'): continue ty = v['Type'] data = v['Data'] vendor = None subtype = None try: vendor, subtype = v['Vendor'], v['Subtype'] except KeyError: pass if ty == 1023: if vendor == 25882 and subtype == 37: tempc = struct.unpack('!H', data)[0] conf.update(temperature=tempc) else: conf[ty] = data return conf
Parse a reader configuration dictionary. Examples: { Type: 23, Data: b'\x00' } { Type: 1023, Vendor: 25882, Subtype: 21, Data: b'\x00' }
def count_sources(edge_iter: EdgeIterator) -> Counter: """Count the source nodes in an edge iterator with keys and data. :return: A counter of source nodes in the iterable """ return Counter(u for u, _, _ in edge_iter)
Count the source nodes in an edge iterator with keys and data. :return: A counter of source nodes in the iterable
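With (source, target, key) triples such as those yielded by a NetworkX MultiDiGraph's edges(keys=True):

from collections import Counter

edges = [('a', 'b', 0), ('a', 'c', 1), ('b', 'c', 0)]
print(count_sources(iter(edges)))
# Counter({'a': 2, 'b': 1})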
def ordered_expected_layers(self): """Get an ordered list of layers according to users input. From top to bottom in the legend: [ ('FromCanvas', layer name, full layer URI, QML), ('FromAnalysis', layer purpose, layer group, None), ... ] The full layer URI is coming from our helper. :return: An ordered list of layers following a structure. :rtype: list """ registry = QgsProject.instance() layers = [] count = self.list_layers_in_map_report.count() for i in range(count): layer = self.list_layers_in_map_report.item(i) origin = layer.data(LAYER_ORIGIN_ROLE) if origin == FROM_ANALYSIS['key']: key = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE) parent = layer.data(LAYER_PARENT_ANALYSIS_ROLE) layers.append(( FROM_ANALYSIS['key'], key, parent, None )) else: layer_id = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE) layer = registry.mapLayer(layer_id) style_document = QDomDocument() layer.exportNamedStyle(style_document) layers.append(( FROM_CANVAS['key'], layer.name(), full_layer_uri(layer), style_document.toString() )) return layers
Get an ordered list of layers according to users input. From top to bottom in the legend: [ ('FromCanvas', layer name, full layer URI, QML), ('FromAnalysis', layer purpose, layer group, None), ... ] The full layer URI is coming from our helper. :return: An ordered list of layers following a structure. :rtype: list
def batch_predict_async(training_dir, prediction_input_file, output_dir,
                        mode, batch_size=16, shard_files=True,
                        output_format='csv', cloud=False):
    """Local and cloud batch prediction.

    Args:
      training_dir: The output folder of training.
      prediction_input_file: csv file pattern to a file. File must be on GCS
          if running cloud prediction
      output_dir: output location to save the results. Must be a GCS path if
          running cloud prediction.
      mode: 'evaluation' or 'prediction'. If 'evaluation', the input data
          must contain a target column. If 'prediction', the input data must
          not contain a target column.
      batch_size: Int. How many instances to run in memory at once. Larger
          values mean better performance but more memory consumed.
      shard_files: If False, the output files are not sharded.
      output_format: csv or json. JSON files are newline-delimited.
      cloud: If true, does cloud batch prediction. If False, runs batch
          prediction locally.

    Returns:
      A google.datalab.utils.Job object that can be used to query state from
      or wait.
    """
    import google.datalab.utils as du
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if cloud:
            runner_results = cloud_batch_predict(training_dir, prediction_input_file,
                                                 output_dir, mode, batch_size,
                                                 shard_files, output_format)
            job = du.DataflowJob(runner_results)
        else:
            runner_results = local_batch_predict(training_dir, prediction_input_file,
                                                 output_dir, mode, batch_size,
                                                 shard_files, output_format)
            job = du.LambdaJob(lambda: runner_results.wait_until_finish(),
                               job_id=None)

    return job
Local and cloud batch prediction. Args: training_dir: The output folder of training. prediction_input_file: csv file pattern to a file. File must be on GCS if running cloud prediction output_dir: output location to save the results. Must be a GCS path if running cloud prediction. mode: 'evaluation' or 'prediction'. If 'evaluation', the input data must contain a target column. If 'prediction', the input data must not contain a target column. batch_size: Int. How many instances to run in memory at once. Larger values mean better performance but more memory consumed. shard_files: If False, the output files are not sharded. output_format: csv or json. JSON files are newline-delimited. cloud: If true, does cloud batch prediction. If False, runs batch prediction locally. Returns: A google.datalab.utils.Job object that can be used to query state from or wait.
def toc(self, depth=6, lowest_level=6):
    """
    Get the table of contents of the currently fed HTML string.

    :param depth: the depth of TOC
    :param lowest_level: the allowed lowest level of header tag
    :return: a list representing the TOC
    """
    depth = min(max(depth, 0), 6)
    depth = 6 if depth == 0 else depth
    lowest_level = min(max(lowest_level, 1), 6)
    toc = self._root.to_dict()['children']

    def traverse(curr_toc, dep, lowest_lvl, curr_depth=1):
        if curr_depth > dep:
            # clear all items of this depth and exit the recursion
            curr_toc.clear()
            return
        items_to_remove = []
        for item in curr_toc:
            if item['level'] > lowest_lvl:
                # record item with low header level, for removing it later
                items_to_remove.append(item)
            else:
                traverse(item['children'], dep, lowest_lvl, curr_depth + 1)
        for item in items_to_remove:
            curr_toc.remove(item)

    traverse(toc, depth, lowest_level)
    return toc
Get the table of contents of the currently fed HTML string. :param depth: the depth of TOC :param lowest_level: the allowed lowest level of header tag :return: a list representing the TOC
def initialize(name='', pool_size=10, host='localhost', password='',
               port=5432, user=''):
    """Initialize a new database connection pool and return the pool object.

    Saves a reference to that instance in a module-level variable, so
    applications with only one database can just call this function and not
    worry about pool objects.
    """
    global pool
    instance = Pool(name=name, pool_size=pool_size, host=host,
                    password=password, port=port, user=user)
    pool = instance
    return instance
Initialize a new database connection pool and return the pool object. Saves a reference to that instance in a module-level variable, so applications with only one database can just call this function and not worry about pool objects.
def _invoke_callbacks(self, *args, **kwargs): """Invoke all done callbacks.""" for callback in self._done_callbacks: _helpers.safe_invoke_callback(callback, *args, **kwargs)
Invoke all done callbacks.
def create_table( data, meta=None, fields=None, skip_header=True, import_fields=None, samples=None, force_types=None, max_rows=None, *args, **kwargs ): """Create a rows.Table object based on data rows and some configurations - `skip_header` is only used if `fields` is set - `samples` is only used if `fields` is `None`. If samples=None, all data is filled in memory - use with caution. - `force_types` is only used if `fields` is `None` - `import_fields` can be used either if `fields` is set or not, the resulting fields will seek its order - `fields` must always be in the same order as the data """ table_rows = iter(data) force_types = force_types or {} if import_fields is not None: import_fields = make_header(import_fields) # TODO: test max_rows if fields is None: # autodetect field types # TODO: may add `type_hints` parameter so autodetection can be easier # (plugins may specify some possible field types). header = make_header(next(table_rows)) if samples is not None: sample_rows = list(islice(table_rows, 0, samples)) table_rows = chain(sample_rows, table_rows) else: if max_rows is not None and max_rows > 0: sample_rows = table_rows = list(islice(table_rows, max_rows)) else: sample_rows = table_rows = list(table_rows) # Detect field types using only the desired columns detected_fields = detect_types( header, sample_rows, skip_indexes=[ index for index, field in enumerate(header) if field in force_types or field not in (import_fields or header) ], *args, **kwargs ) # Check if any field was added during detecting process new_fields = [ field_name for field_name in detected_fields.keys() if field_name not in header ] # Finally create the `fields` with both header and new field names, # based on detected fields `and force_types` fields = OrderedDict( [ (field_name, detected_fields.get(field_name, TextField)) for field_name in header + new_fields ] ) fields.update(force_types) # Update `header` and `import_fields` based on new `fields` header = list(fields.keys()) if import_fields is None: import_fields = header else: # using provided field types if not isinstance(fields, OrderedDict): raise ValueError("`fields` must be an `OrderedDict`") if skip_header: # If we're skipping the header probably this row is not trustable # (can be data or garbage). next(table_rows) header = make_header(list(fields.keys())) if import_fields is None: import_fields = header fields = OrderedDict( [(field_name, fields[key]) for field_name, key in zip(header, fields)] ) diff = set(import_fields) - set(header) if diff: field_names = ", ".join('"{}"'.format(field) for field in diff) raise ValueError("Invalid field names: {}".format(field_names)) fields = OrderedDict( [(field_name, fields[field_name]) for field_name in import_fields] ) get_row = get_items(*map(header.index, import_fields)) table = Table(fields=fields, meta=meta) if max_rows is not None and max_rows > 0: table_rows = islice(table_rows, max_rows) table.extend(dict(zip(import_fields, get_row(row))) for row in table_rows) source = table.meta.get("source", None) if source is not None: if source.should_close: source.fobj.close() if source.should_delete and Path(source.uri).exists(): unlink(source.uri) return table
Create a rows.Table object based on data rows and some configurations - `skip_header` is only used if `fields` is set - `samples` is only used if `fields` is `None`. If samples=None, all data is filled in memory - use with caution. - `force_types` is only used if `fields` is `None` - `import_fields` can be used either if `fields` is set or not, the resulting fields will seek its order - `fields` must always be in the same order as the data
def file_renamed_in_data_in_editorstack(self, editorstack_id_str,
                                        original_filename, filename):
    """A file was renamed in an editorstack's data; notify the other
    editorstacks."""
    for editorstack in self.editorstacks:
        if str(id(editorstack)) != editorstack_id_str:
            editorstack.rename_in_data(original_filename, filename)
A file was renamed in an editorstack's data; notify the other editorstacks.
def pkcs7_pad(buf): # type: (bytes) -> bytes """Appends PKCS7 padding to an input buffer :param bytes buf: buffer to add padding :rtype: bytes :return: buffer with PKCS7_PADDING """ padder = cryptography.hazmat.primitives.padding.PKCS7( cryptography.hazmat.primitives.ciphers. algorithms.AES.block_size).padder() return padder.update(buf) + padder.finalize()
Appends PKCS7 padding to an input buffer :param bytes buf: buffer to add padding :rtype: bytes :return: buffer with PKCS7_PADDING
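A quick check that a 13-byte buffer is padded out to one 16-byte AES block (three bytes of 0x03 are appended, per PKCS7):

buf = b'\x01' * 13
padded = pkcs7_pad(buf)
print(len(padded), padded[-3:])
# 16 b'\x03\x03\x03'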
def _make_names_unique(animations):
    """ Given a list of animations, some of which might have duplicate names,
        rename the first one to <duplicate>_0, the second to <duplicate>_1,
        the third to <duplicate>_2, etc."""
    counts = {}
    for a in animations:
        c = counts.get(a['name'], 0) + 1
        counts[a['name']] = c
        if c > 1:
            a['name'] += '_' + str(c - 1)

    dupes = set(k for k, v in counts.items() if v > 1)
    for a in animations:
        if a['name'] in dupes:
            a['name'] += '_0'
Given a list of animations, some of which might have duplicate names, rename the first one to <duplicate>_0, the second to <duplicate>_1, the third to <duplicate>_2, etc.
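For example, three animations sharing a name come out suffixed in order of appearance:

animations = [{'name': 'walk'}, {'name': 'walk'},
              {'name': 'idle'}, {'name': 'walk'}]
_make_names_unique(animations)
print([a['name'] for a in animations])
# ['walk_0', 'walk_1', 'idle', 'walk_2']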