def update(self, data):
    '''Updates object information with live data (if live data has different
    values to stored object information). Changes will be automatically
    applied, but not persisted in the database. Call `db.session.add(elb)`
    manually to commit the changes to the DB.

    Args:
        data (:dict:): Dict representing ELB data retrieved from the ELB client

    Returns:
        True if there were any changes to the object, False otherwise
    '''
    updated = self.set_property('lb_name', data['LoadBalancerName'])
    updated |= self.set_property('dns_name', data['DNSName'])

    if 'CanonicalHostedZoneName' not in data:
        data['CanonicalHostedZoneName'] = None
    updated |= self.set_property(
        'canonical_hosted_zone_name', data['CanonicalHostedZoneName']
    )

    # Apparently you can get an ELB that doesn't have a parent VPC
    if 'VPCId' in data:
        updated |= self.set_property('vpc_id', data['VPCId'])
    else:
        updated |= self.set_property('vpc_id', 'None')

    # Instances
    # ELBs list instances as [{'InstanceId': <instance_id>}, ...] Sigh.
    instances = [instance['InstanceId'] for instance in data['Instances']]
    if sorted(instances) != sorted(self.get_property('instances')):
        self.set_property('instances', instances)
        updated = True

    # Tags (not currently in use, but for future reference)
    if 'Tags' not in data:
        data['Tags'] = {}
    tags = {x['Key']: x['Value'] for x in data['Tags'] or {}}
    existing_tags = {x.key: x for x in self.tags}

    # Check for updated or removed tags
    for key in list(existing_tags.keys()):
        if key not in tags:
            updated |= self.delete_tag(key)

    # Metrics
    if 'Metrics' not in data:
        data['Metrics'] = {}
    updated |= self.set_property('metrics', data['Metrics'])

    return updated
def remove(self, env_path):
    """Remove metadata for a given virtualenv from cache."""
    with filelock(self.lockpath):
        cache = self._read_cache()
        logger.debug("Removing virtualenv from cache: %s" % env_path)
        lines = [
            line for line in cache
            if json.loads(line).get('metadata', {}).get('env_path') != env_path
        ]
        self._write_cache(lines)
def has_valid_soma(data_wrapper):
    '''Check if a data block has a valid soma

    Returns:
        CheckResult with result
    '''
    try:
        make_soma(data_wrapper.soma_points())
        return CheckResult(True)
    except SomaError:
        return CheckResult(False)
def nodes(self) -> List[str]:
    """Return the list of nodes configured in the scenario's yaml.

    Should the scenario use version 1, we check if there is a 'range'
    setting. If so, we derive the list of nodes from this dictionary, using
    its 'first', 'last' and 'template' keys. Should any of these keys be
    missing, we throw an appropriate exception.

    If the scenario version is not 1, or no 'range' setting exists, we use
    the 'list' settings key and return the value. Again, should the key be
    absent, we throw an appropriate error.

    :raises MissingNodesConfiguration:
        if the scenario version is 1 and a 'range' key was detected, but any
        one of the keys 'first', 'last', 'template' are missing; *or* the
        scenario version is not 1 or the 'range' key and the 'list' are
        absent.
    """
    if self._scenario_version == 1 and 'range' in self._config:
        range_config = self._config['range']
        try:
            start, stop = range_config['first'], range_config['last'] + 1
        except KeyError:
            raise MissingNodesConfiguration(
                'Setting "range" must be a dict containing keys "first" and "last",'
                ' whose values are integers!',
            )
        try:
            template = range_config['template']
        except KeyError:
            raise MissingNodesConfiguration(
                'Must specify "template" setting when giving "range" setting.',
            )
        return [template.format(i) for i in range(start, stop)]
    try:
        return self._config['list']
    except KeyError:
        raise MissingNodesConfiguration('Must specify nodes under "list" setting!')
def jsonld(client, datasets):
    """Format datasets as JSON-LD."""
    from renku.models._json import dumps
    from renku.models._jsonld import asjsonld
    data = [
        asjsonld(
            dataset,
            basedir=os.path.relpath(
                '.', start=str(dataset.__reference__.parent)
            )
        ) for dataset in datasets
    ]
    click.echo(dumps(data, indent=2))
def get_buffer(self):
    """Get buffer which needs to be bulked to Elasticsearch"""
    # Get sources for documents which are in Elasticsearch
    # but are not in the local buffer
    if self.doc_to_update:
        self.update_sources()

    ES_buffer = self.action_buffer
    self.clean_up()
    return ES_buffer
def join(self):
    '''Blocks until all items in the Queue have been gotten and processed.

    The count of unfinished tasks goes up whenever an item is added to the
    queue. The count goes down whenever a consumer thread calls task_done()
    to indicate the item was retrieved and all work on it is complete.

    When the count of unfinished tasks drops to zero, join() unblocks.
    '''
    with self._parent._all_tasks_done:
        while self._parent._unfinished_tasks:
            self._parent._all_tasks_done.wait()
def query(number, domains, resolver=None):
    """Look for NAPTR RRs for the specified number in the specified domains.

    e.g. lookup('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])
    """
    if resolver is None:
        resolver = dns.resolver.get_default_resolver()
    for domain in domains:
        if isinstance(domain, (str, unicode)):
            domain = dns.name.from_text(domain)
        qname = dns.e164.from_e164(number, domain)
        try:
            return resolver.query(qname, 'NAPTR')
        except dns.resolver.NXDOMAIN:
            pass
    raise dns.resolver.NXDOMAIN
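A hedged usage sketch for the resolver above: the zone names come from the docstring, live answers depend on real DNS data, and the `order`, `preference`, `service` and `regexp` attributes are the standard dnspython NAPTR rdata fields.

import dns.resolver

try:
    answers = query('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])
    for rr in answers:
        print(rr.order, rr.preference, rr.service, rr.regexp)
except dns.resolver.NXDOMAIN:
    print('number not found in any of the given domains')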
def _sort_r(sorted, processed, key, deps, dependency_tree):
    """Recursive topological sort implementation."""
    if key in processed:
        return
    processed.add(key)
    for dep_key in deps:
        dep_deps = dependency_tree.get(dep_key)
        if dep_deps is None:
            log.debug('"%s" not found, skipped', Repr(dep_key))
            continue
        _sort_r(sorted, processed, dep_key, dep_deps, dependency_tree)
    sorted.append((key, deps))
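A minimal driver sketch for the recursive sort above; the `dependency_tree` contents are hypothetical, and `log`/`Repr` are module-level names the function assumes.

# Hypothetical dependency mapping: keys map to their dependency lists.
dependency_tree = {'app': ['lib'], 'lib': ['core'], 'core': []}

sorted_out, processed = [], set()
for key, deps in dependency_tree.items():
    _sort_r(sorted_out, processed, key, deps, dependency_tree)
# Dependencies come first:
# [('core', []), ('lib', ['core']), ('app', ['lib'])]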
def from_shorthand(shorthand_string, slash=None):
    """Take a chord written in shorthand and return the notes in the chord.

    The function can recognize triads, sevenths, sixths, ninths, elevenths,
    thirteenths, slashed chords and a number of altered chords. The second
    argument should not be given and is only used for a recursive call when
    a slashed chord or polychord is found.

    See http://tinyurl.com/3hn6v8u for a nice overview of chord patterns.

    Examples:
    >>> from_shorthand('Amin')
    ['A', 'C', 'E']
    >>> from_shorthand('Am/M7')
    ['A', 'C', 'E', 'G#']
    >>> from_shorthand('A')
    ['A', 'C#', 'E']
    >>> from_shorthand('A/G')
    ['G', 'A', 'C#', 'E']
    >>> from_shorthand('Dm|G')
    ['G', 'B', 'D', 'F', 'A']

    Recognised abbreviations: the letters "m" and "M" in the following
    abbreviations can always be substituted by respectively "min", "mi" or
    "-" and "maj" or "ma".

    Example:
    >>> from_shorthand('Amin7') == from_shorthand('Am7')
    True

    Triads: 'm', 'M' or '', 'dim'
    Sevenths: 'm7', 'M7', '7', 'm7b5', 'dim7', 'm/M7' or 'mM7'
    Augmented chords: 'aug' or '+', '7#5' or 'M7+5', 'M7+', 'm7+', '7+'
    Suspended chords: 'sus4', 'sus2', 'sus47' or '7sus4', 'sus', '11', 'sus4b9' or 'susb9'
    Sixths: '6', 'm6', 'M6', '6/7' or '67', '6/9' or '69'
    Ninths: '9' or 'add9', 'M9', 'm9', '7b9', '7#9'
    Elevenths: '11' or 'add11', '7#11', 'm11'
    Thirteenths: '13' or 'add13', 'M13', 'm13'
    Altered chords: '7b5', '7b9', '7#9', '67' or '6/7'
    Special: '5', 'NC', 'hendrix'
    """
    # warning reduce??
    if type(shorthand_string) == list:
        res = []
        for x in shorthand_string:
            res.append(from_shorthand(x))
        return res
    if shorthand_string in ['NC', 'N.C.']:
        return []

    # Shrink shorthand_string to a format recognised by chord_shorthand
    shorthand_string = shorthand_string.replace('min', 'm')
    shorthand_string = shorthand_string.replace('mi', 'm')
    shorthand_string = shorthand_string.replace('-', 'm')
    shorthand_string = shorthand_string.replace('maj', 'M')
    shorthand_string = shorthand_string.replace('ma', 'M')

    # Get the note name
    if not notes.is_valid_note(shorthand_string[0]):
        raise NoteFormatError, "Unrecognised note '%s' in chord '%s'"\
            % (shorthand_string[0], shorthand_string)
    name = shorthand_string[0]

    # Look for accidentals
    for n in shorthand_string[1:]:
        if n == '#':
            name += n
        elif n == 'b':
            name += n
        else:
            break

    # Look for slashes and polychords '|'
    slash_index = -1
    s = 0
    rest_of_string = shorthand_string[len(name):]
    for n in rest_of_string:
        if n == '/':
            slash_index = s
        elif n == '|':
            # Generate polychord
            return from_shorthand(shorthand_string[:len(name) + s],
                    from_shorthand(shorthand_string[len(name) + s + 1:]))
        s += 1

    # Generate slash chord
    if slash_index != -1 and rest_of_string not in ['m/M7', '6/9', '6/7']:
        res = shorthand_string[:len(name) + slash_index]
        return from_shorthand(shorthand_string[:len(name) + slash_index],
                shorthand_string[len(name) + slash_index + 1:])

    shorthand_start = len(name)
    short_chord = shorthand_string[shorthand_start:]
    if chord_shorthand.has_key(short_chord):
        res = chord_shorthand[short_chord](name)
        if slash != None:
            # Add slashed chords
            if type(slash) == str:
                if notes.is_valid_note(slash):
                    res = [slash] + res
                else:
                    raise NoteFormatError, \
                        "Unrecognised note '%s' in slash chord'%s'" % (slash,
                                slash + shorthand_string)
            elif type(slash) == list:
                # Add polychords
                r = slash
                for n in res:
                    if n != r[-1]:
                        r.append(n)
                return r
        return res
    else:
        raise FormatError, 'Unknown shorthand: %s' % shorthand_string
def _calculate_period(self, vals):
    ''' calculate the sampling period in seconds '''
    if len(vals) < 4:
        return None

    if self.firmware['major'] < 16:
        return ((vals[3] << 24) | (vals[2] << 16) | (vals[1] << 8) | vals[0]) / 12e6
    else:
        return self._calculate_float(vals)
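For pre-v16 firmware, the four bytes form a little-endian 32-bit tick count against a 12 MHz clock. A worked example with hypothetical byte values:

vals = [0x00, 0x1B, 0xB7, 0x00]  # hypothetical reading: 12,000,000 ticks
ticks = (vals[3] << 24) | (vals[2] << 16) | (vals[1] << 8) | vals[0]
print(ticks / 12e6)  # -> 1.0, i.e. a one-second sampling period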
def set_label_elements(self, wanted_label_elements):
    """Set one or more label elements based on at least one of the others"""
    if isinstance(wanted_label_elements, str):
        wanted_label_elements = [wanted_label_elements]

    # Figure out which desired label elements are missing
    missing_elements = [e for e in wanted_label_elements
                        if getattr(self, e) is None]
    contained_elements = [e for e in ann_label_fields
                          if getattr(self, e) is not None]
    if not contained_elements:
        raise Exception('No annotation labels contained in object')

    for e in missing_elements:
        self.convert_label_attribute(contained_elements[0], e)

    unwanted_label_elements = list(set(ann_label_fields)
                                   - set(wanted_label_elements))
    self.rm_attributes(unwanted_label_elements)
    return
def keyPressEvent(self, event):
    """Override Qt method"""
    if event.key() == Qt.Key_Alt:
        self._alt_key_is_down = True
        self.update()
def from_curvilinear(cls, x, y, z, formatter=numpy_formatter):
    """Construct a contour generator from a curvilinear grid.

    Note
    ----
    This is an alias for the default constructor.

    Parameters
    ----------
    x : array_like
        x coordinates of each point in `z`. Must be the same size as `z`.
    y : array_like
        y coordinates of each point in `z`. Must be the same size as `z`.
    z : array_like
        The 2-dimensional curvilinear grid of data to compute contours for.
        Masked arrays are supported.
    formatter : callable
        A conversion function to convert from the internal `Matplotlib`_
        contour format to an external format. See :ref:`formatters` for
        more information.

    Returns
    -------
    : :class:`QuadContourGenerator`
        Initialized contour generator.
    """
    return cls(x, y, z, formatter)
def token(self):
    """ get the token """
    header = self.default_headers.get('Authorization', '')
    prefix = 'Bearer '
    if header.startswith(prefix):
        token = header[len(prefix):]
    else:
        token = header
    return token
def monitorSearchJob(self):
    """
    Parameters:
    ----------------------------------------------------------------------
    retval:         nothing
    """
    assert self.__searchJob is not None
    jobID = self.__searchJob.getJobID()
    startTime = time.time()
    lastUpdateTime = datetime.now()

    # Monitor HyperSearch and report progress
    # NOTE: may be -1 if it can't be determined
    expectedNumModels = self.__searchJob.getExpectedNumModels(
        searchMethod=self._options["searchMethod"])
    lastNumFinished = 0
    finishedModelIDs = set()
    finishedModelStats = _ModelStats()

    # Keep track of the worker state, results, and milestones from the job
    # record
    lastWorkerState = None
    lastJobResults = None
    lastModelMilestones = None
    lastEngStatus = None
    hyperSearchFinished = False

    while not hyperSearchFinished:
        jobInfo = self.__searchJob.getJobStatus(self._workers)

        # Check for job completion BEFORE processing models; NOTE: this
        # permits us to process any models that we may not have accounted
        # for in the previous iteration.
        hyperSearchFinished = jobInfo.isFinished()

        # Look for newly completed models, and process them
        modelIDs = self.__searchJob.queryModelIDs()
        _emit(Verbosity.DEBUG,
              "Current number of models is %d (%d of them completed)" % (
                  len(modelIDs), len(finishedModelIDs)))

        if len(modelIDs) > 0:
            # Build a list of modelIDs to check for completion
            checkModelIDs = []
            for modelID in modelIDs:
                if modelID not in finishedModelIDs:
                    checkModelIDs.append(modelID)
            del modelIDs

            # Process newly completed models
            if checkModelIDs:
                _emit(Verbosity.DEBUG,
                      "Checking %d models..." % (len(checkModelIDs)))
                errorCompletionMsg = None
                for (i, modelInfo) in enumerate(_iterModels(checkModelIDs)):
                    _emit(Verbosity.DEBUG,
                          "[%s] Checking completion: %s" % (i, modelInfo))
                    if modelInfo.isFinished():
                        finishedModelIDs.add(modelInfo.getModelID())
                        finishedModelStats.update(modelInfo)
                        if (modelInfo.getCompletionReason().isError()
                                and not errorCompletionMsg):
                            errorCompletionMsg = modelInfo.getCompletionMsg()
                        # Update the set of all encountered metrics keys (we
                        # will use these to print column names in reports.csv)
                        metrics = modelInfo.getReportMetrics()
                        self.__foundMetrcsKeySet.update(metrics.keys())

                numFinished = len(finishedModelIDs)

                # Print current completion stats
                if numFinished != lastNumFinished:
                    lastNumFinished = numFinished
                    if expectedNumModels is None:
                        expModelsStr = ""
                    else:
                        expModelsStr = "of %s" % (expectedNumModels)
                    stats = finishedModelStats
                    print ("<jobID: %s> %s %s models finished [success: %s; %s: %s; %s: "
                           "%s; %s: %s; %s: %s; %s: %s; %s: %s]" % (
                        jobID,
                        numFinished,
                        expModelsStr,
                        #stats.numCompletedSuccess,
                        (stats.numCompletedEOF + stats.numCompletedStopped),
                        "EOF" if stats.numCompletedEOF else "eof",
                        stats.numCompletedEOF,
                        "STOPPED" if stats.numCompletedStopped else "stopped",
                        stats.numCompletedStopped,
                        "KILLED" if stats.numCompletedKilled else "killed",
                        stats.numCompletedKilled,
                        "ERROR" if stats.numCompletedError else "error",
                        stats.numCompletedError,
                        "ORPHANED" if stats.numCompletedError else "orphaned",
                        stats.numCompletedOrphaned,
                        "UNKNOWN" if stats.numCompletedOther else "unknown",
                        stats.numCompletedOther))

                    # Print the first error message from the latest batch of
                    # completed models
                    if errorCompletionMsg:
                        print "ERROR MESSAGE: %s" % errorCompletionMsg

        # Print the new worker state, if it changed
        workerState = jobInfo.getWorkerState()
        if workerState != lastWorkerState:
            print "##>> UPDATED WORKER STATE: \n%s" % (pprint.pformat(
                workerState, indent=4))
            lastWorkerState = workerState

        # Print the new job results, if it changed
        jobResults = jobInfo.getResults()
        if jobResults != lastJobResults:
            print "####>> UPDATED JOB RESULTS: \n%s (elapsed time: %g secs)" \
                % (pprint.pformat(jobResults, indent=4),
                   time.time() - startTime)
            lastJobResults = jobResults

        # Print the new model milestones if they changed
        modelMilestones = jobInfo.getModelMilestones()
        if modelMilestones != lastModelMilestones:
            print "##>> UPDATED MODEL MILESTONES: \n%s" % (
                pprint.pformat(modelMilestones, indent=4))
            lastModelMilestones = modelMilestones

        # Print the new engine status if it changed
        engStatus = jobInfo.getEngStatus()
        if engStatus != lastEngStatus:
            print "##>> UPDATED STATUS: \n%s" % (engStatus)
            lastEngStatus = engStatus

        # Sleep before next check
        if not hyperSearchFinished:
            if self._options["timeout"] != None:
                if ((datetime.now() - lastUpdateTime) >
                        timedelta(minutes=self._options["timeout"])):
                    print "Timeout reached, exiting"
                    self.__cjDAO.jobCancel(jobID)
                    sys.exit(1)
            time.sleep(1)

    # Tabulate results
    modelIDs = self.__searchJob.queryModelIDs()
    print "Evaluated %s models" % len(modelIDs)
    print "HyperSearch finished!"

    jobInfo = self.__searchJob.getJobStatus(self._workers)
    print "Worker completion message: %s" % (jobInfo.getWorkerCompletionMsg())
def series_with_permutation(self, other):
    """Compute the series product with another channel permutation circuit

    Args:
        other (CPermutation):

    Returns:
        Circuit: The composite permutation circuit (could also be the
            identity circuit for n channels)
    """
    combined_permutation = tuple([self.permutation[p]
                                  for p in other.permutation])
    return CPermutation.create(combined_permutation)
def _calcDistance(self, inputPattern, distanceNorm=None):
    """Calculate the distances from inputPattern to all stored patterns. All
    distances are between 0.0 and 1.0

    :param inputPattern: The pattern from which distances to all other
        patterns are calculated
    :param distanceNorm: Degree of the distance norm
    """
    if distanceNorm is None:
        distanceNorm = self.distanceNorm

    # Sparse memory
    if self.useSparseMemory:
        if self._protoSizes is None:
            self._protoSizes = self._Memory.rowSums()
        overlapsWithProtos = self._Memory.rightVecSumAtNZ(inputPattern)
        inputPatternSum = inputPattern.sum()

        if self.distanceMethod == "rawOverlap":
            dist = inputPattern.sum() - overlapsWithProtos
        elif self.distanceMethod == "pctOverlapOfInput":
            dist = inputPatternSum - overlapsWithProtos
            if inputPatternSum > 0:
                dist /= inputPatternSum
        elif self.distanceMethod == "pctOverlapOfProto":
            overlapsWithProtos /= self._protoSizes
            dist = 1.0 - overlapsWithProtos
        elif self.distanceMethod == "pctOverlapOfLarger":
            maxVal = numpy.maximum(self._protoSizes, inputPatternSum)
            if maxVal.all() > 0:
                overlapsWithProtos /= maxVal
            dist = 1.0 - overlapsWithProtos
        elif self.distanceMethod == "norm":
            dist = self._Memory.vecLpDist(self.distanceNorm, inputPattern)
            distMax = dist.max()
            if distMax > 0:
                dist /= distMax
        else:
            raise RuntimeError("Unimplemented distance method %s" %
                               self.distanceMethod)

    # Dense memory
    else:
        if self.distanceMethod == "norm":
            dist = numpy.power(numpy.abs(self._M - inputPattern),
                               self.distanceNorm)
            dist = dist.sum(1)
            dist = numpy.power(dist, 1.0 / self.distanceNorm)
            dist /= dist.max()
        else:
            raise RuntimeError("Not implemented yet for dense storage....")

    return dist
def build_data_set(self):
    "Construct a sequence of name/value pairs from controls"
    data = {}
    for field in self.fields:
        if field.name:  # TODO: also check field.enabled
            val = field.get_value()
            if val is None:
                continue
            elif isinstance(val, unicode):
                # web2py string processing requires utf-8 encoded text
                val = val.encode("utf-8")
            data[field.name] = val
    return data
def parse_if_range_header(value):
    """Parses an if-range header which can be an etag or a date. Returns
    a :class:`~werkzeug.datastructures.IfRange` object.

    .. versionadded:: 0.7
    """
    if not value:
        return IfRange()
    date = parse_date(value)
    if date is not None:
        return IfRange(date=date)
    # drop weakness information
    return IfRange(unquote_etag(value)[0])
def cast_scalar_to_array(shape, value, dtype=None):
    """
    create np.ndarray of specified shape and dtype, filled with values

    Parameters
    ----------
    shape : tuple
    value : scalar value
    dtype : np.dtype, optional
        dtype to coerce

    Returns
    -------
    ndarray of shape, filled with value, of specified / inferred dtype
    """
    if dtype is None:
        dtype, fill_value = infer_dtype_from_scalar(value)
    else:
        fill_value = value

    values = np.empty(shape, dtype=dtype)
    values.fill(fill_value)

    return values
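A hedged usage sketch; note that `infer_dtype_from_scalar` is a pandas-internal helper, so the dtype-inference path assumes the surrounding pandas module scope.

import numpy as np

arr = cast_scalar_to_array((2, 3), 1.5)  # dtype inferred from the scalar
# arr -> array([[1.5, 1.5, 1.5],
#               [1.5, 1.5, 1.5]])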
def run_tasks(header, tasks):
    """Run a group of tasks with a header, footer and success/failure messages.

    Args:
        header: A message to print in the header bar before the tasks are run.
        tasks: A list of tuples containing a task title, a task, and a weight.
            If the tuple only contains two values, the weight is assumed to
            be one.
    """
    tasks = list(tasks)
    with timed_display(header) as print_message:
        with tqdm(tasks, position=1, desc='Progress', disable=None,
                  bar_format='{desc}{percentage:3.0f}% |{bar}|',
                  total=sum(t[2] if len(t) > 2 else 1 for t in tasks),
                  dynamic_ncols=True) as pbar:
            for task in tasks:
                print_message(task[0])
                with display_status():
                    try:
                        task[1]()
                    finally:
                        pbar.update(task[2] if len(task) > 2 else 1)
def mouse_move(self, event):
    """Get the coordinates of the mouse on the canvas."""
    if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
        self.posX = event.xdata
        self.posY = event.ydata
        self.graphic_target(self.posX, self.posY)
def openstack_upgrade_available(package):
    """
    Determines if an OpenStack upgrade is available from installation
    source, based on version of installed package.

    :param package: str: Name of installed package.

    :returns: bool: True if configured installation source offers a newer
                    version of package.
    """
    import apt_pkg as apt

    src = config('openstack-origin')
    cur_vers = get_os_version_package(package)
    if not cur_vers:
        # The package has not been installed yet, so do not attempt upgrade
        return False
    if "swift" in package:
        codename = get_os_codename_install_source(src)
        avail_vers = get_os_version_codename_swift(codename)
    else:
        avail_vers = get_os_version_install_source(src)
    apt.init()
    return apt.version_compare(avail_vers, cur_vers) >= 1
def sli_run(parameters=object(),
            fname='microcircuit.sli',
            verbosity='M_ERROR'):
    '''
    Takes parameter-class and name of main sli-script as input, initiating
    the simulation.

    kwargs:
    ::
        parameters : object, parameter class instance
        fname : str, path to sli codes to be executed
        verbosity : 'str', nest verbosity flag
    '''
    # Load parameters from params file, and pass them to nest
    # Python -> SLI
    send_nest_params_to_sli(vars(parameters))

    # Set SLI verbosity
    nest.sli_run("%s setverbosity" % verbosity)

    # Run NEST/SLI simulation
    nest.sli_run('(%s) run' % fname)
def decode_cf_datetime(num_dates, units, calendar=None, use_cftime=None):
    """Given an array of numeric dates in netCDF format, convert it into a
    numpy array of date time objects.

    For standard (Gregorian) calendars, this function uses vectorized
    operations, which makes it much faster than cftime.num2date. In such a
    case, the returned array will be of type np.datetime64.

    Note that time unit in `units` must not be smaller than microseconds and
    not larger than days.

    See also
    --------
    cftime.num2date
    """
    num_dates = np.asarray(num_dates)
    flat_num_dates = num_dates.ravel()
    if calendar is None:
        calendar = 'standard'

    if use_cftime is None:
        try:
            dates = _decode_datetime_with_pandas(flat_num_dates, units,
                                                 calendar)
        except (OutOfBoundsDatetime, OverflowError):
            dates = _decode_datetime_with_cftime(
                flat_num_dates.astype(np.float), units, calendar)

            if (dates[np.nanargmin(num_dates)].year < 1678 or
                    dates[np.nanargmax(num_dates)].year >= 2262):
                if calendar in _STANDARD_CALENDARS:
                    warnings.warn(
                        'Unable to decode time axis into full '
                        'numpy.datetime64 objects, continuing using '
                        'cftime.datetime objects instead, reason: dates out '
                        'of range', SerializationWarning, stacklevel=3)
            else:
                if calendar in _STANDARD_CALENDARS:
                    dates = cftime_to_nptime(dates)
    elif use_cftime:
        dates = _decode_datetime_with_cftime(
            flat_num_dates.astype(np.float), units, calendar)
    else:
        dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)

    return dates.reshape(num_dates.shape)
def remove_site(name):
    '''
    Delete a website from IIS.

    :param str name: The IIS site name.

    Usage:

    .. code-block:: yaml

        defaultwebsite-remove:
          win_iis.remove_site:
            - name: Default Web Site
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    current_sites = __salt__['win_iis.list_sites']()

    if name not in current_sites:
        ret['comment'] = 'Site has already been removed: {0}'.format(name)
        ret['result'] = True
    elif __opts__['test']:
        ret['comment'] = 'Site will be removed: {0}'.format(name)
        ret['changes'] = {'old': name,
                          'new': None}
    else:
        ret['comment'] = 'Removed site: {0}'.format(name)
        ret['changes'] = {'old': name,
                          'new': None}
        ret['result'] = __salt__['win_iis.remove_site'](name)
    return ret
def get_acquaintance_size(obj: Union[circuits.Circuit, ops.Operation]) -> int:
    """The maximum number of qubits to be acquainted with each other."""
    if isinstance(obj, circuits.Circuit):
        if not is_acquaintance_strategy(obj):
            raise TypeError('not is_acquaintance_strategy(circuit)')
        return max(tuple(get_acquaintance_size(op)
                         for op in obj.all_operations()) or (0,))
    if not isinstance(obj, ops.Operation):
        raise TypeError('not isinstance(obj, (Circuit, Operation))')
    if not isinstance(obj, ops.GateOperation):
        return 0
    if isinstance(obj.gate, AcquaintanceOpportunityGate):
        return len(obj.qubits)
    if isinstance(obj.gate, BipartiteSwapNetworkGate):
        return 2
    if isinstance(obj.gate, ShiftSwapNetworkGate):
        return obj.gate.acquaintance_size()
    if isinstance(obj.gate, SwapNetworkGate):
        if obj.gate.acquaintance_size is None:
            return sum(sorted(obj.gate.part_lens)[-2:])
        if (obj.gate.acquaintance_size - 1) in obj.gate.part_lens:
            return obj.gate.acquaintance_size
    sizer = getattr(obj.gate, '_acquaintance_size_', None)
    return 0 if sizer is None else sizer(len(obj.qubits))
def _split_keys_v1(joined):
    """Split two keys out of a string created by _join_keys_v1."""
    left, _, right = joined.partition('::')
    return _decode_v1(left), _decode_v1(right)
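A hypothetical round trip, assuming the companion `_join_keys_v1` encodes each key with the matching `_encode_v1` and joins the pair with '::', as the split above implies.

joined = _join_keys_v1('left-key', 'right-key')  # e.g. 'left-key::right-key'
assert _split_keys_v1(joined) == ('left-key', 'right-key')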
def __construct_lda_model(self):
    """Method to create LDA model to procure list of topics from.

    We do that by first fetching the descriptions of repositories the user
    has shown interest in. We tokenize the fetched descriptions to procure
    a list of cleaned tokens by dropping all the stop words and language
    names from it. We use the cleaned and sanitized token list to train an
    LDA model, from which we hope to procure topics of interest to the
    authenticated user.
    """
    # Fetch descriptions of repos of interest to authenticated user.
    repos_of_interest = self.__get_interests()

    # Procure clean tokens from the descriptions.
    cleaned_tokens = self.__clean_and_tokenize(repos_of_interest)

    # If cleaned tokens are empty, it can cause an exception while
    # generating the LDA model. But the fallback token shouldn't be anything
    # meaningful either, as that would mean we are suggesting repos without
    # reason. Hence the random string: LDA doesn't raise an exception, but
    # the token doesn't generate any suggestions either.
    if not cleaned_tokens:
        cleaned_tokens = [["zkfgzkfgzkfgzkfgzkfgzkfg"]]

    # Setup LDA requisites.
    dictionary = corpora.Dictionary(cleaned_tokens)
    corpus = [dictionary.doc2bow(text) for text in cleaned_tokens]

    # Generate LDA model
    self.lda_model = models.ldamodel.LdaModel(
        corpus, num_topics=1, id2word=dictionary, passes=10
    )
def _read_mode_sec(self, size, kind):
    """Read options with security info.

    Positional arguments:
        size - int, length of option
        kind - int, 130 (SEC) / 133 (ESEC)

    Returns:
        * dict -- extracted option with security info (E/SEC)

    Structure of these options:
        * [RFC 1108] Security (SEC)

          +------------+------------+------------+-------------//----------+
          |  10000010  |  XXXXXXXX  |  SSSSSSSS  |  AAAAAAA[1]    AAAAAAA0 |
          |            |            |            |         [0]             |
          +------------+------------+------------+-------------//----------+
            TYPE = 130     LENGTH   CLASSIFICATION         PROTECTION
                                         LEVEL          AUTHORITY FLAGS

        * [RFC 1108] Extended Security (ESEC):

          +------------+------------+------------+-------//-------+
          |  10000101  |  000LLLLL  |  AAAAAAAA  |  add sec info  |
          +------------+------------+------------+-------//-------+
           TYPE = 133      LENGTH     ADDITIONAL      ADDITIONAL
                                     SECURITY INFO   SECURITY INFO
                                      FORMAT CODE

        Octets      Bits        Name                    Description
          0           0     ip.sec.kind             Kind (130)
          0           0     ip.sec.type.copy        Copied Flag (1)
          0           1     ip.sec.type.class       Option Class (0)
          0           3     ip.sec.type.number      Option Number (2)
          1           8     ip.sec.length           Length (≥3)
          2          16     ip.sec.level            Classification Level
          3          24     ip.sec.flags            Protection Authority Flags
    """
    if size < 3:
        raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')

    _clvl = self._read_unpack(1)

    data = dict(
        kind=kind,
        type=self._read_opt_type(kind),
        length=size,
        level=_CLASSIFICATION_LEVEL.get(_clvl, _clvl),
    )

    if size > 3:
        _list = list()
        for counter in range(3, size):
            _flag = self._read_binary(1)
            if (counter < size - 1 and not int(_flag[7], base=2)) \
                    or (counter == size - 1 and int(_flag[7], base=2)):
                raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')

            _dict = dict()
            for (index, bit) in enumerate(_flag[:5]):
                _auth = _PROTECTION_AUTHORITY.get(index)
                _dict[_auth] = True if int(bit, base=2) else False
            _list.append(Info(_dict))

        data['flags'] = tuple(_list)

    return data
def update(self, ip_address=values.unset, friendly_name=values.unset,
           cidr_prefix_length=values.unset):
    """
    Update the IpAddressInstance

    :param unicode ip_address: An IP address in dotted decimal notation from
        which you want to accept traffic. Any SIP requests from this IP
        address will be allowed by Twilio. IPv4 only supported today.
    :param unicode friendly_name: A human readable descriptive text for this
        resource, up to 64 characters long.
    :param unicode cidr_prefix_length: An integer representing the length of
        the CIDR prefix to use with this IP address when accepting traffic.
        By default the entire IP address is used.

    :returns: Updated IpAddressInstance
    :rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressInstance
    """
    data = values.of({
        'IpAddress': ip_address,
        'FriendlyName': friendly_name,
        'CidrPrefixLength': cidr_prefix_length,
    })

    payload = self._version.update(
        'POST',
        self._uri,
        data=data,
    )

    return IpAddressInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
        ip_access_control_list_sid=self._solution['ip_access_control_list_sid'],
        sid=self._solution['sid'],
    )
def query_array(ncfile, name) -> numpy.ndarray:
    """Return the data of the variable with the given name from the given
    NetCDF file.

    The following example shows that |query_array| returns |nan| entries to
    represent missing values even when the respective NetCDF variable
    defines a different fill value:

    >>> from hydpy import TestIO
    >>> from hydpy.core.netcdftools import netcdf4
    >>> from hydpy.core import netcdftools
    >>> netcdftools.fillvalue = -999.0
    >>> with TestIO():
    ...     with netcdf4.Dataset('test.nc', 'w') as ncfile:
    ...         netcdftools.create_dimension(ncfile, 'dim1', 5)
    ...         netcdftools.create_variable(ncfile, 'var1', 'f8', ('dim1',))
    ...     ncfile = netcdf4.Dataset('test.nc', 'r')
    >>> netcdftools.query_variable(ncfile, 'var1')[:].data
    array([-999., -999., -999., -999., -999.])
    >>> netcdftools.query_array(ncfile, 'var1')
    array([ nan,  nan,  nan,  nan,  nan])
    >>> import numpy
    >>> netcdftools.fillvalue = numpy.nan
    """
    variable = query_variable(ncfile, name)
    maskedarray = variable[:]
    fillvalue_ = getattr(variable, '_FillValue', numpy.nan)
    if not numpy.isnan(fillvalue_):
        maskedarray[maskedarray.mask] = numpy.nan
    return maskedarray.data
def __generate_key(self, config):
    """Generate the ssh key, and record the key and config paths in `config`."""
    cwd = config.get('ssh_path', self._install_directory())
    if config.is_affirmative('create', default="yes"):
        if not os.path.exists(cwd):
            os.makedirs(cwd)
        if not os.path.exists(os.path.join(cwd, config.get('keyname'))):
            command = "ssh-keygen -t %(type)s -f %(keyname)s -N " % config.to_dict()
            lib.call(command, cwd=cwd, output_log_level=logging.DEBUG)
    if not config.has('ssh_path'):
        config.set('ssh_path', cwd)
    config.set('ssh_key_path',
               os.path.join(config.get('ssh_path'), config.get('keyname')))
def _remote_chmod(self, paths, mode, sudoable=False):
    """
    Issue an asynchronous set_file_mode() call for every path in `paths`,
    then format the resulting return value list with fake_shell().
    """
    LOG.debug('_remote_chmod(%r, mode=%r, sudoable=%r)',
              paths, mode, sudoable)
    return self.fake_shell(lambda: mitogen.select.Select.all(
        self._connection.get_chain().call_async(
            ansible_mitogen.target.set_file_mode, path, mode
        )
        for path in paths
    ))
def html(text, lazy_images=False):
    """
    Render markdown-formatted text into HTML.

    - If you also want to build a table of contents inside of the markdown,
      add the tag: [TOC]. It will include a <ul><li>...</ul> of all <h*>.

    :param text:
    :param lazy_images: bool - If true, it will activate the LazyImageExtension
    :return:
    """
    extensions = [
        'markdown.extensions.nl2br',
        'markdown.extensions.sane_lists',
        'markdown.extensions.toc',
        'markdown.extensions.tables',
        OEmbedExtension()
    ]
    if lazy_images:
        extensions.append(LazyImageExtension())

    return markdown.markdown(text, extensions=extensions)
def t_whitespace_or_comment(self, s):
    r'([ \t]*[#].*[^\x04][\n]?)|([ \t]+)'
    if '#' in s:
        # We have a comment
        matches = re.match(r'(\s+)(.*[\n]?)', s)
        if matches and self.is_newline:
            self.handle_indent_dedent(matches.group(1))
            s = matches.group(2)
        if s.endswith("\n"):
            self.add_token('COMMENT', s[:-1])
            self.add_token('NEWLINE', "\n")
        else:
            self.add_token('COMMENT', s)
    elif self.is_newline:
        self.handle_indent_dedent(s)
    return
def _prepare_b_jkl_mn(readout_povm, pauli_basis, pre_channel_ops,
                      post_channel_ops, rho0):
    r"""
    Prepare the coefficient matrix for process tomography. This function
    uses sparse matrices for much greater efficiency. The coefficient
    matrix is defined as:

    .. math::

        B_{(jkl)(mn)}=\sum_{r,q}\pi_{jr}(\mathcal{R}_{k})_{rm}
        (\mathcal{R}_{l})_{nq} (\rho_0)_q

    where :math:`\mathcal{R}_{k}` is the transfer matrix of the quantum map
    corresponding to the k-th pre-measurement channel, while
    :math:`\mathcal{R}_{l}` is the transfer matrix of the l-th state
    preparation process. We also require the overlap between the
    (generalized) Pauli basis ops and the projection operators
    :math:`\pi_{jl}:=\sbraket{\Pi_j}{P_l} = \tr{\Pi_j P_l}`.

    See the grove documentation on tomography for detailed information.

    :param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier.
    :param OperatorBasis pauli_basis: The (generalized) Pauli basis employed in the estimation.
    :param list pre_channel_ops: The state preparation channel operators as `qutip.Qobj`
    :param list post_channel_ops: The pre-measurement (post circuit) channel operators as `qutip.Qobj`
    :param qutip.Qobj rho0: The initial state as a density matrix.
    :return: The coefficient matrix necessary to set up the binomial state tomography problem.
    :rtype: scipy.sparse.csr_matrix
    """
    c_jk_m = state_tomography._prepare_c_jk_m(readout_povm, pauli_basis,
                                              post_channel_ops)
    pre_channel_transfer_matrices = [pauli_basis.transfer_matrix(qt.to_super(ek))
                                     for ek in pre_channel_ops]
    rho0_q = pauli_basis.project_op(rho0)

    # These next lines hide some very serious (sparse-)matrix index magic,
    # basically we exploit the same index math as in `qutip.sprepost()`
    # i.e., if a matrix X is linearly mapped `X -> A.dot(X).dot(B)`
    # then this can be rewritten as
    #   `np.kron(B.T, A).dot(X.T.ravel()).reshape((B.shape[1], A.shape[0])).T`
    # The extra matrix transpose operations are necessary because numpy by
    # default uses row-major storage, whereas these operations are
    # conventionally defined for column-major storage.
    d_ln = spvstack([(rlnq * rho0_q).T
                     for rlnq in pre_channel_transfer_matrices]).tocoo()
    b_jkl_mn = spkron(d_ln, c_jk_m).real
    return b_jkl_mn
def logn_correlated_rate(parent_rate, branch_length, autocorrel_param,
                         size=1):
    """
    The log of the descendant rate, ln(Rd), is ~ N(mu, bl*ac), where the
    variance = bl*ac = branch_length * autocorrel_param, and mu is set so
    that E[Rd] = Rp:

    E[X] where ln(X) ~ N(mu, sigma^2) = exp(mu+(1/2)*sigma_sq),
    so Rp = exp(mu+(1/2)*bl*ac),
    ln(Rp) = mu + (1/2)*bl*ac,
    ln(Rp) - (1/2)*bl*ac = mu,
    so ln(Rd) ~ N(ln(Rp) - (1/2)*bl*ac, bl*ac).

    (NB: Var[Rd] = Rp^2 * (exp(bl*ac)-1),
         Std[Rd] = Rp * sqrt(exp(bl*ac)-1))

    See: H Kishino, J L Thorne, and W J Bruno (2001)
    """
    if autocorrel_param <= 0:
        raise Exception('Autocorrelation parameter must be greater than 0')

    variance = branch_length * autocorrel_param
    stdev = np.sqrt(variance)
    ln_descendant_rate = np.random.normal(np.log(parent_rate) - 0.5 * variance,
                                          scale=stdev, size=size)
    descendant_rate = np.exp(ln_descendant_rate)
    return float(descendant_rate) if size == 1 else descendant_rate
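A quick Monte Carlo check of the mean-preserving property derived in the docstring (E[Rd] = Rp); this assumes numpy is imported as np, as in the function body.

import numpy as np

np.random.seed(0)
rates = logn_correlated_rate(2.0, branch_length=0.1,
                             autocorrel_param=0.5, size=100000)
print(rates.mean())  # ~2.0: the descendant rate is centred on the parent rate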
def QA_fetch_lhb(date, db=DATABASE):
    '''Fetch dragon-tiger list (LHB) data for a given day'''
    try:
        collections = db.lhb
        return pd.DataFrame(
            [item for item in collections.find({'date': date}, {"_id": 0})]
        ).set_index('code', drop=False).sort_index()
    except Exception as e:
        raise e
def remove_bucket_list_item(self, id, collection, item):
    """
    Removes an item from the bucket list

    Args:
        id: the CRITs object id of the TLO
        collection: The db collection. See main class documentation.
        item: the bucket list item to remove

    Returns:
        The mongodb result
    """
    if type(id) is not ObjectId:
        id = ObjectId(id)
    obj = getattr(self.db, collection)
    result = obj.update(
        {'_id': id},
        {'$pull': {'bucket_list': item}}
    )
    return result
def move_item_into_viewport(self, item):
    """Causes the `item` to be moved into the viewport

    The zoom factor and the position of the viewport are updated to move
    the `item` into the viewport. If `item` is not a `StateView`, the
    parental `StateView` is moved into the viewport.

    :param StateView | ConnectionView | PortView item: The item to be moved
        into the viewport
    """
    if not item:
        return

    HORIZONTAL = 0
    VERTICAL = 1

    if not isinstance(item, Item):
        state_v = item.parent
    elif not isinstance(item, StateView):
        state_v = self.canvas.get_parent(item)
    else:
        state_v = item

    viewport_size = (self.view.editor.get_allocation().width,
                     self.view.editor.get_allocation().height)
    state_size = self.view.editor.get_matrix_i2v(state_v).transform_distance(
        state_v.width, state_v.height)

    min_relative_size = min(viewport_size[i] / state_size[i]
                            for i in [HORIZONTAL, VERTICAL])
    if min_relative_size != 1:
        # Allow margin around state
        margin_relative = 1. / gui_constants.BORDER_WIDTH_STATE_SIZE_FACTOR
        zoom_factor = min_relative_size * (1 - margin_relative)
        if zoom_factor > 1:
            zoom_base = 4
            zoom_factor = max(1, math.log(zoom_factor * zoom_base, zoom_base))
        self.view.editor.zoom(zoom_factor)
        # The zoom operation must be performed before the pan operation to
        # work on updated GtkAdjustments (scroll bars)
        self.canvas.wait_for_update()

    state_pos = self.view.editor.get_matrix_i2v(state_v).transform_point(0, 0)
    state_size = self.view.editor.get_matrix_i2v(state_v).transform_distance(
        state_v.width, state_v.height)
    viewport_size = (self.view.editor.get_allocation().width,
                     self.view.editor.get_allocation().height)

    # Calculate offset around state so that the state is centered in the
    # viewport
    padding_offset_horizontal = (viewport_size[HORIZONTAL]
                                 - state_size[HORIZONTAL]) / 2.
    padding_offset_vertical = (viewport_size[VERTICAL]
                               - state_size[VERTICAL]) / 2.
    self.view.editor.hadjustment.set_value(
        state_pos[HORIZONTAL] - padding_offset_horizontal)
    self.view.editor.vadjustment.set_value(
        state_pos[VERTICAL] - padding_offset_vertical)
def evaluate_postfix(tokens):
    """
    Given a list of evaluatable tokens in postfix format, calculate a
    solution.
    """
    stack = []

    for token in tokens:
        total = None

        if is_int(token) or is_float(token) or is_constant(token):
            stack.append(token)
        elif is_unary(token):
            a = stack.pop()
            total = mathwords.UNARY_FUNCTIONS[token](a)
        elif len(stack):
            b = stack.pop()
            a = stack.pop()

            if token == '+':
                total = a + b
            elif token == '-':
                total = a - b
            elif token == '*':
                total = a * b
            elif token == '^':
                total = a ** b
            elif token == '/':
                if Decimal(str(b)) == 0:
                    total = 'undefined'
                else:
                    total = Decimal(str(a)) / Decimal(str(b))
            else:
                raise PostfixTokenEvaluationException(
                    'Unknown token {}'.format(token)
                )

        if total is not None:
            stack.append(total)

    # If the stack is empty the tokens could not be evaluated
    if not stack:
        raise PostfixTokenEvaluationException(
            'The postfix expression resulted in an empty stack'
        )

    return stack.pop()
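A short usage sketch: (3 + 4) * 2 in postfix is `3 4 + 2 *`. This assumes the module's `is_int`/`is_float`/`is_constant`/`is_unary` helpers classify plain Python numbers as operands.

tokens = [3, 4, '+', 2, '*']
print(evaluate_postfix(tokens))  # -> 14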
def insert_device_filter(self, position, filter_p):
    """Inserts the given USB device to the specified position
    in the list of filters.

    Positions are numbered starting from 0. If the specified position is
    equal to or greater than the number of elements in the list, the filter
    is added to the end of the collection.

    Duplicates are not allowed, so an attempt to insert a filter that is
    already in the collection, will return an error.
    :py:func:`device_filters`

    in position of type int
        Position to insert the filter to.

    in filter_p of type :class:`IUSBDeviceFilter`
        USB device filter to insert.

    raises :class:`VBoxErrorInvalidVmState`
        Virtual machine is not mutable.

    raises :class:`OleErrorInvalidarg`
        USB device filter not created within this VirtualBox instance.

    raises :class:`VBoxErrorInvalidObjectState`
        USB device filter already in list.
    """
    if not isinstance(position, baseinteger):
        raise TypeError("position can only be an instance of type baseinteger")
    if not isinstance(filter_p, IUSBDeviceFilter):
        raise TypeError("filter_p can only be an instance of type IUSBDeviceFilter")
    self._call("insertDeviceFilter", in_p=[position, filter_p])
def align(args):
    """
    %prog align database.fasta read1.fq read2.fq

    Wrapper for `gsnap` single-end or paired-end, depending on the number of
    args.
    """
    from jcvi.formats.fastq import guessoffset

    p = OptionParser(align.__doc__)
    p.add_option("--rnaseq", default=False, action="store_true",
                 help="Input is RNA-seq reads, turn splicing on")
    p.add_option("--native", default=False, action="store_true",
                 help="Convert GSNAP output to NATIVE format")
    p.set_home("eddyyeh")
    p.set_outdir()
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) == 2:
        logging.debug("Single-end alignment")
    elif len(args) == 3:
        logging.debug("Paired-end alignment")
    else:
        sys.exit(not p.print_help())

    dbfile, readfile = args[:2]
    outdir = opts.outdir
    assert op.exists(dbfile) and op.exists(readfile)

    prefix = get_prefix(readfile, dbfile)
    logfile = op.join(outdir, prefix + ".log")
    gsnapfile = op.join(outdir, prefix + ".gsnap")
    nativefile = gsnapfile.rsplit(".", 1)[0] + ".unique.native"

    if not need_update((dbfile, readfile), gsnapfile):
        logging.error("`{0}` exists. `gsnap` already run.".format(gsnapfile))
    else:
        dbdir, dbname = check_index(dbfile)
        cmd = "gsnap -D {0} -d {1}".format(dbdir, dbname)
        cmd += " -B 5 -m 0.1 -i 2 -n 3"  # memory, mismatch, indel penalty, nhits
        if opts.rnaseq:
            cmd += " -N 1"
        cmd += " -t {0}".format(opts.cpus)
        cmd += " --gmap-mode none --nofails"
        if readfile.endswith(".gz"):
            cmd += " --gunzip"
        try:
            offset = "sanger" if guessoffset([readfile]) == 33 else "illumina"
            cmd += " --quality-protocol {0}".format(offset)
        except AssertionError:
            pass
        cmd += " " + " ".join(args[1:])
        sh(cmd, outfile=gsnapfile, errfile=logfile)

    if opts.native:
        EYHOME = opts.eddyyeh_home
        if need_update(gsnapfile, nativefile):
            cmd = op.join(EYHOME, "convert2native.pl")
            cmd += " --gsnap {0} -o {1}".format(gsnapfile, nativefile)
            cmd += " -proc {0}".format(opts.cpus)
            sh(cmd)

    return gsnapfile, logfile
def is_url():
    """
    Validates that a field's value is a valid URL.
    """
    # Stolen from Django
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)

    def validate(value):
        if not regex.match(value):
            return e("{} is not a valid URL", value)

    return validate
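A hedged usage sketch; `e(...)` is the module's error-formatting helper, and the validator returns None on success.

validate = is_url()
validate('https://example.com')  # passes silently (returns None)
validate('not a url')            # returns the error built by e(...)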
def omim(context, api_key, institute):
    """
    Update the automatically generated OMIM gene panel in the database.
    """
    LOG.info("Running scout update omim")
    adapter = context.obj['adapter']

    api_key = api_key or context.obj.get('omim_api_key')
    if not api_key:
        LOG.warning("Please provide an OMIM API key to load the OMIM gene panel")
        context.abort()

    institute_obj = adapter.institute(institute)
    if not institute_obj:
        LOG.info("Institute %s could not be found in database", institute)
        LOG.warning("Please specify an existing institute")
        context.abort()

    try:
        adapter.load_omim_panel(api_key, institute=institute)
    except Exception as err:
        LOG.error(err)
        context.abort()
def email(self, comment, content_object, request):
    """
    Overwritten for a better email notification.
    """
    if not self.email_notification:
        return
    send_comment_posted(comment, request)
Overridden to send a nicer email notification.
def non_decreasing(values): """True if values are not decreasing.""" return all(x <= y for x, y in zip(values, values[1:]))
True if values are not decreasing.
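A quick sanity check of the pairwise `zip` idiom above (a sketch; assumes `non_decreasing` from the snippet is in scope):

# Hypothetical usage sketch for non_decreasing.
assert non_decreasing([1, 1, 2, 3])    # repeats are allowed
assert non_decreasing([])              # vacuously true: zip() yields no pairs
assert not non_decreasing([3, 2])      # a single drop fails the check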
def _convert_to_hashable(data, types=True): r""" Converts `data` into a hashable byte representation if an appropriate hashing function is known. Args: data (object): ordered data with structure types (bool): include type prefixes in the hash Returns: tuple(bytes, bytes): prefix, hashable: a prefix hinting the original data type and the byte representation of `data`. Raises: TypeError : if data has no registered hash methods Example: >>> assert _convert_to_hashable(None) == (b'NULL', b'NONE') >>> assert _convert_to_hashable('string') == (b'TXT', b'string') >>> assert _convert_to_hashable(1) == (b'INT', b'\x01') >>> assert _convert_to_hashable(1.0) == (b'FLT', b'\x01/\x01') >>> assert _convert_to_hashable(_intlike[-1](1)) == (b'INT', b'\x01') """ # HANDLE MOST COMMON TYPES FIRST if data is None: hashable = b'NONE' prefix = b'NULL' elif isinstance(data, six.binary_type): hashable = data prefix = b'TXT' elif isinstance(data, six.text_type): # convert unicode into bytes hashable = data.encode('utf-8') prefix = b'TXT' elif isinstance(data, _intlike): # warnings.warn('Hashing ints is slow, numpy is prefered') hashable = _int_to_bytes(data) # hashable = data.to_bytes(8, byteorder='big') prefix = b'INT' elif isinstance(data, float): a, b = float(data).as_integer_ratio() hashable = _int_to_bytes(a) + b'/' + _int_to_bytes(b) prefix = b'FLT' else: # Then dynamically look up any other type hash_func = _HASHABLE_EXTENSIONS.lookup(data) prefix, hashable = hash_func(data) if types: return prefix, hashable else: return b'', hashable
r""" Converts `data` into a hashable byte representation if an appropriate hashing function is known. Args: data (object): ordered data with structure types (bool): include type prefixes in the hash Returns: tuple(bytes, bytes): prefix, hashable: a prefix hinting the original data type and the byte representation of `data`. Raises: TypeError : if data has no registered hash methods Example: >>> assert _convert_to_hashable(None) == (b'NULL', b'NONE') >>> assert _convert_to_hashable('string') == (b'TXT', b'string') >>> assert _convert_to_hashable(1) == (b'INT', b'\x01') >>> assert _convert_to_hashable(1.0) == (b'FLT', b'\x01/\x01') >>> assert _convert_to_hashable(_intlike[-1](1)) == (b'INT', b'\x01')
def match(self, context, line): """Match code lines prefixed with a variety of keywords.""" return line.kind == 'code' and line.partitioned[0] in self._both
Match code lines prefixed with a variety of keywords.
def flush(self, meta=None): '''Flush all model keys from the database''' pattern = self.basekey(meta) if meta else self.namespace return self.client.delpattern('%s*' % pattern)
Flush all model keys from the database
def join(self): """Wait for transfer to exit, raising errors as necessary.""" self.closed = True while self.expect > 0: val = self.wait_change.get() self.expect -= 1 if val is not None: # Wait a while for all running greenlets to exit, and # then attempt to force them to exit so join() # terminates in a reasonable amount of time. gevent.joinall(list(self.greenlets), timeout=30) gevent.killall(list(self.greenlets), block=True, timeout=30) raise val
Wait for transfer to exit, raising errors as necessary.
def r_passage(self, objectId, subreference, lang=None): """ Retrieve the text of the passage :param objectId: Collection identifier :type objectId: str :param lang: Lang in which to express main data :type lang: str :param subreference: Reference identifier :type subreference: str :return: Template, collections metadata and Markup object representing the text :rtype: {str: Any} """ collection = self.get_collection(objectId) if isinstance(collection, CtsWorkMetadata): editions = [t for t in collection.children.values() if isinstance(t, CtsEditionMetadata)] if len(editions) == 0: raise UnknownCollection("This work has no default edition") return redirect(url_for(".r_passage", objectId=str(editions[0].id), subreference=subreference)) text = self.get_passage(objectId=objectId, subreference=subreference) passage = self.transform(text, text.export(Mimetypes.PYTHON.ETREE), objectId) prev, next = self.get_siblings(objectId, subreference, text) return { "template": "main::text.html", "objectId": objectId, "subreference": subreference, "collections": { "current": { "label": collection.get_label(lang), "id": collection.id, "model": str(collection.model), "type": str(collection.type), "author": text.get_creator(lang), "title": text.get_title(lang), "description": text.get_description(lang), "citation": collection.citation, "coins": self.make_coins(collection, text, subreference, lang=lang) }, "parents": self.make_parents(collection, lang=lang) }, "text_passage": Markup(passage), "prev": prev, "next": next }
Retrieve the text of the passage :param objectId: Collection identifier :type objectId: str :param lang: Lang in which to express main data :type lang: str :param subreference: Reference identifier :type subreference: str :return: Template, collections metadata and Markup object representing the text :rtype: {str: Any}
def register_annotype_converter(cls, types, is_array=False, is_mapping=False): # type: (Union[Sequence[type], type], bool, bool) -> Any """Register this class as a converter for Anno instances""" if not isinstance(types, Sequence): types = [types] def decorator(subclass): for typ in types: cls._annotype_lookup[(typ, is_array, is_mapping)] = subclass return subclass return decorator
Register this class as a converter for Anno instances
def norm(self):
    """
    Returns the norm of the quaternion

    norm = sqrt(w**2 + x**2 + y**2 + z**2)
    """
    tmp = self.w**2 + self.x**2 + self.y**2 + self.z**2
    return tmp**0.5
Returns the norm of the quaternion

norm = sqrt(w**2 + x**2 + y**2 + z**2)
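A usage sketch, assuming a `Quaternion` class that exposes `w`, `x`, `y`, `z` and this `norm()` method; the keyword constructor is hypothetical:

q = Quaternion(w=1.0, x=2.0, y=2.0, z=4.0)   # hypothetical constructor signature
n = q.norm()                                  # sqrt(1 + 4 + 4 + 16) = 5.0
# dividing each component by the norm yields a unit quaternion,
# e.g. for use as a rotation
unit = Quaternion(w=q.w / n, x=q.x / n, y=q.y / n, z=q.z / n)
assert abs(unit.norm() - 1.0) < 1e-12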
def get_redirect_url(self, url, encrypt_code, card_id):
    """
    Get the redirect URL (external link) for a card coupon
    """
    from wechatpy.utils import WeChatSigner

    code = self.decrypt_code(encrypt_code)

    signer = WeChatSigner()
    signer.add_data(self.secret)
    signer.add_data(code)
    signer.add_data(card_id)
    signature = signer.signature

    r = '{url}?encrypt_code={code}&card_id={card_id}&signature={signature}'
    return r.format(
        url=url,
        code=encrypt_code,
        card_id=card_id,
        signature=signature
    )
Get the redirect URL (external link) for a card coupon
def write_back_register(self, reg, val): """ Sync register state from Manticore -> Unicorn""" if self.write_backs_disabled: return if issymbolic(val): logger.warning("Skipping Symbolic write-back") return if reg in self.flag_registers: self._emu.reg_write(self._to_unicorn_id('EFLAGS'), self._cpu.read_register('EFLAGS')) return self._emu.reg_write(self._to_unicorn_id(reg), val)
Sync register state from Manticore -> Unicorn
def filter(args): """ %prog filter gffile > filtered.gff Filter the gff file based on criteria below: (1) feature attribute values: [Identity, Coverage]. You can get this type of gff by using gmap $ gmap -f 2 .... (2) Total bp length of child features """ p = OptionParser(filter.__doc__) p.add_option("--type", default="mRNA", help="The feature to scan for the attributes [default: %default]") g1 = OptionGroup(p, "Filter by identity/coverage attribute values") g1.add_option("--id", default=95, type="float", help="Minimum identity [default: %default]") g1.add_option("--coverage", default=90, type="float", help="Minimum coverage [default: %default]") g1.add_option("--nocase", default=False, action="store_true", help="Case insensitive lookup of attribute names [default: %default]") p.add_option_group(g1) g2 = OptionGroup(p, "Filter by child feature bp length") g2.add_option("--child_ftype", default=None, type="str", help="Child featuretype to consider") g2.add_option("--child_bp", default=None, type="int", help="Filter by total bp of children of chosen ftype") p.add_option_group(g2) p.set_outfile() opts, args = p.parse_args(args) otype, oid, ocov = opts.type, opts.id, opts.coverage cftype, clenbp = opts.child_ftype, opts.child_bp id_attr, cov_attr = "Identity", "Coverage" if opts.nocase: id_attr, cov_attr = id_attr.lower(), cov_attr.lower() if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gffdb = make_index(gffile) bad = set() ptype = None for g in gffdb.features_of_type(otype, order_by=('seqid', 'start')): if not ptype: parent = list(gffdb.parents(g)) ptype = parent[0].featuretype \ if len(parent) > 0 else otype if cftype and clenbp: if gffdb.children_bp(g, child_featuretype=cftype) < clenbp: bad.add(g.id) elif oid and ocov: identity = float(g.attributes[id_attr][0]) coverage = float(g.attributes[cov_attr][0]) if identity < oid or coverage < ocov: bad.add(g.id) logging.debug("{0} bad accns marked.".format(len(bad))) fw = must_open(opts.outfile, "w") for g in gffdb.features_of_type(ptype, order_by=('seqid', 'start')): if ptype != otype: feats = list(gffdb.children(g, featuretype=otype, order_by=('start'))) ok_feats = [f for f in feats if f.id not in bad] if len(ok_feats) > 0: print(g, file=fw) for feat in ok_feats: print(feat, file=fw) for child in gffdb.children(feat, order_by=('start')): print(child, file=fw) else: if g.id not in bad: print(g, file=fw) for child in gffdb.children(g, order_by=('start')): print(child, file=fw) fw.close()
%prog filter gffile > filtered.gff Filter the gff file based on criteria below: (1) feature attribute values: [Identity, Coverage]. You can get this type of gff by using gmap $ gmap -f 2 .... (2) Total bp length of child features
def apply_injectables(self, targets): """Given an iterable of `Target` instances, apply their transitive injectables.""" target_types = {type(t) for t in targets} target_subsystem_deps = {s for s in itertools.chain(*(t.subsystems() for t in target_types))} for subsystem in target_subsystem_deps: # TODO: The is_initialized() check is primarily for tests and would be nice to do away with. if issubclass(subsystem, InjectablesMixin) and subsystem.is_initialized(): subsystem.global_instance().injectables(self)
Given an iterable of `Target` instances, apply their transitive injectables.
def apply_clicked(self, button): """Triggered when the Apply button in the source editor is clicked. """ if isinstance(self.model.state, LibraryState): logger.warning("It is not allowed to modify libraries.") self.view.set_text("") return # Ugly workaround to give user at least some feedback about the parser # Without the loop, this function would block the GTK main loop and the log message would appear after the # function has finished # TODO: run parser in separate thread while Gtk.events_pending(): Gtk.main_iteration_do(False) # get script current_text = self.view.get_text() # Directly apply script if linter was deactivated if not self.view['pylint_check_button'].get_active(): self.set_script_text(current_text) return logger.debug("Parsing execute script...") with open(self.tmp_file, "w") as text_file: text_file.write(current_text) # clear astroid module cache, see http://stackoverflow.com/questions/22241435/pylint-discard-cached-file-state MANAGER.astroid_cache.clear() lint_config_file = resource_filename(rafcon.__name__, "pylintrc") args = ["--rcfile={}".format(lint_config_file)] # put your own here with contextlib.closing(StringIO()) as dummy_buffer: json_report = JSONReporter(dummy_buffer.getvalue()) try: lint.Run([self.tmp_file] + args, reporter=json_report, exit=False) except: logger.exception("Could not run linter to check script") os.remove(self.tmp_file) if json_report.messages: def on_message_dialog_response_signal(widget, response_id): if response_id == 1: self.set_script_text(current_text) else: logger.debug("The script was not saved") widget.destroy() message_string = "Are you sure that you want to save this file?\n\nThe following errors were found:" line = None for message in json_report.messages: (error_string, line) = self.format_error_string(message) message_string += "\n\n" + error_string # focus line of error if line: tbuffer = self.view.get_buffer() start_iter = tbuffer.get_start_iter() start_iter.set_line(int(line)-1) tbuffer.place_cursor(start_iter) message_string += "\n\nThe line was focused in the source editor." self.view.scroll_to_cursor_onscreen() # select state to show source editor sm_m = state_machine_manager_model.get_state_machine_model(self.model) if sm_m.selection.get_selected_state() is not self.model: sm_m.selection.set(self.model) dialog = RAFCONButtonDialog(message_string, ["Save with errors", "Do not save"], on_message_dialog_response_signal, message_type=Gtk.MessageType.WARNING, parent=self.get_root_window()) result = dialog.run() else: self.set_script_text(current_text)
Triggered when the Apply button in the source editor is clicked.
def storage_expansion(network, basemap=True, scaling=1, filename=None):
    """
    Plot storage distribution as circles on grid nodes

    Displays storage size and distribution in network.

    Parameters
    ----------
    network : PyPSA network container
        Holds topology of grid including results from powerflow analysis
    filename : str
        Specify filename
        If not given, figure will be shown directly
    """
    stores = network.storage_units[network.storage_units.carrier ==
                                   'extendable_storage']
    batteries = stores[stores.max_hours == 6]
    hydrogen = stores[stores.max_hours == 168]
    storage_distribution = \
        network.storage_units.p_nom_opt[stores.index].groupby(
            network.storage_units.bus).sum().reindex(
                network.buses.index, fill_value=0.)

    battery_distribution = \
        network.storage_units.p_nom_opt[batteries.index].groupby(
            network.storage_units.bus).sum().reindex(
                network.buses.index, fill_value=0.)

    hydrogen_distribution = \
        network.storage_units.p_nom_opt[hydrogen.index].groupby(
            network.storage_units.bus).sum().reindex(
                network.buses.index, fill_value=0.)

    sbatt = network.storage_units.index[
        (network.storage_units.p_nom_opt > 1) &
        (network.storage_units.capital_cost > 10) &
        (network.storage_units.max_hours == 6)]
    shydr = network.storage_units.index[
        (network.storage_units.p_nom_opt > 1) &
        (network.storage_units.capital_cost > 10) &
        (network.storage_units.max_hours == 168)]

    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(6, 6)

    msd_max = storage_distribution.max()
    msd_max_bat = battery_distribution.max()
    msd_max_hyd = hydrogen_distribution.max()

    if msd_max != 0:
        LabelVal = int(log10(msd_max))
    else:
        LabelVal = 0
    if LabelVal < 0:
        LabelUnit = 'kW'
        msd_max, msd_max_bat, msd_max_hyd = msd_max * \
            1000, msd_max_bat * 1000, msd_max_hyd * 1000
        battery_distribution = battery_distribution * 1000
        hydrogen_distribution = hydrogen_distribution * 1000
    elif LabelVal < 3:
        LabelUnit = 'MW'
    else:
        LabelUnit = 'GW'
        msd_max, msd_max_bat, msd_max_hyd = msd_max / \
            1000, msd_max_bat / 1000, msd_max_hyd / 1000
        battery_distribution = battery_distribution / 1000
        hydrogen_distribution = hydrogen_distribution / 1000

    if network.storage_units.p_nom_opt[sbatt].sum() < 1 and \
            network.storage_units.p_nom_opt[shydr].sum() < 1:
        print("No storage unit to plot")
    elif network.storage_units.p_nom_opt[sbatt].sum() > 1 and \
            network.storage_units.p_nom_opt[shydr].sum() < 1:
        network.plot(bus_sizes=battery_distribution * scaling,
                     bus_colors='orangered', ax=ax, line_widths=0.3)
    elif network.storage_units.p_nom_opt[sbatt].sum() < 1 and \
            network.storage_units.p_nom_opt[shydr].sum() > 1:
        network.plot(bus_sizes=hydrogen_distribution * scaling,
                     bus_colors='teal', ax=ax, line_widths=0.3)
    else:
        network.plot(bus_sizes=battery_distribution * scaling,
                     bus_colors='orangered', ax=ax, line_widths=0.3)
        network.plot(bus_sizes=hydrogen_distribution * scaling,
                     bus_colors='teal', ax=ax, line_widths=0.3)

    if basemap and basemap_present:
        x = network.buses["x"]
        y = network.buses["y"]
        x1 = min(x)
        x2 = max(x)
        y1 = min(y)
        y2 = max(y)
        bmap = Basemap(resolution='l', epsg=network.srid,
                       llcrnrlat=y1, urcrnrlat=y2,
                       llcrnrlon=x1, urcrnrlon=x2, ax=ax)
        bmap.drawcountries()
        bmap.drawcoastlines()

    if msd_max_hyd != 0:
        plt.scatter([], [], c='teal', s=msd_max_hyd * scaling,
                    label='= ' + str(round(msd_max_hyd, 0)) + LabelUnit +
                          ' hydrogen storage')
    if msd_max_bat != 0:
        plt.scatter([], [], c='orangered', s=msd_max_bat * scaling,
                    label='= ' + str(round(msd_max_bat, 0)) + LabelUnit +
                          ' battery storage')

    plt.legend(scatterpoints=1, labelspacing=1,
               title='Storage size and technology', borderpad=1.3, loc=2)

    ax.set_title("Storage expansion")

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)
        plt.close()

    return
Plot storage distribution as circles on grid nodes

Displays storage size and distribution in network.

Parameters
----------
network : PyPSA network container
    Holds topology of grid including results from powerflow analysis
filename : str
    Specify filename
    If not given, figure will be shown directly
def getGerritChanges(props):
    """ Get the gerrit changes

    This method could be overridden if really needed to accommodate
    other custom steps' methods for fetching gerrit changes.

    :param props: an IProperty
    :return: (optionally via deferred) a list of dictionaries with at least
        change_id and revision_id, whose format is the one accepted by the
        gerrit REST API as of /changes/:change_id/revision/:revision_id
        paths (see gerrit doc)
    """
    if 'gerrit_changes' in props:
        return props.getProperty('gerrit_changes')

    if 'event.change.number' in props:
        return [{
            'change_id': props.getProperty('event.change.number'),
            'revision_id': props.getProperty('event.patchSet.number')
        }]

    return []
Get the gerrit changes

This method could be overridden if really needed to accommodate other
custom steps' methods for fetching gerrit changes.

:param props: an IProperty
:return: (optionally via deferred) a list of dictionaries with at least
    change_id and revision_id, whose format is the one accepted by the
    gerrit REST API as of /changes/:change_id/revision/:revision_id paths
    (see gerrit doc)
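A runnable sketch with a dict-backed stand-in for buildbot's properties object (`FakeProps` is hypothetical; the real IProperties supports `in` and `getProperty()` the same way), assuming `getGerritChanges` is in scope:

class FakeProps(dict):
    # toy stand-in for an IProperty: dict membership plus getProperty()
    def getProperty(self, name, default=None):
        return self.get(name, default)

props = FakeProps({'event.change.number': 4242, 'event.patchSet.number': 7})
assert getGerritChanges(props) == [{'change_id': 4242, 'revision_id': 7}]
assert getGerritChanges(FakeProps()) == []  # no gerrit info -> empty list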
def find_by_id(self, repoid): """ Returns the repo with the specified <repoid> """ for row in self.jsondata: if repoid == row["repoid"]: return self._infofromdict(row)
Returns the repo with the specified <repoid>
def emd2(a, b, M, processes=multiprocessing.cpu_count(),
         numItermax=100000, log=False, return_matrix=False):
    """Solves the Earth Movers distance problem and returns the loss

    .. math::
        \gamma = arg\min_\gamma <\gamma,M>_F

        s.t. \gamma 1 = a

             \gamma^T 1= b

             \gamma\geq 0

    where :

    - M is the metric cost matrix
    - a and b are the sample weights

    Uses the algorithm proposed in [1]_

    Parameters
    ----------
    a : (ns,) ndarray, float64
        Source histogram (uniform weight if empty list)
    b : (nt,) ndarray, float64
        Target histogram (uniform weight if empty list)
    M : (ns,nt) ndarray, float64
        loss matrix
    numItermax : int, optional (default=100000)
        The maximum number of iterations before stopping the optimization
        algorithm if it has not converged.
    log: boolean, optional (default=False)
        If True, returns a dictionary containing the cost and dual
        variables. Otherwise returns only the optimal transportation cost.
    return_matrix: boolean, optional (default=False)
        If True, returns the optimal transportation matrix in the log.

    Returns
    -------
    gamma: (ns x nt) ndarray
        Optimal transportation matrix for the given parameters
    log: dict
        If input log is true, a dictionary containing the cost and dual
        variables and exit status

    Examples
    --------

    Simple example with obvious solution. The function emd accepts lists
    and performs automatic conversion to numpy arrays

    >>> import ot
    >>> a=[.5,.5]
    >>> b=[.5,.5]
    >>> M=[[0.,1.],[1.,0.]]
    >>> ot.emd2(a,b,M)
    0.0

    References
    ----------

    .. [1] Bonneel, N., Van De Panne, M., Paris, S., & Heidrich, W. (2011,
        December). Displacement interpolation using Lagrangian mass
        transport. In ACM Transactions on Graphics (TOG) (Vol. 30, No. 6,
        p. 158). ACM.

    See Also
    --------
    ot.bregman.sinkhorn : Entropic regularized OT
    ot.optim.cg : General regularized OT"""

    a = np.asarray(a, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    M = np.asarray(M, dtype=np.float64)

    # if empty array given then use uniform distributions
    if len(a) == 0:
        a = np.ones((M.shape[0],), dtype=np.float64) / M.shape[0]
    if len(b) == 0:
        b = np.ones((M.shape[1],), dtype=np.float64) / M.shape[1]

    if log or return_matrix:
        def f(b):
            G, cost, u, v, resultCode = emd_c(a, b, M, numItermax)
            result_code_string = check_result(resultCode)
            log = {}
            if return_matrix:
                log['G'] = G
            log['u'] = u
            log['v'] = v
            log['warning'] = result_code_string
            log['result_code'] = resultCode
            return [cost, log]
    else:
        def f(b):
            G, cost, u, v, result_code = emd_c(a, b, M, numItermax)
            check_result(result_code)
            return cost

    if len(b.shape) == 1:
        return f(b)
    nb = b.shape[1]

    res = parmap(f, [b[:, i] for i in range(nb)], processes)
    return res
Solves the Earth Movers distance problem and returns the loss

.. math::
    \gamma = arg\min_\gamma <\gamma,M>_F

    s.t. \gamma 1 = a

         \gamma^T 1= b

         \gamma\geq 0

where :

- M is the metric cost matrix
- a and b are the sample weights

Uses the algorithm proposed in [1]_

Parameters
----------
a : (ns,) ndarray, float64
    Source histogram (uniform weight if empty list)
b : (nt,) ndarray, float64
    Target histogram (uniform weight if empty list)
M : (ns,nt) ndarray, float64
    loss matrix
numItermax : int, optional (default=100000)
    The maximum number of iterations before stopping the optimization
    algorithm if it has not converged.
log: boolean, optional (default=False)
    If True, returns a dictionary containing the cost and dual variables.
    Otherwise returns only the optimal transportation cost.
return_matrix: boolean, optional (default=False)
    If True, returns the optimal transportation matrix in the log.

Returns
-------
gamma: (ns x nt) ndarray
    Optimal transportation matrix for the given parameters
log: dict
    If input log is true, a dictionary containing the cost and dual
    variables and exit status

Examples
--------

Simple example with obvious solution. The function emd accepts lists and
performs automatic conversion to numpy arrays

>>> import ot
>>> a=[.5,.5]
>>> b=[.5,.5]
>>> M=[[0.,1.],[1.,0.]]
>>> ot.emd2(a,b,M)
0.0

References
----------

.. [1] Bonneel, N., Van De Panne, M., Paris, S., & Heidrich, W. (2011,
    December). Displacement interpolation using Lagrangian mass transport.
    In ACM Transactions on Graphics (TOG) (Vol. 30, No. 6, p. 158). ACM.

See Also
--------
ot.bregman.sinkhorn : Entropic regularized OT
ot.optim.cg : General regularized OT
def get_template(self, template_name):
    """Get the template with the given name"""
    try:
        return self.loader.load(template_name, encoding=self.encoding)
    except self.not_found_exception:
        # catch the exception raised by Genshi, convert it into a werkzeug
        # exception (for the sake of consistency)
        raise TemplateNotFound(template_name)
Get the template with the given name
def spi_ss_polarity(self, polarity):
    """Change the output polarity on the SS line.

    Please note that this only affects the master functions.
    """
    ret = api.py_aa_spi_master_ss_polarity(self.handle, polarity)
    _raise_error_if_negative(ret)
Change the output polarity on the SS line.

Please note that this only affects the master functions.
def validate_email(self, email): """ Validate if email exists and requires a verification. `validate_email` will set a `user` attribute on the instance allowing the view to send an email confirmation. """ try: self.user = User.objects.get_by_natural_key(email) except User.DoesNotExist: msg = _('A user with this email address does not exist.') raise serializers.ValidationError(msg) if self.user.email_verified: msg = _('User email address is already verified.') raise serializers.ValidationError(msg) return email
Validate if email exists and requires a verification. `validate_email` will set a `user` attribute on the instance allowing the view to send an email confirmation.
def get_capability_report(self, raw=True, cb=None): """ This method retrieves the Firmata capability report :param raw: If True, it either stores or provides the callback with a report as list. If False, prints a formatted report to the console :param cb: Optional callback reference to receive a raw report :returns: capability report """ task = asyncio.ensure_future(self.core.get_capability_report()) report = self.loop.run_until_complete(task) if raw: if cb: cb(report) else: return report else: # noinspection PyProtectedMember self.core._format_capability_report(report)
This method retrieves the Firmata capability report :param raw: If True, it either stores or provides the callback with a report as list. If False, prints a formatted report to the console :param cb: Optional callback reference to receive a raw report :returns: capability report
def parse_requestline(s):
    """
    http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5

    >>> parse_requestline('GET / HTTP/1.0')
    ('GET', '/', '1.0')
    >>> parse_requestline('post /testurl htTP/1.1')
    ('POST', '/testurl', '1.1')
    >>> parse_requestline('Im not a RequestLine')
    Traceback (most recent call last):
        ...
    ValueError: Not a Request-Line
    """
    methods = '|'.join(HttpBaseClass.METHODS)
    # escape the dot and use a plain character class so that only
    # HTTP/1.0 and HTTP/1.1 match (the original '1.[0|1]' also matched
    # a literal '|' and any character in place of the dot)
    m = re.match(r'(' + methods + r')\s+(.*)\s+HTTP/(1\.[01])', s, re.I)
    if m:
        return m.group(1).upper(), m.group(2), m.group(3)
    else:
        raise ValueError('Not a Request-Line')
http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5 >>> parse_requestline('GET / HTTP/1.0') ('GET', '/', '1.0') >>> parse_requestline('post /testurl htTP/1.1') ('POST', '/testurl', '1.1') >>> parse_requestline('Im not a RequestLine') Traceback (most recent call last): ... ValueError: Not a Request-Line
def hash(self): """The hash value of the current revision""" if 'digest' not in self._p4dict: self._p4dict = self._connection.run(['fstat', '-m', '1', '-Ol', self.depotFile])[0] return self._p4dict['digest']
The hash value of the current revision
def styleInheritedFromParent(node, style): """ Returns the value of 'style' that is inherited from the parents of the passed-in node Warning: This method only considers presentation attributes and inline styles, any style sheets are ignored! """ parentNode = node.parentNode # return None if we reached the Document element if parentNode.nodeType == Node.DOCUMENT_NODE: return None # check styles first (they take precedence over presentation attributes) styles = _getStyle(parentNode) if style in styles: value = styles[style] if not value == 'inherit': return value # check attributes value = parentNode.getAttribute(style) if value not in ['', 'inherit']: return parentNode.getAttribute(style) # check the next parent recursively if we did not find a value yet return styleInheritedFromParent(parentNode, style)
Returns the value of 'style' that is inherited from the parents of the passed-in node Warning: This method only considers presentation attributes and inline styles, any style sheets are ignored!
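A hedged sketch with xml.dom.minidom. The module's real `_getStyle` helper is not shown above, so a toy version that parses the inline style attribute is stubbed in here; `Node` is imported because the function above uses it:

from xml.dom import Node, minidom

def _getStyle(node):
    # simplified stand-in: parse 'a:b;c:d' inline styles into a dict
    style = node.getAttribute('style') if node.nodeType == Node.ELEMENT_NODE else ''
    return dict(pair.split(':', 1) for pair in style.split(';') if ':' in pair)

doc = minidom.parseString('<svg fill="red"><g style="fill:blue"><rect/></g></svg>')
rect = doc.getElementsByTagName('rect')[0]
# the inline style on <g> takes precedence over the <svg> presentation attribute
assert styleInheritedFromParent(rect, 'fill') == 'blue'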
def send_scheduled_messages(priority=None, ignore_unknown_messengers=False, ignore_unknown_message_types=False): """Sends scheduled messages. :param int, None priority: number to limit sending message by this priority. :param bool ignore_unknown_messengers: to silence UnknownMessengerError :param bool ignore_unknown_message_types: to silence UnknownMessageTypeError :raises UnknownMessengerError: :raises UnknownMessageTypeError: """ dispatches_by_messengers = Dispatch.group_by_messengers(Dispatch.get_unsent(priority=priority)) for messenger_id, messages in dispatches_by_messengers.items(): try: messenger_obj = get_registered_messenger_object(messenger_id) messenger_obj._process_messages(messages, ignore_unknown_message_types=ignore_unknown_message_types) except UnknownMessengerError: if ignore_unknown_messengers: continue raise
Sends scheduled messages. :param int, None priority: number to limit sending message by this priority. :param bool ignore_unknown_messengers: to silence UnknownMessengerError :param bool ignore_unknown_message_types: to silence UnknownMessageTypeError :raises UnknownMessengerError: :raises UnknownMessageTypeError:
def _update_dPrxy(self): """Update `dPrxy`.""" super(ExpCM_fitprefs, self)._update_dPrxy() if 'zeta' in self.freeparams: tildeFrxyQxy = self.tildeFrxy * self.Qxy j = 0 zetaxterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float') zetayterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float') for r in range(self.nsites): for i in range(N_AA - 1): zetari = self.zeta[j] zetaxterm.fill(0) zetayterm.fill(0) zetaxterm[r][self._aa_for_x > i] = -1.0 / zetari zetaxterm[r][self._aa_for_x == i] = -1.0 / (zetari - 1.0) zetayterm[r][self._aa_for_y > i] = 1.0 / zetari zetayterm[r][self._aa_for_y == i] = 1.0 / (zetari - 1.0) self.dPrxy['zeta'][j] = tildeFrxyQxy * (zetayterm + zetaxterm) _fill_diagonals(self.dPrxy['zeta'][j], self._diag_indices) j += 1
Update `dPrxy`.
def load_config(self, config): """Load the outputs section of the configuration file.""" # Limit the number of processes to display in the WebUI if config is not None and config.has_section('outputs'): logger.debug('Read number of processes to display in the WebUI') n = config.get_value('outputs', 'max_processes_display', default=None) logger.debug('Number of processes to display in the WebUI: {}'.format(n))
Load the outputs section of the configuration file.
def depends_on(self, *keys):
    '''
    Decorator that marks the wrapped callable as depending on the
    specified provider keys.

    :param keys: Provider keys to mark as dependencies of the wrapped
        callable
    :type keys: tuple
    :return: decorator
    :rtype: decorator
    '''
    def decorator(wrapped):
        if keys:
            if wrapped not in self._dependencies:
                self._dependencies[wrapped] = set()
            self._dependencies[wrapped].update(keys)
        return wrapped
    return decorator
Decorator that marks the wrapped callable as depending on the specified
provider keys.

:param keys: Provider keys to mark as dependencies of the wrapped callable
:type keys: tuple
:return: decorator
:rtype: decorator
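A runnable usage sketch, assuming `depends_on` above is available as a plain function; `Registry` is a hypothetical minimal host for it:

class Registry:
    # minimal host object (hypothetical name); assigning the plain function
    # as a class attribute turns it into a regular method in Python 3
    def __init__(self):
        self._dependencies = {}
    depends_on = depends_on

registry = Registry()

@registry.depends_on('db', 'cache')
def make_service(db, cache):
    return {'db': db, 'cache': cache}

# the registry records the dependency keys; the function itself is unchanged
assert registry._dependencies[make_service] == {'db', 'cache'}
assert make_service('pg', 'redis') == {'db': 'pg', 'cache': 'redis'}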
def swpool(agent, nnames, lenvals, names): """ Add a name to the list of agents to notify whenever a member of a list of kernel variables is updated. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/swpool_c.html :param agent: The name of an agent to be notified after updates. :type agent: str :param nnames: The number of variables to associate with agent. :type nnames: int :param lenvals: Length of strings in the names array. :type lenvals: int :param names: Variable names whose update causes the notice. :type names: list of strs. """ agent = stypes.stringToCharP(agent) nnames = ctypes.c_int(nnames) lenvals = ctypes.c_int(lenvals) names = stypes.listToCharArray(names) libspice.swpool_c(agent, nnames, lenvals, names)
Add a name to the list of agents to notify whenever a member of a list of kernel variables is updated. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/swpool_c.html :param agent: The name of an agent to be notified after updates. :type agent: str :param nnames: The number of variables to associate with agent. :type nnames: int :param lenvals: Length of strings in the names array. :type lenvals: int :param names: Variable names whose update causes the notice. :type names: list of strs.
def get_number_of_app_ports(app): """ Get the number of ports for the given app JSON. This roughly follows the logic in marathon-lb for finding app IPs/ports, although we are only interested in the quantity of ports an app should have and don't consider the specific IPs/ports of individual tasks: https://github.com/mesosphere/marathon-lb/blob/v1.10.3/utils.py#L393-L415 :param app: The app JSON from the Marathon API. :return: The number of ports for the app. """ mode = _get_networking_mode(app) ports_list = None if mode == 'host': ports_list = _get_port_definitions(app) elif mode == 'container/bridge': ports_list = _get_port_definitions(app) if ports_list is None: ports_list = _get_container_port_mappings(app) elif mode == 'container': ports_list = _get_ip_address_discovery_ports(app) # Marathon 1.5+: the ipAddress field is missing -> ports_list is None # Marathon <1.5: the ipAddress field can be present, but ports_list can # still be empty while the container port mapping is not :-/ if not ports_list: ports_list = _get_container_port_mappings(app) else: raise RuntimeError( "Unknown Marathon networking mode '{}'".format(mode)) return len(ports_list)
Get the number of ports for the given app JSON. This roughly follows the logic in marathon-lb for finding app IPs/ports, although we are only interested in the quantity of ports an app should have and don't consider the specific IPs/ports of individual tasks: https://github.com/mesosphere/marathon-lb/blob/v1.10.3/utils.py#L393-L415 :param app: The app JSON from the Marathon API. :return: The number of ports for the app.
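Hedged sample payloads illustrating two of the networking modes; the exact field shapes are assumptions based on the helper names above and should be checked against your Marathon version:

# Sample Marathon app payloads (field shapes are assumptions, not taken
# from the source above). With the private helpers in place:
host_app = {
    'networks': [{'mode': 'host'}],
    'portDefinitions': [{'port': 0}, {'port': 0}],
}
container_app = {
    'networks': [{'mode': 'container'}],
    'container': {'portMappings': [{'containerPort': 8080}]},
}
# get_number_of_app_ports(host_app) would count the two portDefinitions;
# get_number_of_app_ports(container_app) the single container port mapping.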
def get_port_mappings(self, port=None): """ Get list of port mappings between container and host. The format of dicts is: {"HostIp": XX, "HostPort": YY}; When port is None - return all port mappings. The container needs to be running, otherwise this returns an empty list. :param port: int or None, container port :return: list of dict or None; dict when port=None """ port_mappings = self.inspect(refresh=True)["NetworkSettings"]["Ports"] if not port: return port_mappings if str(port) not in self.get_ports(): return [] for p in port_mappings: if p.split("/")[0] == str(port): return port_mappings[p]
Get list of port mappings between container and host. The format of dicts is: {"HostIp": XX, "HostPort": YY}; When port is None - return all port mappings. The container needs to be running, otherwise this returns an empty list. :param port: int or None, container port :return: list of dict or None; dict when port=None
def main(): """Main script handler. Returns: int: 0 for success, >1 error code """ logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s') try: cli() return 0 except LocationsError as error: print(error) return 2 except RuntimeError as error: print(error) return 255 except OSError as error: return error.errno
Main script handler. Returns: int: 0 for success, >1 error code
def drop(self, relation):
    """Drop the named relation and cascade it appropriately to all
    dependent relations.

    Because dbt proactively does many `drop relation if exists ... cascade`
    that are noops, nonexistent relation drops cause a debug log and no
    other actions.

    :param relation: The relation to drop.
    """
    dropped = _make_key(relation)
    logger.debug('Dropping relation: {!s}'.format(dropped))
    with self.lock:
        self._drop_cascade_relation(dropped)
Drop the named relation and cascade it appropriately to all dependent
relations.

Because dbt proactively does many `drop relation if exists ... cascade`
that are noops, nonexistent relation drops cause a debug log and no other
actions.

:param relation: The relation to drop.
def from_outcars(cls, outcars, structures, **kwargs):
    """
    Initializes an NEBAnalysis from Outcar and Structure objects. Use
    the static constructors, e.g., :class:`from_dir` instead if you
    prefer to have these automatically generated from a directory of
    NEB calculations.

    Args:
        outcars ([Outcar]): List of Outcar objects. Note that these have
            to be ordered from start to end along reaction coordinates.
        structures ([Structure]): List of Structures along reaction
            coordinate. Must be same length as outcars.
        interpolation_order (int): Order of polynomial to use to
            interpolate between images. Same format as order parameter in
            scipy.interpolate.PiecewisePolynomial.
    """
    if len(outcars) != len(structures):
        raise ValueError("# of Outcars must be same as # of Structures")

    # Calculate cumulative root mean square distance between structures,
    # which serves as the reaction coordinate. Note that these are
    # calculated from the final relaxed structures as the coordinates may
    # have changed from the initial interpolation.
    r = [0]
    prev = structures[0]
    for st in structures[1:]:
        dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])
        r.append(np.sqrt(np.sum(dists ** 2)))
        prev = st
    r = np.cumsum(r)

    energies = []
    forces = []
    for i, o in enumerate(outcars):
        o.read_neb()
        energies.append(o.data["energy"])
        if i in [0, len(outcars) - 1]:
            forces.append(0)
        else:
            forces.append(o.data["tangent_force"])
    forces = np.array(forces)
    r = np.array(r)
    return cls(r=r, energies=energies, forces=forces,
               structures=structures, **kwargs)
Initializes an NEBAnalysis from Outcar and Structure objects. Use the
static constructors, e.g., :class:`from_dir` instead if you prefer to
have these automatically generated from a directory of NEB calculations.

Args:
    outcars ([Outcar]): List of Outcar objects. Note that these have to
        be ordered from start to end along reaction coordinates.
    structures ([Structure]): List of Structures along reaction
        coordinate. Must be same length as outcars.
    interpolation_order (int): Order of polynomial to use to interpolate
        between images. Same format as order parameter in
        scipy.interpolate.PiecewisePolynomial.
def getItemWidth(self) -> int: """ Only for transactions derived from HArray :return: width of item in original array """ if not isinstance(self.dtype, HArray): raise TypeError() return (self.bitAddrEnd - self.bitAddr) // self.itemCnt
Only for transactions derived from HArray :return: width of item in original array
def get_settings(self): """Returns a mapping of UID -> setting """ settings = self.context.getAnalysisServicesSettings() mapping = dict(map(lambda s: (s.get("uid"), s), settings)) return mapping
Returns a mapping of UID -> setting
def console_load_asc(con: tcod.console.Console, filename: str) -> bool: """Update a console from a non-delimited ASCII `.asc` file.""" return bool( lib.TCOD_console_load_asc(_console(con), filename.encode("utf-8")) )
Update a console from a non-delimited ASCII `.asc` file.
def instance_config_path(cls, project, instance_config): """Return a fully-qualified instance_config string.""" return google.api_core.path_template.expand( "projects/{project}/instanceConfigs/{instance_config}", project=project, instance_config=instance_config, )
Return a fully-qualified instance_config string.
def find(cls, *args, **kwargs): """ Returns all document dicts that pass the filter """ return list(cls.collection.find(*args, **kwargs))
Returns all document dicts that pass the filter
def stop_all(self, run_order=-1): """Runs stop method on all modules less than the passed-in run_order. Used when target is exporting itself mid-build, so we clean up state before committing run files etc. """ shutit_global.shutit_global_object.yield_to_draw() # sort them so they're stopped in reverse order for module_id in self.module_ids(rev=True): shutit_module_obj = self.shutit_map[module_id] if run_order == -1 or shutit_module_obj.run_order <= run_order: if self.is_installed(shutit_module_obj): if not shutit_module_obj.stop(self): self.fail('failed to stop: ' + module_id, shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').shutit_pexpect_child)
Runs stop method on all modules less than the passed-in run_order. Used when target is exporting itself mid-build, so we clean up state before committing run files etc.
def delete(self, tag, ref):
    """Delete from the vgroup the member identified by its tag
    and reference number.

    Args::

        tag     tag of the member to delete
        ref     reference number of the member to delete

    Returns::

        None

    Only the link of the member with the vgroup is deleted.
    The member object is not deleted.

    C library equivalent : Vdeletetagref
    """
    _checkErr('delete', _C.Vdeletetagref(self._id, tag, ref),
              "error deleting member")
Delete from the vgroup the member identified by its tag and reference
number.

Args::

    tag     tag of the member to delete
    ref     reference number of the member to delete

Returns::

    None

Only the link of the member with the vgroup is deleted. The member
object is not deleted.

C library equivalent : Vdeletetagref
def _leaf_list_stmt(self, stmt: Statement, sctx: SchemaContext) -> None: """Handle leaf-list statement.""" node = LeafListNode() node.type = DataType._resolve_type( stmt.find1("type", required=True), sctx) self._handle_child(node, stmt, sctx)
Handle leaf-list statement.
def _get_opt(config, key, option, opt_type):
    """Get an option from a configparser with the given type."""
    for opt_key in [option, option.replace('-', '_')]:
        if not config.has_option(key, opt_key):
            continue

        if opt_type == bool:
            # configparser exposes getboolean(), not getbool()
            return config.getboolean(key, opt_key)
        if opt_type == int:
            return config.getint(key, opt_key)
        if opt_type == str:
            return config.get(key, opt_key)
        if opt_type == list:
            return _parse_list_opt(config.get(key, opt_key))

    raise ValueError("Unknown option type: %s" % opt_type)
Get an option from a configparser with the given type.
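A usage sketch with the standard-library configparser (the `list` branch relies on the module's `_parse_list_opt` helper, which is not exercised here); assumes `_get_opt` above with the getboolean() fix:

import configparser

config = configparser.ConfigParser()
config.read_string("[tool]\nmax-line-length = 99\nstrict = yes\n")

# the hyphenated key is found directly; the underscore fallback also works
assert _get_opt(config, 'tool', 'max-line-length', int) == 99
assert _get_opt(config, 'tool', 'strict', bool) is True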
def dictlist_convert_to_string(dict_list: Iterable[Dict], key: str) -> None: """ Process an iterable of dictionaries. For each dictionary ``d``, convert (in place) ``d[key]`` to a string form, ``str(d[key])``. If the result is a blank string, convert it to ``None``. """ for d in dict_list: d[key] = str(d[key]) if d[key] == "": d[key] = None
Process an iterable of dictionaries. For each dictionary ``d``, convert (in place) ``d[key]`` to a string form, ``str(d[key])``. If the result is a blank string, convert it to ``None``.
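A quick usage sketch; the conversion happens in place, and empty strings collapse to `None`:

rows = [{'id': 7}, {'id': ''}]
dictlist_convert_to_string(rows, 'id')
assert rows == [{'id': '7'}, {'id': None}]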
def add_raw(self, raw):
    """
    Adds raw aggregation stage(s) to the query
    :param raw: a list of raw stages or a single raw stage dict
    :return: The current object
    """
    if isinstance(raw, list):
        self._q += raw
    elif isinstance(raw, dict):
        self._q.append(raw)
    return self
Adds raw aggregation stage(s) to the query
:param raw: a list of raw stages or a single raw stage dict
:return: The current object
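A runnable sketch of chaining MongoDB-style raw stages, assuming `add_raw` above is available as a plain function; `AggBuilder` is a hypothetical minimal host:

class AggBuilder:
    # minimal host exposing the method above and a _q pipeline list
    def __init__(self):
        self._q = []
    add_raw = add_raw

q = AggBuilder()
q.add_raw({'$match': {'status': 'active'}}).add_raw(
    [{'$sort': {'ts': -1}}, {'$limit': 10}])
# a dict is appended, a list is extended, preserving stage order
assert q._q == [{'$match': {'status': 'active'}},
                {'$sort': {'ts': -1}}, {'$limit': 10}]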
def load_jws_from_request(req): """ This function performs almost entirely bitjws authentication tasks. If valid bitjws message and signature headers are found, then the request will be assigned 'jws_header' and 'jws_payload' attributes. :param req: The flask request to load the jwt claim set from. """ current_app.logger.info("loading request with headers: %s" % req.headers) if (("content-type" in req.headers and "application/jose" in req.headers['content-type']) or ("Content-Type" in req.headers and "application/jose" in req.headers['Content-Type'])): path = urlparse.urlsplit(req.url).path for rule in current_app.url_map.iter_rules(): if path == rule.rule and req.method in rule.methods: dedata = req.get_data().decode('utf8') bp = current_app.bitjws.basepath req.jws_header, req.jws_payload = \ bitjws.validate_deserialize(dedata, requrl=bp + rule.rule) break
This function performs almost entirely bitjws authentication tasks. If valid bitjws message and signature headers are found, then the request will be assigned 'jws_header' and 'jws_payload' attributes. :param req: The flask request to load the jwt claim set from.
def add(self, docs, boost=None, fieldUpdates=None, commit=None, softCommit=False, commitWithin=None, waitFlush=None, waitSearcher=None, overwrite=None, handler='update'): """ Adds or updates documents. Requires ``docs``, which is a list of dictionaries. Each key is the field name and each value is the value to index. Optionally accepts ``commit``. Default is ``None``. None signals to use default Optionally accepts ``softCommit``. Default is ``False``. Optionally accepts ``boost``. Default is ``None``. Optionally accepts ``fieldUpdates``. Default is ``None``. Optionally accepts ``commitWithin``. Default is ``None``. Optionally accepts ``waitFlush``. Default is ``None``. Optionally accepts ``waitSearcher``. Default is ``None``. Optionally accepts ``overwrite``. Default is ``None``. Usage:: solr.add([ { "id": "doc_1", "title": "A test document", }, { "id": "doc_2", "title": "The Banana: Tasty or Dangerous?", }, ]) """ start_time = time.time() self.log.debug("Starting to build add request...") message = ElementTree.Element('add') if commitWithin: message.set('commitWithin', commitWithin) for doc in docs: el = self._build_doc(doc, boost=boost, fieldUpdates=fieldUpdates) message.append(el) # This returns a bytestring. Ugh. m = ElementTree.tostring(message, encoding='utf-8') # Convert back to Unicode please. m = force_unicode(m) end_time = time.time() self.log.debug("Built add request of %s docs in %0.2f seconds.", len(message), end_time - start_time) return self._update(m, commit=commit, softCommit=softCommit, waitFlush=waitFlush, waitSearcher=waitSearcher, overwrite=overwrite, handler=handler)
Adds or updates documents. Requires ``docs``, which is a list of dictionaries. Each key is the field name and each value is the value to index. Optionally accepts ``commit``. Default is ``None``. None signals to use default Optionally accepts ``softCommit``. Default is ``False``. Optionally accepts ``boost``. Default is ``None``. Optionally accepts ``fieldUpdates``. Default is ``None``. Optionally accepts ``commitWithin``. Default is ``None``. Optionally accepts ``waitFlush``. Default is ``None``. Optionally accepts ``waitSearcher``. Default is ``None``. Optionally accepts ``overwrite``. Default is ``None``. Usage:: solr.add([ { "id": "doc_1", "title": "A test document", }, { "id": "doc_2", "title": "The Banana: Tasty or Dangerous?", }, ])
def get_reservations(self, sessionid, timeout=None):
    """Returns a list of the user's reservations."""
    url = "{}{}".format(BASE_URL, "/reservations/")
    cookies = dict(sessionid=sessionid)
    try:
        resp = requests.get(url, timeout=timeout, cookies=cookies)
    except requests.exceptions.HTTPError as error:
        raise APIError("Server Error: {}".format(error))
    except requests.exceptions.ConnectTimeout:
        raise APIError("Timeout Error")
    html = resp.content.decode("utf8")
    if "https://weblogin.pennkey.upenn.edu" in html:
        raise APIError("Wharton Auth Failed. Session ID is not valid.")
    soup = BeautifulSoup(html, "html5lib")
    reservations = []
    media = soup.find_all("div", {'class': "Media-body"})
    for res in media:
        times = res.find_all("span", {'class': "list-view-item__end-time"})
        reservation = {
            "date": res.find("span", {'class': "list-view-item__start-time u-display-block"}).get_text(),
            "startTime": times[0].get_text(),
            "endTime": times[1].get_text(),
            "location": res.find("span", {'class': "list-view-item-building"}).get_text(),
            "booking_id": int(res.find("a")['href'].split("delete/")[1][:-1])
        }
        reservations.append(reservation)
    return reservations
Returns a list of the user's reservations.
def between(self, other_user_id): """Check if there is a block between you and the given user. :return: ``True`` if the given user has been blocked :rtype: bool """ params = {'user': self.user_id, 'otherUser': other_user_id} response = self.session.get(self.url, params=params) return response.data['between']
Check if there is a block between you and the given user. :return: ``True`` if the given user has been blocked :rtype: bool
def _EnvOpen(var, mode): """Open a file descriptor identified by an environment variable.""" value = os.getenv(var) if value is None: raise ValueError("%s is not set" % var) fd = int(value) # If running on Windows, convert the file handle to a C file descriptor; see: # https://groups.google.com/forum/#!topic/dev-python/GeN5bFJWfJ4 if _WINDOWS: fd = msvcrt.open_osfhandle(fd, 0) return os.fdopen(fd, mode)
Open a file descriptor identified by an environment variable.
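A POSIX-only sketch passing a pipe's read end through an environment variable (`SKETCH_FD` is a hypothetical name); on Windows the handle-to-fd conversion above kicks in instead:

import os

r, w = os.pipe()
os.environ['SKETCH_FD'] = str(r)  # parent would normally set this for a child
os.write(w, b'hello\n')
os.close(w)
with _EnvOpen('SKETCH_FD', 'rb') as f:
    assert f.read() == b'hello\n'  # reads until EOF once the write end closes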
def get_account_policy(region=None, key=None, keyid=None, profile=None):
    '''
    Get account policy for the AWS account.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.get_account_policy
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        info = conn.get_account_password_policy()
        return info.get_account_password_policy_response.get_account_password_policy_result.password_policy
    except boto.exception.BotoServerError as e:
        log.debug(e)
        msg = 'Failed to get the account password policy.'
        log.error(msg)
        return False
Get account policy for the AWS account. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.get_account_policy
def _send_broker_aware_request(self, payloads, encoder_fn, decoder_fn): """ Group a list of request payloads by topic+partition and send them to the leader broker for that partition using the supplied encode/decode functions Arguments: payloads: list of object-like entities with a topic (str) and partition (int) attribute; payloads with duplicate topic-partitions are not supported. encode_fn: a method to encode the list of payloads to a request body, must accept client_id, correlation_id, and payloads as keyword arguments decode_fn: a method to decode a response body into response objects. The response objects must be object-like and have topic and partition attributes Returns: List of response objects in the same order as the supplied payloads """ # encoders / decoders do not maintain ordering currently # so we need to keep this so we can rebuild order before returning original_ordering = [(p.topic, p.partition) for p in payloads] # Group the requests by topic+partition brokers_for_payloads = [] payloads_by_broker = collections.defaultdict(list) responses = {} for payload in payloads: try: leader = self._get_leader_for_partition(payload.topic, payload.partition) payloads_by_broker[leader].append(payload) brokers_for_payloads.append(leader) except KafkaUnavailableError as e: log.warning('KafkaUnavailableError attempting to send request ' 'on topic %s partition %d', payload.topic, payload.partition) topic_partition = (payload.topic, payload.partition) responses[topic_partition] = FailedPayloadsError(payload) # For each broker, send the list of request payloads # and collect the responses and errors broker_failures = [] # For each KafkaConnection keep the real socket so that we can use # a select to perform unblocking I/O connections_by_socket = {} for broker, payloads in payloads_by_broker.items(): requestId = self._next_id() log.debug('Request %s to %s: %s', requestId, broker, payloads) request = encoder_fn(client_id=self.client_id, correlation_id=requestId, payloads=payloads) # Send the request, recv the response try: conn = self._get_conn(broker.host.decode('utf-8'), broker.port) conn.send(requestId, request) except ConnectionError as e: broker_failures.append(broker) log.warning('ConnectionError attempting to send request %s ' 'to server %s: %s', requestId, broker, e) for payload in payloads: topic_partition = (payload.topic, payload.partition) responses[topic_partition] = FailedPayloadsError(payload) # No exception, try to get response else: # decoder_fn=None signal that the server is expected to not # send a response. 
                # This probably only applies to
                # ProduceRequest w/ acks = 0
                if decoder_fn is None:
                    log.debug('Request %s does not expect a response '
                              '(skipping conn.recv)', requestId)
                    for payload in payloads:
                        topic_partition = (payload.topic, payload.partition)
                        responses[topic_partition] = None
                    continue
                else:
                    connections_by_socket[conn.get_connected_socket()] = (conn, broker, requestId)

        conn = None
        while connections_by_socket:
            sockets = connections_by_socket.keys()
            rlist, _, _ = select.select(sockets, [], [], None)
            conn, broker, requestId = connections_by_socket.pop(rlist[0])
            try:
                response = conn.recv(requestId)
            except ConnectionError as e:
                broker_failures.append(broker)
                log.warning('ConnectionError attempting to receive a '
                            'response to request %s from server %s: %s',
                            requestId, broker, e)

                for payload in payloads_by_broker[broker]:
                    topic_partition = (payload.topic, payload.partition)
                    responses[topic_partition] = FailedPayloadsError(payload)

            else:
                _resps = []
                for payload_response in decoder_fn(response):
                    topic_partition = (payload_response.topic,
                                       payload_response.partition)
                    responses[topic_partition] = payload_response
                    _resps.append(payload_response)
                log.debug('Response %s: %s', requestId, _resps)

        # Connection errors generally mean stale metadata
        # although sometimes it means incorrect api request
        # Unfortunately there is no good way to tell the difference
        # so we'll just reset metadata on all errors to be safe
        if broker_failures:
            self.reset_all_metadata()

        # Return responses in the same order as provided
        return [responses[tp] for tp in original_ordering]
Group a list of request payloads by topic+partition and send them to the leader broker for that partition using the supplied encode/decode functions Arguments: payloads: list of object-like entities with a topic (str) and partition (int) attribute; payloads with duplicate topic-partitions are not supported. encode_fn: a method to encode the list of payloads to a request body, must accept client_id, correlation_id, and payloads as keyword arguments decode_fn: a method to decode a response body into response objects. The response objects must be object-like and have topic and partition attributes Returns: List of response objects in the same order as the supplied payloads