Columns: code (string, 75 to 104k characters) and docstring (string, 1 to 46.9k characters).
def decluster(self, trig_int, timing='detect', metric='avg_cor'): """ De-cluster a Party of detections by enforcing a detection separation. De-clustering occurs between events detected by different (or the same) templates. If multiple detections occur within trig_int then the preferred detection will be determined by the metric argument. This can be either the average single-station correlation coefficient which is calculated as Detection.detect_val / Detection.no_chans, or the raw cross channel correlation sum which is simply Detection.detect_val. :type trig_int: float :param trig_int: Minimum detection separation in seconds. :type metric: str :param metric: What metric to sort peaks by. Either 'avg_cor' which takes the single station average correlation or 'cor_sum' which takes the total correlation sum across all channels. :type timing: str :param timing: Either 'detect' or 'origin' to decluster based on either the detection time or the origin time. .. Warning:: Works in place on object, if you need to keep the original safe then run this on a copy of the object! .. rubric:: Example >>> party = Party().read() >>> len(party) 4 >>> declustered = party.decluster(20) >>> len(party) 3 """ all_detections = [] for fam in self.families: all_detections.extend(fam.detections) if timing == 'detect': if metric == 'avg_cor': detect_info = [(d.detect_time, d.detect_val / d.no_chans) for d in all_detections] elif metric == 'cor_sum': detect_info = [(d.detect_time, d.detect_val) for d in all_detections] else: raise MatchFilterError('metric is not cor_sum or avg_cor') elif timing == 'origin': if metric == 'avg_cor': detect_info = [(_get_origin(d.event).time, d.detect_val / d.no_chans) for d in all_detections] elif metric == 'cor_sum': detect_info = [(_get_origin(d.event).time, d.detect_val) for d in all_detections] else: raise MatchFilterError('metric is not cor_sum or avg_cor') else: raise MatchFilterError('timing is not detect or origin') min_det = sorted([d[0] for d in detect_info])[0] detect_vals = np.array([d[1] for d in detect_info]) detect_times = np.array([ _total_microsec(d[0].datetime, min_det.datetime) for d in detect_info]) # Trig_int must be converted from seconds to micro-seconds peaks_out = decluster( peaks=detect_vals, index=detect_times, trig_int=trig_int * 10 ** 6) # Need to match both the time and the detection value declustered_detections = [] for ind in peaks_out: matching_time_indeces = np.where(detect_times == ind[-1])[0] matches = matching_time_indeces[ np.where(detect_vals[matching_time_indeces] == ind[0])[0][0]] declustered_detections.append(all_detections[matches]) # Convert this list into families template_names = list(set([d.template_name for d in declustered_detections])) new_families = [] for template_name in template_names: template = [fam.template for fam in self.families if fam.template.name == template_name][0] new_families.append(Family( template=template, detections=[d for d in declustered_detections if d.template_name == template_name])) self.families = new_families return self
De-cluster a Party of detections by enforcing a detection separation. De-clustering occurs between events detected by different (or the same) templates. If multiple detections occur within trig_int then the preferred detection will be determined by the metric argument. This can be either the average single-station correlation coefficient which is calculated as Detection.detect_val / Detection.no_chans, or the raw cross channel correlation sum which is simply Detection.detect_val. :type trig_int: float :param trig_int: Minimum detection separation in seconds. :type metric: str :param metric: What metric to sort peaks by. Either 'avg_cor' which takes the single station average correlation or 'cor_sum' which takes the total correlation sum across all channels. :type timing: str :param timing: Either 'detect' or 'origin' to decluster based on either the detection time or the origin time. .. Warning:: Works in place on object, if you need to keep the original safe then run this on a copy of the object! .. rubric:: Example >>> party = Party().read() >>> len(party) 4 >>> declustered = party.decluster(20) >>> len(party) 3
def check_user_can_comment(recID, client_ip_address, uid=-1): """ Check if a user hasn't already commented within the last seconds time limit: CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS :param recID: record id :param client_ip_address: IP => use: str(req.remote_ip) :param uid: user id, as given by invenio.legacy.webuser.getUid(req) """ recID = wash_url_argument(recID, 'int') client_ip_address = wash_url_argument(client_ip_address, 'str') uid = wash_url_argument(uid, 'int') max_action_time = time.time() - \ CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS max_action_time = convert_datestruct_to_datetext( time.localtime(max_action_time)) action_code = CFG_WEBCOMMENT_ACTION_CODE['ADD_COMMENT'] query = """SELECT id_bibrec FROM "cmtACTIONHISTORY" WHERE id_bibrec=%s AND action_code=%s AND action_time>%s """ params = (recID, action_code, max_action_time) if uid < 0: query += " AND client_host=inet_aton(%s)" params += (client_ip_address,) else: query += " AND id_user=%s" params += (uid,) res = run_sql(query, params) return len(res) == 0
Check whether a user has not already commented within the time limit (in seconds) given by CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS. :param recID: record id :param client_ip_address: IP address; use str(req.remote_ip) :param uid: user id, as given by invenio.legacy.webuser.getUid(req)
def getValue(self, row): 'Memoize calcValue with key id(row)' if self._cachedValues is None: return self.calcValue(row) k = id(row) if k in self._cachedValues: return self._cachedValues[k] ret = self.calcValue(row) self._cachedValues[k] = ret cachesize = options.col_cache_size if cachesize > 0 and len(self._cachedValues) > cachesize: self._cachedValues.popitem(last=False) return ret
Memoize calcValue with key id(row)
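The pattern above (memoize by id(row), with an optional size cap) lifts out of its host class easily; a minimal standalone sketch, assuming an OrderedDict cache and a caller-supplied size limit (both names are hypothetical):

from collections import OrderedDict

class CachedColumn:
    def __init__(self, calc_value, cache_size=100):
        self.calc_value = calc_value      # the expensive per-row computation
        self.cache_size = cache_size      # <= 0 disables eviction
        self._cache = OrderedDict()       # keyed by id(row), as in the original

    def get_value(self, row):
        key = id(row)
        if key in self._cache:
            return self._cache[key]
        value = self._cache[key] = self.calc_value(row)
        if self.cache_size > 0 and len(self._cache) > self.cache_size:
            self._cache.popitem(last=False)   # drop the oldest entry (FIFO)
        return value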
def _assert_executable_regions_match(workflow_enabled_regions, workflow_spec): """ Check if the global workflow regions and the regions of stages (apps) match. If the workflow contains any applets, the workflow can be currently enabled in only one region - the region in which the applets are stored. """ executables = [i.get("executable") for i in workflow_spec.get("stages")] for exect in executables: if exect.startswith("applet-") and len(workflow_enabled_regions) > 1: raise WorkflowBuilderException("Building a global workflow with applets in more than one region is not yet supported.") elif exect.startswith("app-"): app_regional_options = dxpy.api.app_describe(exect, input_params={"fields": {"regionalOptions": True}}) app_regions = set(app_regional_options['regionalOptions'].keys()) if not workflow_enabled_regions.issubset(app_regions): additional_workflow_regions = workflow_enabled_regions - app_regions mesg = "The app {} is enabled in regions {} while the global workflow in {}.".format( exect, ", ".join(app_regions), ", ".join(workflow_enabled_regions)) mesg += " The workflow will not be able to run in {}.".format(", ".join(additional_workflow_regions)) mesg += " If you are a developer of the app, you can enable the app in {} to run the workflow in that region(s).".format( ", ".join(additional_workflow_regions)) logger.warn(mesg) elif exect.startswith("workflow-"): # We recurse to check the regions of the executables of the inner workflow inner_workflow_spec = dxpy.api.workflow_describe(exect) _assert_executable_regions_match(workflow_enabled_regions, inner_workflow_spec) elif exect.startswith("globalworkflow-"): raise WorkflowBuilderException("Building a global workflow with nested global workflows is not yet supported")
Check if the global workflow regions and the regions of stages (apps) match. If the workflow contains any applets, the workflow can be currently enabled in only one region - the region in which the applets are stored.
def exp(self): """ Returns the exponential of the quaternion. (not tested) """ # Init vecNorm = np.sqrt(self.x**2 + self.y**2 + self.z**2) wPart = np.exp(self.w) q = Quaternion() # Calculate q.w = wPart * np.cos(vecNorm) q.x = wPart * self.x * np.sin(vecNorm) / vecNorm q.y = wPart * self.y * np.sin(vecNorm) / vecNorm q.z = wPart * self.z * np.sin(vecNorm) / vecNorm return q
Returns the exponential of the quaternion. (not tested)
def decode(self, X, lengths=None, algorithm=None): """Find most likely state sequence corresponding to ``X``. Parameters ---------- X : array-like, shape (n_samples, n_features) Feature matrix of individual samples. lengths : array-like of integers, shape (n_sequences, ), optional Lengths of the individual sequences in ``X``. The sum of these should be ``n_samples``. algorithm : string Decoder algorithm. Must be one of "viterbi" or "map". If not given, :attr:`decoder` is used. Returns ------- logprob : float Log probability of the produced state sequence. state_sequence : array, shape (n_samples, ) Labels for each sample from ``X`` obtained via a given decoder ``algorithm``. See Also -------- score_samples : Compute the log probability under the model and posteriors. score : Compute the log probability under the model. """ check_is_fitted(self, "startprob_") self._check() algorithm = algorithm or self.algorithm if algorithm not in DECODER_ALGORITHMS: raise ValueError("Unknown decoder {!r}".format(algorithm)) decoder = { "viterbi": self._decode_viterbi, "map": self._decode_map }[algorithm] X = check_array(X) n_samples = X.shape[0] logprob = 0 state_sequence = np.empty(n_samples, dtype=int) for i, j in iter_from_X_lengths(X, lengths): # XXX decoder works on a single sample at a time! logprobij, state_sequenceij = decoder(X[i:j]) logprob += logprobij state_sequence[i:j] = state_sequenceij return logprob, state_sequence
Find most likely state sequence corresponding to ``X``. Parameters ---------- X : array-like, shape (n_samples, n_features) Feature matrix of individual samples. lengths : array-like of integers, shape (n_sequences, ), optional Lengths of the individual sequences in ``X``. The sum of these should be ``n_samples``. algorithm : string Decoder algorithm. Must be one of "viterbi" or "map". If not given, :attr:`decoder` is used. Returns ------- logprob : float Log probability of the produced state sequence. state_sequence : array, shape (n_samples, ) Labels for each sample from ``X`` obtained via a given decoder ``algorithm``. See Also -------- score_samples : Compute the log probability under the model and posteriors. score : Compute the log probability under the model.
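A hedged usage sketch of the method above with hmmlearn's GaussianHMM; the concrete subclass and hyperparameters are assumptions, but the decode signature matches the docstring:

import numpy as np
from hmmlearn.hmm import GaussianHMM   # assumed concrete model class

X = np.random.randn(100, 2)            # two observation sequences stacked row-wise
lengths = [60, 40]                      # their individual lengths (sum == 100)

model = GaussianHMM(n_components=3, n_iter=20).fit(X, lengths)
logprob, states = model.decode(X, lengths, algorithm="viterbi")
print(logprob, states.shape)            # total log probability, one state label per sample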
def findUnique(self, tableClass, comparison=None, default=_noItem): """ Find an Item in the database which should be unique. If it is found, return it. If it is not found, return 'default' if it was passed, otherwise raise L{errors.ItemNotFound}. If more than one item is found, raise L{errors.DuplicateUniqueItem}. @param comparison: implementor of L{iaxiom.IComparison}. @param default: value to use if the item is not found. """ results = list(self.query(tableClass, comparison, limit=2)) lr = len(results) if lr == 0: if default is _noItem: raise errors.ItemNotFound(comparison) else: return default elif lr == 2: raise errors.DuplicateUniqueItem(comparison, results) elif lr == 1: return results[0] else: raise AssertionError("limit=2 database query returned 3+ results: ", comparison, results)
Find an Item in the database which should be unique. If it is found, return it. If it is not found, return 'default' if it was passed, otherwise raise L{errors.ItemNotFound}. If more than one item is found, raise L{errors.DuplicateUniqueItem}. @param comparison: implementor of L{iaxiom.IComparison}. @param default: value to use if the item is not found.
def filesystem_from_config_dict(config_fs): """ Given a dict containing an entry "module" which contains a FSProvider identifier, parse the configuration and returns a fs_provider. Exits if there is an error. """ if "module" not in config_fs: print("Key 'module' should be defined for the filesystem provider ('fs' configuration option)", file=sys.stderr) exit(1) filesystem_providers = get_filesystems_providers() if config_fs["module"] not in filesystem_providers: print("Unknown filesystem provider "+config_fs["module"], file=sys.stderr) exit(1) fs_class = filesystem_providers[config_fs["module"]] fs_args_needed = fs_class.get_needed_args() fs_args = {} for arg_name, (arg_type, arg_required, _) in fs_args_needed.items(): if arg_name in config_fs: fs_args[arg_name] = arg_type(config_fs[arg_name]) elif arg_required: print("fs option {} is required".format(arg_name), file=sys.stderr) exit(1) try: return fs_class.init_from_args(**fs_args) except: print("Unable to load class " + config_fs["module"], file=sys.stderr) raise
Given a dict containing an entry "module" which holds an FSProvider identifier, parses the configuration and returns an fs_provider instance. Exits if there is an error.
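A sketch of how this is typically driven from a parsed configuration; the "local" module name and its "location" argument are hypothetical and depend on which filesystem providers are installed:

config_fs = {"module": "local", "location": "/srv/tasks"}   # hypothetical provider and args
fs_provider = filesystem_from_config_dict(config_fs)         # prints to stderr and exits if invalid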
def parent(self): """return the parent URL, with params, query, and fragment in place""" path = '/'.join(self.path.split('/')[:-1]) s = path.strip('/').split(':') if len(s)==2 and s[1]=='': return None else: return self.__class__(self, path=path)
return the parent URL, with params, query, and fragment in place
def send(self, msg): """Send the given message. """ with self._pub_lock: self.publish.send_string(msg) return self
Send the given message.
def edit(dataset_uri): """Default editor updating of readme content. """ try: dataset = dtoolcore.ProtoDataSet.from_uri( uri=dataset_uri, config_path=CONFIG_PATH ) except dtoolcore.DtoolCoreTypeError: dataset = dtoolcore.DataSet.from_uri( uri=dataset_uri, config_path=CONFIG_PATH ) readme_content = dataset.get_readme_content() try: # Python2 compatibility. readme_content = unicode(readme_content, "utf-8") except NameError: pass edited_content = click.edit(readme_content) if edited_content is not None: _validate_and_put_readme(dataset, edited_content) click.secho("Updated readme ", nl=False, fg="green") else: click.secho("Did not update readme ", nl=False, fg="red") click.secho(dataset_uri)
Update the readme content using the default editor.
def start(self, *args, **kwargs): """ Start to read the stream(s). """ queue = Queue() stdout_reader, stderr_reader = \ self._create_readers(queue, *args, **kwargs) self.thread = threading.Thread(target=self._read, args=(stdout_reader, stderr_reader, queue)) self.thread.daemon = True self.thread.start()
Start to read the stream(s).
def getEdges(self, edges, inEdges=True, outEdges=True, rawResults=False): """Returns in, out, or both edges linked to self that belong to the collection 'edges'. If rawResults is True, raw ArangoDB results are returned as fetched; if False, a list of Edge objects is returned.""" try: return edges.getEdges(self, inEdges, outEdges, rawResults) except AttributeError: raise AttributeError("%s does not seem to be a valid Edges object" % edges)
Returns in, out, or both edges linked to self that belong to the collection 'edges'. If rawResults is True, raw ArangoDB results are returned as fetched; if False, a list of Edge objects is returned.
def get_transaction(self, transaction_id): """Returns a Transaction object from the block store by its id. Params: transaction_id (str): The header_signature of the desired txn Returns: Transaction: The specified transaction Raises: ValueError: The transaction is not in the block store """ payload = self._get_data_by_id( transaction_id, 'commit_store_get_transaction') txn = Transaction() txn.ParseFromString(payload) return txn
Returns a Transaction object from the block store by its id. Params: transaction_id (str): The header_signature of the desired txn Returns: Transaction: The specified transaction Raises: ValueError: The transaction is not in the block store
def get_subpackages_names(dir_): """Figures out the names of the subpackages of a package Args: dir_: (str) path to package directory Source: http://stackoverflow.com/questions/832004/python-finding-all-packages-inside-a-package """ def is_package(d): d = os.path.join(dir_, d) return os.path.isdir(d) and glob.glob(os.path.join(d, '__init__.py*')) ret = list(filter(is_package, os.listdir(dir_))) ret.sort() return ret
Figures out the names of the subpackages of a package Args: dir_: (str) path to package directory Source: http://stackoverflow.com/questions/832004/python-finding-all-packages-inside-a-package
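A quick illustration of the behaviour, assuming a package laid out on disk as in the comments; only directories that contain an __init__.py are reported, in sorted order:

# mypkg/
#   __init__.py
#   core/__init__.py
#   utils/__init__.py
#   scripts/            <- no __init__.py, so it is not a subpackage
print(get_subpackages_names("mypkg"))   # expected: ['core', 'utils']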
def brackets_insanity_check(p_string): """ This function checks whether the numbers of '(' and ')' characters differ, which indicates that some forks are poorly constructed. Parameters ---------- p_string: str String with the definition of the pipeline, e.g.:: 'processA processB processC(ProcessD | ProcessE)' """ if p_string.count(FORK_TOKEN) != p_string.count(CLOSE_TOKEN): # get the number of each type of bracket and state the one that has a # higher value dict_values = { FORK_TOKEN: p_string.count(FORK_TOKEN), CLOSE_TOKEN: p_string.count(CLOSE_TOKEN) } max_bracket = max(dict_values, key=dict_values.get) raise SanityError( "A different number of '(' and ')' was specified. There are " "{} extra '{}'. The number of '(' and ')' should be equal.".format( str(abs( p_string.count(FORK_TOKEN) - p_string.count(CLOSE_TOKEN))), max_bracket))
This function checks whether the numbers of '(' and ')' characters differ, which indicates that some forks are poorly constructed. Parameters ---------- p_string: str String with the definition of the pipeline, e.g.:: 'processA processB processC(ProcessD | ProcessE)'
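For example, assuming FORK_TOKEN is '(' and CLOSE_TOKEN is ')', an unclosed fork raises the SanityError while a balanced pipeline passes silently:

brackets_insanity_check("processA (processB | processC")    # raises SanityError: 1 extra '('
brackets_insanity_check("processA (processB | processC)")   # balanced, returns None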
def unravel_staff(staff_data): """Unravels staff role dictionary into flat list of staff members with ``role`` set as an attribute. Args: staff_data(dict): Data return from py:method::get_staff Returns: list: Flat list of staff members with ``role`` set to role type (i.e. course_admin, instructor, TA, etc) """ staff_list = [] for role, staff_members in staff_data['data'].items(): for member in staff_members: member['role'] = role staff_list.append(member) return staff_list
Unravels a staff role dictionary into a flat list of staff members with ``role`` set as an attribute. Args: staff_data(dict): Data returned from :py:meth:`get_staff` Returns: list: Flat list of staff members with ``role`` set to the role type (i.e. course_admin, instructor, TA, etc.)
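A small worked example of the flattening, with a hypothetical payload shaped like the get_staff response:

staff_data = {"data": {
    "instructor": [{"name": "Ada"}],
    "TA": [{"name": "Grace"}, {"name": "Edsger"}],
}}
print(unravel_staff(staff_data))
# [{'name': 'Ada', 'role': 'instructor'},
#  {'name': 'Grace', 'role': 'TA'},
#  {'name': 'Edsger', 'role': 'TA'}]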
def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False, sort=False, key=None, separator='&'): """Like :meth:`url_encode` but writes the results to a stream object. If the stream is `None` a generator over all encoded pairs is returned. .. versionadded:: 0.8 :param obj: the object to encode into a query string. :param stream: a stream to write the encoded object into or `None` if an iterator over the encoded pairs should be returned. In that case the separator argument is ignored. :param charset: the charset of the query string. :param encode_keys: set to `True` if you have unicode keys. :param sort: set to `True` if you want parameters to be sorted by `key`. :param separator: the separator to be used for the pairs. :param key: an optional function to be used for sorting. For more details check out the :func:`sorted` documentation. """ gen = _url_encode_impl(obj, charset, encode_keys, sort, key) if stream is None: return gen for idx, chunk in enumerate(gen): if idx: stream.write(separator) stream.write(chunk)
Like :meth:`url_encode` but writes the results to a stream object. If the stream is `None` a generator over all encoded pairs is returned. .. versionadded:: 0.8 :param obj: the object to encode into a query string. :param stream: a stream to write the encoded object into or `None` if an iterator over the encoded pairs should be returned. In that case the separator argument is ignored. :param charset: the charset of the query string. :param encode_keys: set to `True` if you have unicode keys. :param sort: set to `True` if you want parameters to be sorted by `key`. :param separator: the separator to be used for the pairs. :param key: an optional function to be used for sorting. For more details check out the :func:`sorted` documentation.
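A hedged usage sketch showing both modes described above; the exact plus/percent encoding of spaces may differ between Werkzeug versions:

from io import StringIO

stream = StringIO()
url_encode_stream({"q": "hello world", "page": 2}, stream)
print(stream.getvalue())        # e.g. 'q=hello+world&page=2'

pairs = url_encode_stream({"q": "hello world", "page": 2})   # stream=None -> generator
print(list(pairs))              # the encoded pairs, one chunk per parameter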
def get_ribo_counts(ribo_fileobj, transcript_name, read_lengths, read_offsets): """For each mapped read of the given transcript in the BAM file (pysam AlignmentFile object), return the position (+1) and the corresponding frame (1, 2 or 3) to which it aligns. Keyword arguments: ribo_fileobj -- file object - BAM file opened using pysam AlignmentFile transcript_name -- Name of transcript to get counts for read_length (optional) -- If provided, get counts only for reads of this length. """ read_counts = {} total_reads = 0 for record in ribo_fileobj.fetch(transcript_name): query_length = record.query_length position_ref = record.pos + 1 for index, read_length in enumerate(read_lengths): position = position_ref # reset position if read_length == 0 or read_length == query_length: # if an offset is specified, increment position by that offset. position += read_offsets[index] else: # ignore other reads/lengths continue total_reads += 1 try: read_counts[position] except KeyError: read_counts[position] = {1: 0, 2: 0, 3: 0} # calculate the frame of the read from position rem = position % 3 if rem == 0: read_counts[position][3] += 1 else: read_counts[position][rem] += 1 log.debug('Total read counts: {}'.format(total_reads)) log.debug('RiboSeq read counts for transcript: {0}\n{1}'.format(transcript_name, read_counts)) return read_counts, total_reads
For each mapped read of the given transcript in the BAM file (pysam AlignmentFile object), return the position (+1) and the corresponding frame (1, 2 or 3) to which it aligns. Keyword arguments: ribo_fileobj -- file object - BAM file opened using pysam AlignmentFile transcript_name -- Name of transcript to get counts for read_lengths -- List of read lengths to count (0 counts reads of any length) read_offsets -- List of position offsets to apply, one per entry in read_lengths
def comp_meta(file_bef, file_aft, mode="pfail"): """Compare chunk meta, mode=[pfail, power, reboot]""" if env(): cij.err("cij.nvme.comp_meta: Invalid NVMe ENV.") return 1 nvme = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED) num_chk = int(nvme["LNVM_TOTAL_CHUNKS"]) meta_bef = cij.bin.Buffer(types=get_descriptor_table(nvme['SPEC_VERSION']), length=num_chk) meta_aft = cij.bin.Buffer(types=get_descriptor_table(nvme['SPEC_VERSION']), length=num_chk) meta_bef.read(file_bef) meta_aft.read(file_aft) for chk in range(num_chk): ignore = ["WL", "RSV0"] # PFAIL: BEFORE IS OPEN CHUNK, WRITE POINTER IS NOT SURE, IGNORE if mode == "pfail" and meta_bef[chk].CS == 4: ignore.append("WP") # COMPARE CHUNK META if meta_bef.compare(meta_aft, chk, ignore=ignore): cij.warn("META_BUFF_BEF[%s]:" % chk) meta_bef.dump(chk) cij.warn("META_BUFF_AFT[%s]:" % chk) meta_aft.dump(chk) cij.err("Error compare, CHUNK: %s" % chk) return 1 return 0
Compare chunk meta, mode=[pfail, power, reboot]
def registerAccountResponse(self, person, vendorSpecific=None): """CNIdentity.registerAccount(session, person) → Subject https://releases.dataone.org/online/api- documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.registerAccount. Args: person: vendorSpecific: Returns: """ mmp_dict = {'person': ('person.xml', person.toxml('utf-8'))} return self.POST('accounts', fields=mmp_dict, headers=vendorSpecific)
CNIdentity.registerAccount(session, person) → Subject https://releases.dataone.org/online/api- documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.registerAccount. Args: person: vendorSpecific: Returns:
def return_cursor(self, dataout, sx, sy, frame, wcs, key, strval=''): """ Writes the cursor position to dataout. input: dataout: the output stream sx: x coordinate sy: y coordinate wcs: nonzero if we want WCS translation frame: frame buffer index key: keystroke used as trigger strval: optional string value """ #print "RETURN CURSOR" wcscode = (frame + 1) * 100 + wcs if (key == '\32'): curval = "EOF" else: if (key in string.printable and key not in string.whitespace): keystr = key else: keystr = "\\%03o" % (ord(key)) # send the necessary info to the client curval = "%10.3f %10.3f %d %s %s\n" % (sx, sy, wcscode, keystr, strval) dataout.write(right_pad(curval, SZ_IMCURVAL))
writes the cursor position to dataout. input: dataout: the output stream sx: x coordinate sy: y coordinate wcs: nonzero if we want WCS translation frame: frame buffer index key: keystroke used as trigger strval: optional string value
def stream_stats(self): """ Display basic statistics of callback execution: ideal period between callbacks, average measured period between callbacks, and average time spent in the callback. """ Tp = self.frame_length/float(self.fs)*1000 print('Delay (latency) in Entering the Callback the First Time = %6.2f (ms)' \ % (self.DSP_tic[0]*1000,)) print('Ideal Callback period = %1.2f (ms)' % Tp) Tmp_mean = np.mean(np.diff(np.array(self.DSP_tic))[1:]*1000) print('Average Callback Period = %1.2f (ms)' % Tmp_mean) Tprocess_mean = np.mean(np.array(self.DSP_toc)-np.array(self.DSP_tic))*1000 print('Average Callback process time = %1.2f (ms)' % Tprocess_mean)
Display basic statistics of callback execution: ideal period between callbacks, average measured period between callbacks, and average time spent in the callback.
def field_pklist_to_json(self, model, pks): """Convert a list of primary keys to a JSON dict. This uses the same format as cached_queryset_to_json """ app_label = model._meta.app_label model_name = model._meta.model_name return { 'app': app_label, 'model': model_name, 'pks': list(pks), }
Convert a list of primary keys to a JSON dict. This uses the same format as cached_queryset_to_json
def build_structure(self, component, runnable, structure): """ Adds structure to a runnable component based on the structure specifications in the component model. @param component: Component model containing structure specifications. @type component: lems.model.component.FatComponent @param runnable: Runnable component to which structure is to be added. @type runnable: lems.sim.runnable.Runnable @param structure: The structure object to be used to add structure code in the runnable component. @type structure: lems.model.structure.Structure """ if self.debug: print("\n++++++++ Calling build_structure of %s with runnable %s, parent %s"%(component.id, runnable.id, runnable.parent)) # Process single-child instantiations for ch in structure.child_instances: child_runnable = self.build_runnable(ch.referenced_component, runnable) runnable.add_child(child_runnable.id, child_runnable) runnable.add_child_typeref(ch.component, child_runnable) # Process multi-child instatiantions for mi in structure.multi_instantiates: template = self.build_runnable(mi.component, runnable) for i in range(mi.number): #instance = copy.deepcopy(template) instance = template.copy() instance.id = "{0}__{1}__{2}".format(component.id, template.id, i) runnable.array.append(instance) # Process foreach statements for fe in structure.for_eachs: self.build_foreach(component, runnable, fe) self.build_event_connections(component, runnable, structure)
Adds structure to a runnable component based on the structure specifications in the component model. @param component: Component model containing structure specifications. @type component: lems.model.component.FatComponent @param runnable: Runnable component to which structure is to be added. @type runnable: lems.sim.runnable.Runnable @param structure: The structure object to be used to add structure code in the runnable component. @type structure: lems.model.structure.Structure
def open_sensor( self, input_source: str, output_dest: Optional[str] = None, extra_cmd: Optional[str] = None, ) -> Coroutine: """Open an FFmpeg process to read the audio stream. Return a coroutine. """ command = ["-vn", "-filter:a", "silencedetect=n={}dB:d=1".format(self._peak)] # run ffmpeg, read output return self.start_worker( cmd=command, input_source=input_source, output=output_dest, extra_cmd=extra_cmd, pattern="silence", )
Open an FFmpeg process to read the audio stream. Return a coroutine.
def parse_config_file(path: str, final: bool = True) -> None: """Parses global options from a config file. See `OptionParser.parse_config_file`. """ return options.parse_config_file(path, final=final)
Parses global options from a config file. See `OptionParser.parse_config_file`.
def makeSer(segID, N, CA, C, O, geo): '''Creates a Serine residue''' ##R-Group CA_CB_length=geo.CA_CB_length C_CA_CB_angle=geo.C_CA_CB_angle N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle CB_OG_length=geo.CB_OG_length CA_CB_OG_angle=geo.CA_CB_OG_angle N_CA_CB_OG_diangle=geo.N_CA_CB_OG_diangle carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle) CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C") oxygen_g= calculateCoordinates(N, CA, CB, CB_OG_length, CA_CB_OG_angle, N_CA_CB_OG_diangle) OG= Atom("OG", oxygen_g, 0.0, 1.0, " ", " OG", 0, "O") ##Create Reside Data Structure res= Residue((' ', segID, ' '), "SER", ' ') res.add(N) res.add(CA) res.add(C) res.add(O) res.add(CB) res.add(OG) ##print(res) return res
Creates a Serine residue
def from_settings(cls, settings=None, storage=None): """Create from settings. Parameters ---------- settings : `dict`, optional Settings (default: None). storage : `type`, optional Storage class (default: cls.DEFAULT_STORAGE) Returns ------- `markovchain.Markov` """ if storage is None: storage = cls.DEFAULT_STORAGE return cls.from_storage(storage(settings=settings))
Create from settings. Parameters ---------- settings : `dict`, optional Settings (default: None). storage : `type`, optional Storage class (default: cls.DEFAULT_STORAGE) Returns ------- `markovchain.Markov`
def nucleotide_range(self): '''Returns the nucleotide (start, end) positions inclusive of this variant. start==end if it's an amino acid variant, otherwise start+2==end''' if self.variant_type == 'p': return 3 * self.position, 3 * self.position + 2 else: return self.position, self.position
Returns the nucleotide (start, end) positions inclusive of this variant. start==end if it's an amino acid variant, otherwise start+2==end
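A standalone sketch of the same mapping, for illustration only; it mirrors the branch above rather than the library's real class:

def nucleotide_range(variant_type, position):
    if variant_type == 'p':                      # protein-level variant: the whole codon
        return 3 * position, 3 * position + 2
    return position, position                    # nucleotide-level variant: a single base

print(nucleotide_range('p', 10))   # (30, 32)
print(nucleotide_range('c', 10))   # (10, 10)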
def QA_fetch_future_min( code, start, end, format='numpy', frequence='1min', collections=DATABASE.future_min): '获取股票分钟线' if frequence in ['1min', '1m']: frequence = '1min' elif frequence in ['5min', '5m']: frequence = '5min' elif frequence in ['15min', '15m']: frequence = '15min' elif frequence in ['30min', '30m']: frequence = '30min' elif frequence in ['60min', '60m']: frequence = '60min' __data = [] code = QA_util_code_tolist(code, auto_fill=False) cursor = collections.find({ 'code': {'$in': code}, "time_stamp": { "$gte": QA_util_time_stamp(start), "$lte": QA_util_time_stamp(end) }, 'type': frequence }, batch_size=10000) if format in ['dict', 'json']: return [data for data in cursor] for item in cursor: __data.append([str(item['code']), float(item['open']), float(item['high']), float( item['low']), float(item['close']), float(item['position']), float(item['price']), float(item['trade']), item['datetime'], item['tradetime'], item['time_stamp'], item['date'], item['type']]) __data = DataFrame(__data, columns=[ 'code', 'open', 'high', 'low', 'close', 'position', 'price', 'trade', 'datetime', 'tradetime', 'time_stamp', 'date', 'type']) __data['datetime'] = pd.to_datetime(__data['datetime']) __data = __data.set_index('datetime', drop=False) if format in ['numpy', 'np', 'n']: return numpy.asarray(__data) elif format in ['list', 'l', 'L']: return numpy.asarray(__data).tolist() elif format in ['P', 'p', 'pandas', 'pd']: return __data
Fetch futures minute-bar data.
def _default_child_lookup(self, name): """ Return an instance of :class:`ElementProxy <hl7apy.core.ElementProxy>` containing the children found having the given name :type name: ``str`` :param name: the name of the children (e.g. PID) :return: an instance of :class:`ElementProxy <hl7apy.core.ElementProxy>` containing the results """ if name in self.indexes or name in self.traversal_indexes: try: return self.proxies[name] except KeyError: self.proxies[name] = ElementProxy(self, name) return self.proxies[name] else: # child not found in the indexes dictionary (e.g. msh_9.message_code, msh_9.msh_9_1) child_name = self._find_name(name) if child_name is not None: try: return self.proxies[child_name] except KeyError: self.proxies[child_name] = ElementProxy(self, child_name) return self.proxies[child_name]
Return an instance of :class:`ElementProxy <hl7apy.core.ElementProxy>` containing the children found having the given name :type name: ``str`` :param name: the name of the children (e.g. PID) :return: an instance of :class:`ElementProxy <hl7apy.core.ElementProxy>` containing the results
def initialize_plugins(self): """Attempt to Load and initialize all the plugins. Any issues loading plugins will be output to stderr. """ for plugin_name in self.options.plugin: parts = plugin_name.split('.') if len(parts) > 1: module_name = '.'.join(parts[:-1]) class_name = parts[-1] else: # Use the titlecase format of the module name as the class name module_name = parts[0] class_name = parts[0].title() # First try to load plugins from the passed in plugins_dir and then # from the hairball.plugins package. plugin = None for package in (None, 'hairball.plugins'): if package: module_name = '{}.{}'.format(package, module_name) try: module = __import__(module_name, fromlist=[class_name]) # Initializes the plugin by calling its constructor plugin = getattr(module, class_name)() # Verify plugin is of the correct class if not isinstance(plugin, HairballPlugin): sys.stderr.write('Invalid type for plugin {}: {}\n' .format(plugin_name, type(plugin))) plugin = None else: break except (ImportError, AttributeError): pass if plugin: self.plugins.append(plugin) else: sys.stderr.write('Cannot find plugin {}\n'.format(plugin_name)) if not self.plugins: sys.stderr.write('No plugins loaded. Goodbye!\n') sys.exit(1)
Attempt to load and initialize all the plugins. Any issues loading plugins will be output to stderr.
def add_lnTo(self, x, y): """Return a newly created `a:lnTo` subtree with end point *(x, y)*. The new `a:lnTo` element is appended to this `a:path` element. """ lnTo = self._add_lnTo() pt = lnTo._add_pt() pt.x, pt.y = x, y return lnTo
Return a newly created `a:lnTo` subtree with end point *(x, y)*. The new `a:lnTo` element is appended to this `a:path` element.
def robotics_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ parser = arg_parser() parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0') parser.add_argument('--seed', help='RNG seed', type=int, default=None) parser.add_argument('--num-timesteps', type=int, default=int(1e6)) return parser
Create an argparse.ArgumentParser for run_mujoco.py.
def _set_clear_mpls_ldp_statistics(self, v, load=False): """ Setter method for clear_mpls_ldp_statistics, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_ldp_statistics (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_clear_mpls_ldp_statistics is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_clear_mpls_ldp_statistics() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=clear_mpls_ldp_statistics.clear_mpls_ldp_statistics, is_leaf=True, yang_name="clear-mpls-ldp-statistics", rest_name="clear-mpls-ldp-statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsLdp'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """clear_mpls_ldp_statistics must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=clear_mpls_ldp_statistics.clear_mpls_ldp_statistics, is_leaf=True, yang_name="clear-mpls-ldp-statistics", rest_name="clear-mpls-ldp-statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsLdp'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""", }) self.__clear_mpls_ldp_statistics = t if hasattr(self, '_set'): self._set()
Setter method for clear_mpls_ldp_statistics, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_ldp_statistics (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_clear_mpls_ldp_statistics is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_clear_mpls_ldp_statistics() directly.
def _recv_robust(self, sock, size): """ Receive up to size bytes from sock, retrying if the recv() call was interrupted. (This is only required for Python 2 compatibility.) """ while True: try: return sock.recv(size) except socket.error as e: if e.errno != errno.EINTR: raise
Receive up to size bytes from sock, retrying if the recv() call was interrupted. (This is only required for Python 2 compatibility.)
def info(self, set_info=None, followlinks=True): """ info: backend info about self (probably not implemented for all backends. The result will be backend specific). @param set_info: If not None, this data will be set on self. @param followlinks: If True, symlinks will be followed. If False, the info of the symlink itself will be returned. (Default: True) @rtype: Bunch @return: backend specific information about self. """ return self.connection.info(self, set_info=set_info, followlinks=followlinks)
info: backend info about self (probably not implemented for all backends. The result will be backend specific). @param set_info: If not None, this data will be set on self. @param followlinks: If True, symlinks will be followed. If False, the info of the symlink itself will be returned. (Default: True) @rtype: Bunch @return: backend specific information about self.
def chassis(self): """Get list of chassis known to test session.""" self._check_session() status, data = self._rest.get_request('chassis') return data
Get list of chassis known to test session.
def norm_l1(x, axis=None): r"""Compute the :math:`\ell_1` norm .. math:: \| \mathbf{x} \|_1 = \sum_i | x_i | where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`. Parameters ---------- x : array_like Input array :math:`\mathbf{x}` axis : `None` or int or tuple of ints, optional (default None) Axes of `x` over which to compute the :math:`\ell_1` norm. If `None`, an entire multi-dimensional array is treated as a vector. If axes are specified, then distinct values are computed over the indices of the remaining axes of input array `x`. Returns ------- nl1 : float or ndarray Norm of `x`, or array of norms treating specified axes of `x` as a vector """ nl1 = np.sum(np.abs(x), axis=axis, keepdims=True) # If the result has a single element, convert it to a scalar if nl1.size == 1: nl1 = nl1.ravel()[0] return nl1
Compute the :math:`\ell_1` norm .. math:: \| \mathbf{x} \|_1 = \sum_i | x_i | where :math:`x_i` is element :math:`i` of vector :math:`\mathbf{x}`. Parameters ---------- x : array_like Input array :math:`\mathbf{x}` axis : `None` or int or tuple of ints, optional (default None) Axes of `x` over which to compute the :math:`\ell_1` norm. If `None`, an entire multi-dimensional array is treated as a vector. If axes are specified, then distinct values are computed over the indices of the remaining axes of input array `x`. Returns ------- nl1 : float or ndarray Norm of `x`, or array of norms treating specified axes of `x` as a vector
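Two calls showing the axis behaviour (NumPy only; the values follow directly from the definition above):

import numpy as np

x = np.array([[1.0, -2.0], [3.0, -4.0]])
print(norm_l1(x))            # 10.0 -- the whole array treated as a single vector
print(norm_l1(x, axis=0))    # column-wise norms, shape (1, 2): [[4. 6.]]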
def __lock_location(self) -> None: """ Attempts to lock the location used by this writer. Will raise an error if the location is already locked by another writer. Will do nothing if the location is already locked by this writer. """ if not self._is_active: if self._location in LogdirWriter._locked_locations: raise RuntimeError('TensorBoard event file in directory %s with suffix %s ' 'is already in use. At present multiple TensorBoard file writers ' 'cannot write data into the same file.' % self._location) LogdirWriter._locked_locations.add(self._location) self._is_active = True
Attempts to lock the location used by this writer. Will raise an error if the location is already locked by another writer. Will do nothing if the location is already locked by this writer.
def start_stack(self, stack): """启动服务组 启动服务组中的所有停止状态的服务。 Args: - stack: 服务所属的服务组名称 Returns: 返回一个tuple对象,其格式为(<result>, <ResponseInfo>) - result 成功返回空dict{},失败返回{"error": "<errMsg string>"} - ResponseInfo 请求的Response信息 """ url = '{0}/v3/stacks/{1}/start'.format(self.host, stack) return self.__post(url)
Start a stack (service group). Starts all stopped services in the stack. Args: - stack: name of the stack that the services belong to Returns: a tuple (<result>, <ResponseInfo>) - result: an empty dict {} on success, or {"error": "<errMsg string>"} on failure - ResponseInfo: the response information of the request
def friendly_type_name(raw_type: typing.Type) -> str: """ Returns a user-friendly type name :param raw_type: raw type (str, int, ...) :return: user friendly type as string """ try: return _TRANSLATE_TYPE[raw_type] except KeyError: LOGGER.error('unmanaged value type: %s', raw_type) return str(raw_type)
Returns a user-friendly type name :param raw_type: raw type (str, int, ...) :return: user friendly type as string
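For instance, assuming _TRANSLATE_TYPE maps str to 'string' (its real contents are not shown here):

print(friendly_type_name(str))    # 'string' (looked up in _TRANSLATE_TYPE)
print(friendly_type_name(dict))   # unmanaged type: logs an error and returns "<class 'dict'>"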
def pisaParser(src, context, default_css="", xhtml=False, encoding=None, xml_output=None): """ - Parse HTML and get miniDOM - Extract CSS informations, add default CSS, parse CSS - Handle the document DOM itself and build reportlab story - Return Context object """ global CSSAttrCache CSSAttrCache = {} if xhtml: # TODO: XHTMLParser doesn't see to exist... parser = html5lib.XHTMLParser(tree=treebuilders.getTreeBuilder("dom")) else: parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom")) if isinstance(src, six.text_type): # If an encoding was provided, do not change it. if not encoding: encoding = "utf-8" src = src.encode(encoding) src = pisaTempFile(src, capacity=context.capacity) # # Test for the restrictions of html5lib # if encoding: # # Workaround for html5lib<0.11.1 # if hasattr(inputstream, "isValidEncoding"): # if encoding.strip().lower() == "utf8": # encoding = "utf-8" # if not inputstream.isValidEncoding(encoding): # log.error("%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!", encoding) # else: # if inputstream.codecName(encoding) is None: # log.error("%r is not a valid encoding", encoding) document = parser.parse( src, ) # encoding=encoding) if xml_output: if encoding: xml_output.write(document.toprettyxml(encoding=encoding)) else: xml_output.write(document.toprettyxml(encoding="utf8")) if default_css: context.addDefaultCSS(default_css) pisaPreLoop(document, context) # try: context.parseCSS() # except: # context.cssText = DEFAULT_CSS # context.parseCSS() # context.debug(9, pprint.pformat(context.css)) pisaLoop(document, context) return context
- Parse HTML and get miniDOM - Extract CSS information, add default CSS, parse CSS - Handle the document DOM itself and build the reportlab story - Return Context object
def set_range(self, minimum, maximum): """ Set a range. The range is passed unchanged to the rangeChanged member function. :param minimum: minimum value of the range (None if no percentage is required) :param maximum: maximum value of the range (None if no percentage is required) """ self._min = minimum self._max = maximum self.on_rangeChange(minimum, maximum)
Set a range. The range is passed unchanged to the on_rangeChange member function. :param minimum: minimum value of the range (None if no percentage is required) :param maximum: maximum value of the range (None if no percentage is required)
def write_comment(fh, comment): """ Writes a comment to the file in Java properties format. Newlines in the comment text are automatically turned into a continuation of the comment by adding a "#" to the beginning of each line. :param fh: a writable file-like object :param comment: comment string to write """ _require_string(comment, 'comments') fh.write(_escape_comment(comment)) fh.write(b'\n')
Writes a comment to the file in Java properties format. Newlines in the comment text are automatically turned into a continuation of the comment by adding a "#" to the beginning of each line. :param fh: a writable file-like object :param comment: comment string to write
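A short usage sketch; the function writes bytes, so the file must be opened in binary mode, and each line of the comment ends up prefixed with '#' (the exact escaping is handled by _escape_comment):

with open("app.properties", "wb") as fh:
    write_comment(fh, "Generated settings\nDo not edit by hand")
# The resulting file starts with two comment lines, roughly:
# #Generated settings
# #Do not edit by hand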
def forward_backward(self, data_batch): """A convenient function that calls both ``forward`` and ``backward``.""" self.forward(data_batch, is_train=True) self.backward()
A convenient function that calls both ``forward`` and ``backward``.
def get_build_configuration_set_raw(id=None, name=None): """ Get a specific BuildConfigurationSet by name or ID """ found_id = common.set_id(pnc_api.build_group_configs, id, name) response = utils.checked_api_call(pnc_api.build_group_configs, 'get_specific', id=found_id) if response: return response.content
Get a specific BuildConfigurationSet by name or ID
def visit_and_update(self, visitor_fn): """Create an updated version (if needed) of BinaryComposition via the visitor pattern.""" new_left = self.left.visit_and_update(visitor_fn) new_right = self.right.visit_and_update(visitor_fn) if new_left is not self.left or new_right is not self.right: return visitor_fn(BinaryComposition(self.operator, new_left, new_right)) else: return visitor_fn(self)
Create an updated version (if needed) of BinaryComposition via the visitor pattern.
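A hedged sketch of a visitor function that could be passed in; visitors receive each (possibly rebuilt) node bottom-up and must return either the node itself or a replacement:

def log_nodes(expression):
    # No-op visitor: inspect every node, keep the tree unchanged.
    print("visited:", type(expression).__name__)
    return expression

new_tree = some_binary_composition.visit_and_update(log_nodes)   # hypothetical root expression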
def hr(color): """ Colored horizontal rule printer/logger factory. The resulting function prints an entire terminal row with the given symbol repeated. It's a terminal version of the HTML ``<hr/>``. """ logger = log(color) return lambda symbol: logger(symbol * terminal_size.usable_width)
Colored horizontal rule printer/logger factory. The resulting function prints an entire terminal row with the given symbol repeated. It's a terminal version of the HTML ``<hr/>``.
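A minimal usage sketch; log, terminal_size and the colour names come from the surrounding module:

rule = hr("cyan")   # build a printer bound to one colour
rule("=")           # prints a full-width row of '=' characters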
async def revoke_cred(self, rr_id: str, cr_id) -> int: """ Revoke credential that input revocation registry identifier and credential revocation identifier specify. Return (epoch seconds) time of revocation. Raise AbsentTails if no tails file is available for input revocation registry identifier. Raise WalletState for closed wallet. Raise BadRevocation if issuer cannot revoke specified credential for any other reason (e.g., did not issue it, already revoked it). :param rr_id: revocation registry identifier :param cr_id: credential revocation identifier :return: time of revocation, in epoch seconds """ LOGGER.debug('Issuer.revoke_cred >>> rr_id: %s, cr_id: %s', rr_id, cr_id) if not self.wallet.handle: LOGGER.debug('Issuer.revoke_cred <!< Wallet %s is closed', self.name) raise WalletState('Wallet {} is closed'.format(self.name)) if not ok_rev_reg_id(rr_id): LOGGER.debug('Issuer.revoke_cred <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) tails_reader_handle = (await Tails( self.dir_tails, *rev_reg_id2cred_def_id_tag(rr_id)).open()).reader_handle try: rrdelta_json = await anoncreds.issuer_revoke_credential( self.wallet.handle, tails_reader_handle, rr_id, cr_id) except IndyError as x_indy: LOGGER.debug( 'Issuer.revoke_cred <!< Could not revoke revoc reg id %s, cred rev id %s: indy error code %s', rr_id, cr_id, x_indy.error_code) raise BadRevocation( 'Could not revoke revoc reg id {}, cred rev id {}: indy error code {}'.format( rr_id, cr_id, x_indy.error_code)) rr_ent_req_json = await ledger.build_revoc_reg_entry_request(self.did, rr_id, 'CL_ACCUM', rrdelta_json) resp_json = await self._sign_submit(rr_ent_req_json) # raises AbsentPool or ClosedPool if applicable resp = json.loads(resp_json) rv = self.pool.protocol.txn2epoch(resp) LOGGER.debug('Issuer.revoke_cred <<< %s', rv) return rv
Revoke credential that input revocation registry identifier and credential revocation identifier specify. Return (epoch seconds) time of revocation. Raise AbsentTails if no tails file is available for input revocation registry identifier. Raise WalletState for closed wallet. Raise BadRevocation if issuer cannot revoke specified credential for any other reason (e.g., did not issue it, already revoked it). :param rr_id: revocation registry identifier :param cr_id: credential revocation identifier :return: time of revocation, in epoch seconds
def concordance_index_censored(event_indicator, event_time, estimate, tied_tol=1e-8): """Concordance index for right-censored data The concordance index is defined as the proportion of all comparable pairs in which the predictions and outcomes are concordant. Samples are comparable if for at least one of them an event occurred. If the estimated risk is larger for the sample with a higher time of event/censoring, the predictions of that pair are said to be concordant. If an event occurred for one sample and the other is known to be event-free at least until the time of event of the first, the second sample is assumed to *outlive* the first. When predicted risks are identical for a pair, 0.5 rather than 1 is added to the count of concordant pairs. A pair is not comparable if an event occurred for both of them at the same time or an event occurred for one of them but the time of censoring is smaller than the time of event of the first one. Parameters ---------- event_indicator : array-like, shape = (n_samples,) Boolean array denotes whether an event occurred event_time : array-like, shape = (n_samples,) Array containing the time of an event or time of censoring estimate : array-like, shape = (n_samples,) Estimated risk of experiencing an event tied_tol : float, optional, default: 1e-8 The tolerance value for considering ties. If the absolute difference between risk scores is smaller or equal than `tied_tol`, risk scores are considered tied. Returns ------- cindex : float Concordance index concordant : int Number of concordant pairs discordant : int Number of discordant pairs tied_risk : int Number of pairs having tied estimated risks tied_time : int Number of comparable pairs sharing the same time References ---------- .. [1] Harrell, F.E., Califf, R.M., Pryor, D.B., Lee, K.L., Rosati, R.A, "Multivariable prognostic models: issues in developing models, evaluating assumptions and adequacy, and measuring and reducing errors", Statistics in Medicine, 15(4), 361-87, 1996. """ event_indicator, event_time, estimate = _check_inputs( event_indicator, event_time, estimate) w = numpy.ones_like(estimate) return _estimate_concordance_index(event_indicator, event_time, estimate, w, tied_tol)
Concordance index for right-censored data The concordance index is defined as the proportion of all comparable pairs in which the predictions and outcomes are concordant. Samples are comparable if for at least one of them an event occurred. If the estimated risk is larger for the sample with a higher time of event/censoring, the predictions of that pair are said to be concordant. If an event occurred for one sample and the other is known to be event-free at least until the time of event of the first, the second sample is assumed to *outlive* the first. When predicted risks are identical for a pair, 0.5 rather than 1 is added to the count of concordant pairs. A pair is not comparable if an event occurred for both of them at the same time or an event occurred for one of them but the time of censoring is smaller than the time of event of the first one. Parameters ---------- event_indicator : array-like, shape = (n_samples,) Boolean array denotes whether an event occurred event_time : array-like, shape = (n_samples,) Array containing the time of an event or time of censoring estimate : array-like, shape = (n_samples,) Estimated risk of experiencing an event tied_tol : float, optional, default: 1e-8 The tolerance value for considering ties. If the absolute difference between risk scores is smaller or equal than `tied_tol`, risk scores are considered tied. Returns ------- cindex : float Concordance index concordant : int Number of concordant pairs discordant : int Number of discordant pairs tied_risk : int Number of pairs having tied estimated risks tied_time : int Number of comparable pairs sharing the same time References ---------- .. [1] Harrell, F.E., Califf, R.M., Pryor, D.B., Lee, K.L., Rosati, R.A, "Multivariable prognostic models: issues in developing models, evaluating assumptions and adequacy, and measuring and reducing errors", Statistics in Medicine, 15(4), 361-87, 1996.
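A small call sketch with synthetic inputs; the numbers are purely illustrative and the five return values match the docstring above:

import numpy as np

event = np.array([True, False, True, True])   # did an event occur for each sample?
time = np.array([5.0, 6.0, 2.0, 8.0])         # event or censoring time
risk = np.array([0.8, 0.2, 0.9, 0.1])         # higher score = predicted to fail sooner

cindex, concordant, discordant, tied_risk, tied_time = \
    concordance_index_censored(event, time, risk)
print(cindex)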
def run(self): """Launch filtering, sorting and paging to output results.""" query = self.query # count before filtering self.cardinality = query.add_columns(self.columns[0].sqla_expr).count() self._set_column_filter_expressions() self._set_global_filter_expression() self._set_sort_expressions() self._set_yadcf_data(query) # apply filters query = query.filter( *[e for e in self.filter_expressions if e is not None]) self.cardinality_filtered = query.add_columns( self.columns[0].sqla_expr).count() # apply sorts query = query.order_by( *[e for e in self.sort_expressions if e is not None]) # add paging options length = int(self.params.get('length')) if length >= 0: query = query.limit(length) elif length == -1: pass else: raise (ValueError( 'Length should be a positive integer or -1 to disable')) query = query.offset(int(self.params.get('start'))) # add columns to query query = query.add_columns(*[c.sqla_expr for c in self.columns]) # fetch the result of the queries column_names = [ col.mData if col.mData else str(i) for i, col in enumerate(self.columns) ] self.results = [{k: v for k, v in zip(column_names, row)} for row in query.all()]
Launch filtering, sorting and paging to output results.
def walk_trie(dicts, w, q, A): """ Helper function for traversing the word trie. It simultaneously keeps track of the active Automaton state, producing the intersection of the given Automaton and the trie (which is equivalent to a DFA). Once an invalid "terminating" state is reached, the children nodes are immediately dismissed from the recursive stack """ if q in A.F and '__end__' in dicts: yield w for key in dicts.keys(): if key == '__end__': continue if A.transition_function(q, key) is None: return try: yield from walk_trie(dicts[key], w + key, A.transition_function(q, key), A) except: return
Helper function for traversing the word trie. It simultaneously keeps track of the active Automaton state, producing the intersection of the given Automaton and the trie (which is equivalent to a DFA). Once an invalid "terminating" state is reached, the children nodes are immediately dismissed from the recursive stack
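The trie argument is the usual nested-dict encoding with '__end__' marking word boundaries; a tiny hand-built example (the automaton A and the name of its initial state are assumptions of the caller):

# "to" and "tea" encoded as a nested-dict trie
trie = {"t": {"o": {"__end__": True},
              "e": {"a": {"__end__": True}}}}

# words accepted by the automaton A, starting from its initial state q0 (attribute name assumed)
# accepted = list(walk_trie(trie, "", q0, A))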
def diagonal_neural_gpu(inputs, hparams, name=None): """Improved Neural GPU as in https://arxiv.org/abs/1702.08727.""" with tf.variable_scope(name, "diagonal_neural_gpu"): def step(state_tup, inp): """Single step of the improved Neural GPU.""" state, _ = state_tup x = state for layer in range(hparams.num_hidden_layers): x, new_loss = common_layers.diagonal_conv_gru( x, (hparams.kernel_height, hparams.kernel_width), hparams.hidden_size, dropout=hparams.dropout, name="dcgru_%d" % layer) # Padding input is zeroed-out in the modality, we check this by summing. padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001) new_state = tf.where(padding_inp, state, x) # No-op where inp is padding. return new_state, new_loss final_state, losses = tf.scan( step, tf.transpose(inputs, [1, 0, 2, 3]), initializer=(inputs, tf.constant(0.0)), parallel_iterations=1, swap_memory=True) return final_state[0, :, :, :, :], 2.0 * tf.reduce_mean(losses)
Improved Neural GPU as in https://arxiv.org/abs/1702.08727.
def alias_proficiency(self, proficiency_id, alias_id): """Adds an ``Id`` to a ``Proficiency`` for the purpose of creating compatibility. The primary ``Id`` of the ``Proficiency`` is determined by the provider. The new ``Id`` performs as an alias to the primary ``Id``. If the alias is a pointer to another proficiency, it is reassigned to the given proficiency ``Id``. arg: proficiency_id (osid.id.Id): the ``Id`` of a ``Proficiency`` arg: alias_id (osid.id.Id): the alias ``Id`` raise: AlreadyExists - ``alias_id`` is already assigned raise: NotFound - ``proficiency_id`` not found raise: NullArgument - ``proficiency_id`` or ``alias_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.alias_resources_template self._alias_id(primary_id=proficiency_id, equivalent_id=alias_id)
Adds an ``Id`` to a ``Proficiency`` for the purpose of creating compatibility. The primary ``Id`` of the ``Proficiency`` is determined by the provider. The new ``Id`` performs as an alias to the primary ``Id``. If the alias is a pointer to another proficiency, it is reassigned to the given proficiency ``Id``. arg: proficiency_id (osid.id.Id): the ``Id`` of a ``Proficiency`` arg: alias_id (osid.id.Id): the alias ``Id`` raise: AlreadyExists - ``alias_id`` is already assigned raise: NotFound - ``proficiency_id`` not found raise: NullArgument - ``proficiency_id`` or ``alias_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def read(self, n=-1): """Read up to n items (bytes/chars) from the lob and return them. If n is -1 then all available data is returned. Might trigger further loading of data from the database if the number of items requested for reading is larger than what is currently buffered. """ pos = self.tell() num_items_to_read = n if n != -1 else self.length - pos # calculate the position of the file pointer after data was read: new_pos = min(pos + num_items_to_read, self.length) if new_pos > self._current_lob_length: missing_num_items_to_read = new_pos - self._current_lob_length self._read_missing_lob_data_from_db(self._current_lob_length, missing_num_items_to_read) # reposition file pointer to original position as reading in IO buffer might have changed it self.seek(pos, SEEK_SET) return self.data.read(n)
Read up to n items (bytes/chars) from the lob and return them. If n is -1 then all available data is returned. Might trigger further loading of data from the database if the number of items requested for reading is larger than what is currently buffered.
def init_log_config(handler=None): """Set up the application logging (not to be confused with check loggers). """ for applog in lognames.values(): # propagate except for root app logger 'linkcheck' propagate = (applog != LOG_ROOT) configdict['loggers'][applog] = dict(level='INFO', propagate=propagate) logging.config.dictConfig(configdict) if handler is None: handler = ansicolor.ColoredStreamHandler(strm=sys.stderr) add_loghandler(handler)
Set up the application logging (not to be confused with check loggers).
def update_work_as_completed(self, worker_id, work_id, other_values=None, error=None): """Updates work piece in datastore as completed. Args: worker_id: ID of the worker which did the work work_id: ID of the work which was done other_values: dictionary with additonal values which should be saved with the work piece error: if not None then error occurred during computation of the work piece. In such case work will be marked as completed with error. Returns: whether work was successfully updated """ client = self._datastore_client try: with client.transaction() as transaction: work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id, KIND_WORK, work_id) work_entity = client.get(work_key, transaction=transaction) if work_entity['claimed_worker_id'] != worker_id: return False work_entity['is_completed'] = True if other_values: work_entity.update(other_values) if error: work_entity['error'] = text_type(error) transaction.put(work_entity) except Exception: return False return True
Updates work piece in datastore as completed. Args: worker_id: ID of the worker which did the work work_id: ID of the work which was done other_values: dictionary with additonal values which should be saved with the work piece error: if not None then error occurred during computation of the work piece. In such case work will be marked as completed with error. Returns: whether work was successfully updated
def add(self, name, value):
    """ Append a value to a multi-value parameter, keeping at most one copy of the pair. """
    clone = self._clone()
    clone._qsl = [p for p in self._qsl if not (p[0] == name and p[1] == value)]
    clone._qsl.append((name, value,))
    return clone
Append a value to a multi-value parameter, keeping at most one copy of the pair.
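A standalone sketch of the dedupe-then-append behaviour on a query-string list; the real method works on a cloned object with a `_qsl` attribute, so the bare list below is only illustrative.

# `qsl` stands in for self._qsl; re-adding an existing pair keeps a single copy.
qsl = [('tag', 'python'), ('tag', 'http')]
name, value = 'tag', 'http'
qsl = [p for p in qsl if not (p[0] == name and p[1] == value)]
qsl.append((name, value))
print(qsl)   # [('tag', 'python'), ('tag', 'http')]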
def save(self): """ save or update this cluster in Ariane Server :return: """ LOGGER.debug("Cluster.save") post_payload = {} consolidated_containers_id = [] if self.id is not None: post_payload['clusterID'] = self.id if self.name is not None: post_payload['clusterName'] = self.name if self.containers_id is not None: consolidated_containers_id = copy.deepcopy(self.containers_id) if self.containers_2_rm is not None: for container_2_rm in self.containers_2_rm: if container_2_rm.id is None: container_2_rm.sync() consolidated_containers_id.remove(container_2_rm.id) if self.containers_2_add is not None: for container_2_add in self.containers_2_add: if container_2_add.id is None: container_2_add.save() consolidated_containers_id.append(container_2_add.id) post_payload['clusterContainersID'] = consolidated_containers_id params = SessionService.complete_transactional_req({'payload': json.dumps(post_payload)}) if MappingService.driver_type != DriverFactory.DRIVER_REST: params['OPERATION'] = 'createCluster' args = {'properties': params} else: args = { 'http_operation': 'POST', 'operation_path': '', 'parameters': params } response = ClusterService.requester.call(args) if MappingService.driver_type != DriverFactory.DRIVER_REST: response = response.get() if response.rc != 0: LOGGER.warning('Cluster.save - Problem while saving cluster' + self.name + '. Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + " (" + str(response.rc) + ")") if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message: raise ArianeMappingOverloadError("Cluster.save", ArianeMappingOverloadError.ERROR_MSG) # traceback.print_stack() else: self.id = response.response_content['clusterID'] if self.containers_2_add is not None: for container_2_add in self.containers_2_add: container_2_add.sync() if self.containers_2_rm is not None: for container_2_rm in self.containers_2_rm: container_2_rm.sync() self.sync(json_obj=response.response_content) self.containers_2_add.clear() self.containers_2_rm.clear()
save or update this cluster in Ariane Server :return:
def path(self, which=None): """Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: smart_class_parameters /api/puppetclasses/:puppetclass_id/smart_class_parameters Otherwise, call ``super``. """ if which in ('smart_class_parameters', 'smart_variables'): return '{0}/{1}'.format( super(PuppetClass, self).path(which='self'), which ) return super(PuppetClass, self).path(which)
Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: smart_class_parameters /api/puppetclasses/:puppetclass_id/smart_class_parameters Otherwise, call ``super``.
def _make_version(major, minor, micro, releaselevel, serial): """Create a readable version string from version_info tuple components.""" assert releaselevel in ['alpha', 'beta', 'candidate', 'final'] version = "%d.%d" % (major, minor) if micro: version += ".%d" % (micro,) if releaselevel != 'final': short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel] version += "%s%d" % (short, serial) return version
Create a readable version string from version_info tuple components.
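A quick usage sketch, assuming the `_make_version` helper above is in scope; the version tuples are arbitrary example values.

print(_make_version(4, 1, 2, 'beta', 3))       # "4.1.2b3"
print(_make_version(5, 0, 0, 'final', 0))      # "5.0"  (micro and pre-release parts omitted)
print(_make_version(2, 7, 0, 'candidate', 1))  # "2.7rc1"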
def handleDetailDblClick( self, item ): """ Handles when a detail item is double clicked on. :param item | <QTreeWidgetItem> """ if ( isinstance(item, XOrbRecordItem) ): self.emitRecordDoubleClicked(item.record())
Handles when a detail item is double clicked on. :param item | <QTreeWidgetItem>
def is_hermitian( matrix: np.ndarray, *, rtol: float = 1e-5, atol: float = 1e-8) -> bool: """Determines if a matrix is approximately Hermitian. A matrix is Hermitian if it's square and equal to its adjoint. Args: matrix: The matrix to check. rtol: The per-matrix-entry relative tolerance on equality. atol: The per-matrix-entry absolute tolerance on equality. Returns: Whether the matrix is Hermitian within the given tolerance. """ return (matrix.shape[0] == matrix.shape[1] and np.allclose(matrix, np.conj(matrix.T), rtol=rtol, atol=atol))
Determines if a matrix is approximately Hermitian. A matrix is Hermitian if it's square and equal to its adjoint. Args: matrix: The matrix to check. rtol: The per-matrix-entry relative tolerance on equality. atol: The per-matrix-entry absolute tolerance on equality. Returns: Whether the matrix is Hermitian within the given tolerance.
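A small usage sketch with NumPy, assuming `is_hermitian` above is in scope; the matrices are toy examples.

import numpy as np

h = np.array([[1, 1j], [-1j, 2]])    # equal to its conjugate transpose
nh = np.array([[1, 1j], [1j, 2]])    # not equal to its conjugate transpose
print(is_hermitian(h))    # True
print(is_hermitian(nh))   # False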
def update_refs(self, ref_updates, repository_id, project=None, project_id=None): """UpdateRefs. Creating, updating, or deleting refs(branches). :param [GitRefUpdate] ref_updates: List of ref updates to attempt to perform :param str repository_id: The name or ID of the repository. :param str project: Project ID or project name :param str project_id: ID or name of the team project. Optional if specifying an ID for repository. :rtype: [GitRefUpdateResult] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if repository_id is not None: route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str') query_parameters = {} if project_id is not None: query_parameters['projectId'] = self._serialize.query('project_id', project_id, 'str') content = self._serialize.body(ref_updates, '[GitRefUpdate]') response = self._send(http_method='POST', location_id='2d874a60-a811-4f62-9c9f-963a6ea0a55b', version='5.0', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('[GitRefUpdateResult]', self._unwrap_collection(response))
UpdateRefs. Creating, updating, or deleting refs(branches). :param [GitRefUpdate] ref_updates: List of ref updates to attempt to perform :param str repository_id: The name or ID of the repository. :param str project: Project ID or project name :param str project_id: ID or name of the team project. Optional if specifying an ID for repository. :rtype: [GitRefUpdateResult]
def start(self): """Start the job. This will begin pulling tasks from the taskmaster and executing them, and return when there are no more tasks. If a task fails to execute (i.e. execute() raises an exception), then the job will stop.""" while True: task = self.taskmaster.next_task() if task is None: break try: task.prepare() if task.needs_execute(): task.execute() except: if self.interrupted(): try: raise SCons.Errors.BuildError( task.targets[0], errstr=interrupt_msg) except: task.exception_set() else: task.exception_set() # Let the failed() callback function arrange for the # build to stop if that's appropriate. task.failed() else: task.executed() task.postprocess() self.taskmaster.cleanup()
Start the job. This will begin pulling tasks from the taskmaster and executing them, and return when there are no more tasks. If a task fails to execute (i.e. execute() raises an exception), then the job will stop.
def _prt_line_detail(self, prt, line, lnum=""): """Print each field and its value.""" data = zip(self.flds, line.split('\t')) txt = ["{:2}) {:13} {}".format(i, hdr, val) for i, (hdr, val) in enumerate(data)] prt.write("{LNUM}\n{TXT}\n".format(LNUM=lnum, TXT='\n'.join(txt)))
Print each field and its value.
def consume_keys_asynchronous_processes(self):
    """ Work through the keys to look up asynchronously using multiple processes """
    print("\nLooking up " + str(self.input_queue.qsize()) + " keys from " + self.source_name + "\n")
    jobs = multiprocessing.cpu_count() * 4 if (multiprocessing.cpu_count() * 4 < self.input_queue.qsize()) \
        else self.input_queue.qsize()
    pool = multiprocessing.Pool(processes=jobs, maxtasksperchild=10)
    for x in range(jobs):
        # apply() blocks until each worker finishes; apply_async() lets the workers run in parallel.
        pool.apply_async(self.data_worker, [], self.worker_args)
    pool.close()
    pool.join()
Work through the keys to look up asynchronously using multiple processes
def SetAlpha(self, alpha): ''' Change the window's transparency :param alpha: From 0 to 1 with 0 being completely transparent :return: ''' self._AlphaChannel = alpha * 255 if self._AlphaChannel is not None: self.MasterFrame.SetTransparent(self._AlphaChannel)
Change the window's transparency :param alpha: From 0 to 1 with 0 being completely transparent :return:
def is_valid_address(self, *args, **kwargs): """ check address Accepts: - address [hex string] (withdrawal address in hex form) - coinid [string] (blockchain id (example: BTCTEST, LTCTEST)) Returns dictionary with following fields: - bool [Bool] """ client = HTTPClient(self.withdraw_server_address + self.withdraw_endpoint) return client.request('is_valid_address', kwargs)
check address Accepts: - address [hex string] (withdrawal address in hex form) - coinid [string] (blockchain id (example: BTCTEST, LTCTEST)) Returns dictionary with following fields: - bool [Bool]
def inter(a, b):
    """
    Intersect two sets of any data type to form a third set.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inter_c.html

    :param a: First input set.
    :type a: spiceypy.utils.support_types.SpiceCell
    :param b: Second input set.
    :type b: spiceypy.utils.support_types.SpiceCell
    :return: Intersection of a and b.
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(a, stypes.SpiceCell)
    assert isinstance(b, stypes.SpiceCell)
    assert a.dtype == b.dtype
    # Next line was redundant with the [raise NotImplementedError] below
    # assert a.dtype == 0 or a.dtype == 1 or a.dtype == 2
    # Compare dtype with ==, not identity checks against integer literals.
    if a.dtype == 0:
        c = stypes.SPICECHAR_CELL(max(a.size, b.size), max(a.length, b.length))
    elif a.dtype == 1:
        c = stypes.SPICEDOUBLE_CELL(max(a.size, b.size))
    elif a.dtype == 2:
        c = stypes.SPICEINT_CELL(max(a.size, b.size))
    else:
        raise NotImplementedError
    libspice.inter_c(ctypes.byref(a), ctypes.byref(b), ctypes.byref(c))
    return c
Intersect two sets of any data type to form a third set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inter_c.html :param a: First input set. :type a: spiceypy.utils.support_types.SpiceCell :param b: Second input set. :type b: spiceypy.utils.support_types.SpiceCell :return: Intersection of a and b. :rtype: spiceypy.utils.support_types.SpiceCell
def output(self, out_file): """Write the converted entries to out_file""" self.out_file = out_file out_file.write('event: ns : Nanoseconds\n') out_file.write('events: ns\n') self._output_summary() for entry in sorted(self.entries, key=_entry_sort_key): self._output_entry(entry)
Write the converted entries to out_file
def get_runs(project_name):
    """
    Returns a dict of runs present in project.

    :return: dict -> {<int_keys>: <run_name>}
    """
    conn, c = open_data_base_connection()
    try:
        c.execute("""SELECT run_name FROM {}""".format(project_name + "_run_table"))
        run_names = np.array(c.fetchall()).squeeze(axis=1)
        run_names = convert_list_to_dict(run_names)
        return run_names
    except sqlite3.OperationalError:
        # The original logged an undefined `run_name`; the missing table belongs to the project.
        logging.info("{} not found".format(project_name))
    finally:
        conn.close()
Returns a dict of runs present in project. :return: dict -> {<int_keys>: <run_name>}
def _set_suppress_arp(self, v, load=False): """ Setter method for suppress_arp, mapped from YANG variable /bridge_domain/suppress_arp (container) If this variable is read-only (config: false) in the source YANG file, then _set_suppress_arp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_suppress_arp() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=suppress_arp.suppress_arp, is_container='container', presence=False, yang_name="suppress-arp", rest_name="suppress-arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ARP suppression'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """suppress_arp must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=suppress_arp.suppress_arp, is_container='container', presence=False, yang_name="suppress-arp", rest_name="suppress-arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ARP suppression'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='container', is_config=True)""", }) self.__suppress_arp = t if hasattr(self, '_set'): self._set()
Setter method for suppress_arp, mapped from YANG variable /bridge_domain/suppress_arp (container) If this variable is read-only (config: false) in the source YANG file, then _set_suppress_arp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_suppress_arp() directly.
def _dos2unix_cygwin(self, file_path): """ Use cygwin to convert file to unix format """ dos2unix_cmd = \ [os.path.join(self._cygwin_bin_location, "dos2unix.exe"), self._get_cygwin_path(file_path)] process = Popen(dos2unix_cmd, stdout=PIPE, stderr=PIPE, shell=False) process.communicate()
Use cygwin to convert file to unix format
def _push_to_list(lst, func_name, char, line, offset, first_arg_pos, first_item, in_list_literal, lead_spaces, options=None): """ _push_to_list(lst : [str], func_name : str, char : str, line : int, offset : int, first_arg_pos :int , first_item : int, in_list_literal : bool, lead_spaces : int, options : str) Called when an opening bracket is encountered. A hash containing the necessary data to pin point errors and the indentation level is stored in the list and the list returned. """ opts = parse_options(options) keywords = add_keywords(opts) pos_hash = {'character': char, 'line_number': line, 'bracket_pos': offset, 'indent_level': offset + first_arg_pos, # the default value, e.g in normal function 'func_name': func_name, 'spaces': 0} is_macro = is_macro_name(func_name, opts.dialect) two_spacer = is_macro or keywords[func_name] in [KEYWORD1, KEYWORD4] if in_list_literal or char == '{' or (char == '[' and opts.dialect == 'clojure'): # found quoted list or clojure hashmap/vector pos_hash['indent_level'] = first_item elif keywords[func_name] == KEYWORD2: # We only make the if-clause stand out if not in uniform mode pos_hash['indent_level'] = lead_spaces + ((offset + opts.indent_size * 2) if not opts.uniform else (offset + opts.indent_size)) elif func_name != '': if two_spacer: pos_hash['indent_level'] = lead_spaces + offset + opts.indent_size elif keywords[func_name] == KEYWORD3: pos_hash['indent_level'] = lead_spaces + offset + (2 * opts.indent_size) lst.append(pos_hash) try: # A hack to make flets and labels in Lisp not indent like # functions. The 'labels' indentation may not be exactly # perfect. parent_func = lst[-3]['func_name'] # Make 'special' indentation occur only in a Clojure binding block([]) for # letfns non_bind_block = opts.dialect == 'clojure' and lst[-2]['character'] != '[' if keywords[parent_func] == KEYWORD4 and not non_bind_block: lst[-1]['indent_level'] = offset + opts.indent_size except IndexError: pass return lst
_push_to_list(lst : [str], func_name : str, char : str, line : int, offset : int, first_arg_pos :int , first_item : int, in_list_literal : bool, lead_spaces : int, options : str) Called when an opening bracket is encountered. A hash containing the necessary data to pin point errors and the indentation level is stored in the list and the list returned.
def save(criteria, report, report_path, adv_x_val): """ Saves the report and adversarial examples. :param criteria: dict, of the form returned by AttackGoal.get_criteria :param report: dict containing a confidence report :param report_path: string, filepath :param adv_x_val: numpy array containing dataset of adversarial examples """ print_stats(criteria['correctness'], criteria['confidence'], 'bundled') print("Saving to " + report_path) serial.save(report_path, report) assert report_path.endswith(".joblib") adv_x_path = report_path[:-len(".joblib")] + "_adv.npy" np.save(adv_x_path, adv_x_val)
Saves the report and adversarial examples. :param criteria: dict, of the form returned by AttackGoal.get_criteria :param report: dict containing a confidence report :param report_path: string, filepath :param adv_x_val: numpy array containing dataset of adversarial examples
def instructions(self): """ Retrieve the instructions for the rule. """ if self._instructions is None: # Compile the rule into an Instructions instance; we do # this lazily to amortize the cost of the compilation, # then cache that result for efficiency... self._instructions = parser.parse_rule(self.name, self.text) return self._instructions
Retrieve the instructions for the rule.
def checkAndCreate(self, key, payload, hostgroupConf, hostgroupParent, puppetClassesId): """ Function checkAndCreate check And Create procedure for an hostgroup - check the hostgroup is not existing - create the hostgroup - Add puppet classes from puppetClassesId - Add params from hostgroupConf @param key: The hostgroup name or ID @param payload: The description of the hostgroup @param hostgroupConf: The configuration of the host group from the foreman.conf @param hostgroupParent: The id of the parent hostgroup @param puppetClassesId: The dict of puppet classes ids in foreman @return RETURN: The ItemHostsGroup object of an host """ if key not in self: self[key] = payload oid = self[key]['id'] if not oid: return False # Create Hostgroup classes if 'classes' in hostgroupConf.keys(): classList = list() for c in hostgroupConf['classes']: classList.append(puppetClassesId[c]) if not self[key].checkAndCreateClasses(classList): print("Failed in classes") return False # Set params if 'params' in hostgroupConf.keys(): if not self[key].checkAndCreateParams(hostgroupConf['params']): print("Failed in params") return False return oid
Function checkAndCreate check And Create procedure for an hostgroup - check the hostgroup is not existing - create the hostgroup - Add puppet classes from puppetClassesId - Add params from hostgroupConf @param key: The hostgroup name or ID @param payload: The description of the hostgroup @param hostgroupConf: The configuration of the host group from the foreman.conf @param hostgroupParent: The id of the parent hostgroup @param puppetClassesId: The dict of puppet classes ids in foreman @return RETURN: The ItemHostsGroup object of an host
def text(self,txt): """ Print Utf8 encoded alpha-numeric text """ if not txt: return try: txt = txt.decode('utf-8') except: try: txt = txt.decode('utf-16') except: pass self.extra_chars = 0 def encode_char(char): """ Encodes a single utf-8 character into a sequence of esc-pos code page change instructions and character declarations """ char_utf8 = char.encode('utf-8') encoded = '' encoding = self.encoding # we reuse the last encoding to prevent code page switches at every character encodings = { # TODO use ordering to prevent useless switches # TODO Support other encodings not natively supported by python ( Thai, Khazakh, Kanjis ) 'cp437': TXT_ENC_PC437, 'cp850': TXT_ENC_PC850, 'cp852': TXT_ENC_PC852, 'cp857': TXT_ENC_PC857, 'cp858': TXT_ENC_PC858, 'cp860': TXT_ENC_PC860, 'cp863': TXT_ENC_PC863, 'cp865': TXT_ENC_PC865, 'cp866': TXT_ENC_PC866, 'cp862': TXT_ENC_PC862, 'cp720': TXT_ENC_PC720, 'cp936': TXT_ENC_PC936, 'iso8859_2': TXT_ENC_8859_2, 'iso8859_7': TXT_ENC_8859_7, 'iso8859_9': TXT_ENC_8859_9, 'cp1254' : TXT_ENC_WPC1254, 'cp1255' : TXT_ENC_WPC1255, 'cp1256' : TXT_ENC_WPC1256, 'cp1257' : TXT_ENC_WPC1257, 'cp1258' : TXT_ENC_WPC1258, 'katakana' : TXT_ENC_KATAKANA, } remaining = copy.copy(encodings) if not encoding : encoding = 'cp437' while True: # Trying all encoding until one succeeds try: if encoding == 'katakana': # Japanese characters if jcconv: # try to convert japanese text to a half-katakanas kata = jcconv.kata2half(jcconv.hira2kata(char_utf8)) if kata != char_utf8: self.extra_chars += len(kata.decode('utf-8')) - 1 # the conversion may result in multiple characters return encode_str(kata.decode('utf-8')) else: kata = char_utf8 if kata in TXT_ENC_KATAKANA_MAP: encoded = TXT_ENC_KATAKANA_MAP[kata] break else: raise ValueError() else: encoded = char.encode(encoding) break except ValueError: #the encoding failed, select another one and retry if encoding in remaining: del remaining[encoding] if len(remaining) >= 1: encoding = remaining.items()[0][0] else: encoding = 'cp437' encoded = '\xb1' # could not encode, output error character break; if encoding != self.encoding: # if the encoding changed, remember it and prefix the character with # the esc-pos encoding change sequence self.encoding = encoding encoded = encodings[encoding] + encoded return encoded def encode_str(txt): buffer = '' for c in txt: buffer += encode_char(c) return buffer txt = encode_str(txt) # if the utf-8 -> codepage conversion inserted extra characters, # remove double spaces to try to restore the original string length # and prevent printing alignment issues while self.extra_chars > 0: dspace = txt.find(' ') if dspace > 0: txt = txt[:dspace] + txt[dspace+1:] self.extra_chars -= 1 else: break self._raw(txt)
Print Utf8 encoded alpha-numeric text
def batch_iterable(l, n): ''' Chunks iterable into n sized batches Solution from: http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery''' i = iter(l) piece = list(islice(i, n)) while piece: yield piece piece = list(islice(i, n))
Chunks iterable into n sized batches Solution from: http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery
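A usage sketch, assuming `batch_iterable` above is in scope; it also works on generators, since only `iter` and `islice` are used.

for batch in batch_iterable(range(7), 3):
    print(batch)
# [0, 1, 2]
# [3, 4, 5]
# [6]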
def get_available_name(self, name): """ Returns a filename that's free on the target storage system, and available for new content to be written to. """ dir_name, file_name = os.path.split(name) file_root, file_ext = os.path.splitext(file_name) # If the filename already exists, add an underscore and a number (before # the file extension, if one exists) to the filename until the generated # filename doesn't exist. count = itertools.count(1) while self.exists(name): # file_ext includes the dot. name = os.path.join(dir_name, "%s_%s%s" % (file_root, next(count), file_ext)) return name
Returns a filename that's free on the target storage system, and available for new content to be written to.
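The method depends on the storage backend's `self.exists`; the standalone sketch below mimics the same renaming scheme against a plain set of existing names, so `available_name` and `existing` are illustrative stand-ins rather than part of the original API.

import itertools
import os

def available_name(name, existing):
    # Append _1, _2, ... before the extension until the name is free.
    dir_name, file_name = os.path.split(name)
    root, ext = os.path.splitext(file_name)
    count = itertools.count(1)
    while name in existing:
        name = os.path.join(dir_name, "%s_%s%s" % (root, next(count), ext))
    return name

print(available_name("docs/report.pdf", {"docs/report.pdf", "docs/report_1.pdf"}))
# docs/report_2.pdf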
def create_user_pool(PoolName=None, Policies=None, LambdaConfig=None, AutoVerifiedAttributes=None, AliasAttributes=None, SmsVerificationMessage=None, EmailVerificationMessage=None, EmailVerificationSubject=None, SmsAuthenticationMessage=None, MfaConfiguration=None, DeviceConfiguration=None, EmailConfiguration=None, SmsConfiguration=None, UserPoolTags=None, AdminCreateUserConfig=None, Schema=None): """ Creates a new Amazon Cognito user pool and sets the password policy for the pool. See also: AWS API Documentation :example: response = client.create_user_pool( PoolName='string', Policies={ 'PasswordPolicy': { 'MinimumLength': 123, 'RequireUppercase': True|False, 'RequireLowercase': True|False, 'RequireNumbers': True|False, 'RequireSymbols': True|False } }, LambdaConfig={ 'PreSignUp': 'string', 'CustomMessage': 'string', 'PostConfirmation': 'string', 'PreAuthentication': 'string', 'PostAuthentication': 'string', 'DefineAuthChallenge': 'string', 'CreateAuthChallenge': 'string', 'VerifyAuthChallengeResponse': 'string' }, AutoVerifiedAttributes=[ 'phone_number'|'email', ], AliasAttributes=[ 'phone_number'|'email'|'preferred_username', ], SmsVerificationMessage='string', EmailVerificationMessage='string', EmailVerificationSubject='string', SmsAuthenticationMessage='string', MfaConfiguration='OFF'|'ON'|'OPTIONAL', DeviceConfiguration={ 'ChallengeRequiredOnNewDevice': True|False, 'DeviceOnlyRememberedOnUserPrompt': True|False }, EmailConfiguration={ 'SourceArn': 'string', 'ReplyToEmailAddress': 'string' }, SmsConfiguration={ 'SnsCallerArn': 'string', 'ExternalId': 'string' }, UserPoolTags={ 'string': 'string' }, AdminCreateUserConfig={ 'AllowAdminCreateUserOnly': True|False, 'UnusedAccountValidityDays': 123, 'InviteMessageTemplate': { 'SMSMessage': 'string', 'EmailMessage': 'string', 'EmailSubject': 'string' } }, Schema=[ { 'Name': 'string', 'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean', 'DeveloperOnlyAttribute': True|False, 'Mutable': True|False, 'Required': True|False, 'NumberAttributeConstraints': { 'MinValue': 'string', 'MaxValue': 'string' }, 'StringAttributeConstraints': { 'MinLength': 'string', 'MaxLength': 'string' } }, ] ) :type PoolName: string :param PoolName: [REQUIRED] A string used to name the user pool. :type Policies: dict :param Policies: The policies associated with the new user pool. PasswordPolicy (dict) --A container for information about the user pool password policy. MinimumLength (integer) --The minimum length of the password policy that you have set. Cannot be less than 6. RequireUppercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password. RequireLowercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password. RequireNumbers (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one number in their password. RequireSymbols (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password. :type LambdaConfig: dict :param LambdaConfig: The Lambda trigger configuration information for the new user pool. PreSignUp (string) --A pre-registration AWS Lambda trigger. CustomMessage (string) --A custom Message AWS Lambda trigger. PostConfirmation (string) --A post-confirmation AWS Lambda trigger. 
PreAuthentication (string) --A pre-authentication AWS Lambda trigger. PostAuthentication (string) --A post-authentication AWS Lambda trigger. DefineAuthChallenge (string) --Defines the authentication challenge. CreateAuthChallenge (string) --Creates an authentication challenge. VerifyAuthChallengeResponse (string) --Verifies the authentication challenge response. :type AutoVerifiedAttributes: list :param AutoVerifiedAttributes: The attributes to be auto-verified. Possible values: email , phone_number . (string) -- :type AliasAttributes: list :param AliasAttributes: Attributes supported as an alias for this user pool. Possible values: phone_number , email , or preferred_username . (string) -- :type SmsVerificationMessage: string :param SmsVerificationMessage: A string representing the SMS verification message. :type EmailVerificationMessage: string :param EmailVerificationMessage: A string representing the email verification message. :type EmailVerificationSubject: string :param EmailVerificationSubject: A string representing the email verification subject. :type SmsAuthenticationMessage: string :param SmsAuthenticationMessage: A string representing the SMS authentication message. :type MfaConfiguration: string :param MfaConfiguration: Specifies MFA configuration details. :type DeviceConfiguration: dict :param DeviceConfiguration: The device configuration. ChallengeRequiredOnNewDevice (boolean) --Indicates whether a challenge is required on a new device. Only applicable to a new device. DeviceOnlyRememberedOnUserPrompt (boolean) --If true, a device is only remembered on user prompt. :type EmailConfiguration: dict :param EmailConfiguration: The email configuration. SourceArn (string) --The Amazon Resource Name (ARN) of the email source. ReplyToEmailAddress (string) --The REPLY-TO email address. :type SmsConfiguration: dict :param SmsConfiguration: The SMS configuration. SnsCallerArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. ExternalId (string) --The external ID. :type UserPoolTags: dict :param UserPoolTags: The cost allocation tags for the user pool. For more information, see Adding Cost Allocation Tags to Your User Pool (string) -- (string) -- :type AdminCreateUserConfig: dict :param AdminCreateUserConfig: The configuration for AdminCreateUser requests. AllowAdminCreateUserOnly (boolean) --Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app. UnusedAccountValidityDays (integer) --The user account expiration limit, in days, after which the account is no longer usable. To reset the account after that time limit, you must call AdminCreateUser again, specifying 'RESEND' for the MessageAction parameter. The default value for this parameter is 7. InviteMessageTemplate (dict) --The message template to be used for the welcome message to new users. SMSMessage (string) --The message template for SMS messages. EmailMessage (string) --The message template for email messages. EmailSubject (string) --The subject line for email messages. :type Schema: list :param Schema: An array of schema attributes for the new user pool. These attributes can be standard or custom attributes. (dict) --Contains information about the schema attribute. Name (string) --A schema attribute of the name type. AttributeDataType (string) --The attribute data type. DeveloperOnlyAttribute (boolean) --Specifies whether the attribute type is developer only. 
Mutable (boolean) --Specifies whether the attribute can be changed once it has been created. Required (boolean) --Specifies whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail. NumberAttributeConstraints (dict) --Specifies the constraints for an attribute of the number type. MinValue (string) --The minimum value of an attribute that is of the number data type. MaxValue (string) --The maximum value of an attribute that is of the number data type. StringAttributeConstraints (dict) --Specifies the constraints for an attribute of the string type. MinLength (string) --The minimum length of an attribute value of the string type. MaxLength (string) --The maximum length of an attribute value of the string type. :rtype: dict :return: { 'UserPool': { 'Id': 'string', 'Name': 'string', 'Policies': { 'PasswordPolicy': { 'MinimumLength': 123, 'RequireUppercase': True|False, 'RequireLowercase': True|False, 'RequireNumbers': True|False, 'RequireSymbols': True|False } }, 'LambdaConfig': { 'PreSignUp': 'string', 'CustomMessage': 'string', 'PostConfirmation': 'string', 'PreAuthentication': 'string', 'PostAuthentication': 'string', 'DefineAuthChallenge': 'string', 'CreateAuthChallenge': 'string', 'VerifyAuthChallengeResponse': 'string' }, 'Status': 'Enabled'|'Disabled', 'LastModifiedDate': datetime(2015, 1, 1), 'CreationDate': datetime(2015, 1, 1), 'SchemaAttributes': [ { 'Name': 'string', 'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean', 'DeveloperOnlyAttribute': True|False, 'Mutable': True|False, 'Required': True|False, 'NumberAttributeConstraints': { 'MinValue': 'string', 'MaxValue': 'string' }, 'StringAttributeConstraints': { 'MinLength': 'string', 'MaxLength': 'string' } }, ], 'AutoVerifiedAttributes': [ 'phone_number'|'email', ], 'AliasAttributes': [ 'phone_number'|'email'|'preferred_username', ], 'SmsVerificationMessage': 'string', 'EmailVerificationMessage': 'string', 'EmailVerificationSubject': 'string', 'SmsAuthenticationMessage': 'string', 'MfaConfiguration': 'OFF'|'ON'|'OPTIONAL', 'DeviceConfiguration': { 'ChallengeRequiredOnNewDevice': True|False, 'DeviceOnlyRememberedOnUserPrompt': True|False }, 'EstimatedNumberOfUsers': 123, 'EmailConfiguration': { 'SourceArn': 'string', 'ReplyToEmailAddress': 'string' }, 'SmsConfiguration': { 'SnsCallerArn': 'string', 'ExternalId': 'string' }, 'UserPoolTags': { 'string': 'string' }, 'SmsConfigurationFailure': 'string', 'EmailConfigurationFailure': 'string', 'AdminCreateUserConfig': { 'AllowAdminCreateUserOnly': True|False, 'UnusedAccountValidityDays': 123, 'InviteMessageTemplate': { 'SMSMessage': 'string', 'EmailMessage': 'string', 'EmailSubject': 'string' } } } } :returns: (string) -- """ pass
Creates a new Amazon Cognito user pool and sets the password policy for the pool. See also: AWS API Documentation :example: response = client.create_user_pool( PoolName='string', Policies={ 'PasswordPolicy': { 'MinimumLength': 123, 'RequireUppercase': True|False, 'RequireLowercase': True|False, 'RequireNumbers': True|False, 'RequireSymbols': True|False } }, LambdaConfig={ 'PreSignUp': 'string', 'CustomMessage': 'string', 'PostConfirmation': 'string', 'PreAuthentication': 'string', 'PostAuthentication': 'string', 'DefineAuthChallenge': 'string', 'CreateAuthChallenge': 'string', 'VerifyAuthChallengeResponse': 'string' }, AutoVerifiedAttributes=[ 'phone_number'|'email', ], AliasAttributes=[ 'phone_number'|'email'|'preferred_username', ], SmsVerificationMessage='string', EmailVerificationMessage='string', EmailVerificationSubject='string', SmsAuthenticationMessage='string', MfaConfiguration='OFF'|'ON'|'OPTIONAL', DeviceConfiguration={ 'ChallengeRequiredOnNewDevice': True|False, 'DeviceOnlyRememberedOnUserPrompt': True|False }, EmailConfiguration={ 'SourceArn': 'string', 'ReplyToEmailAddress': 'string' }, SmsConfiguration={ 'SnsCallerArn': 'string', 'ExternalId': 'string' }, UserPoolTags={ 'string': 'string' }, AdminCreateUserConfig={ 'AllowAdminCreateUserOnly': True|False, 'UnusedAccountValidityDays': 123, 'InviteMessageTemplate': { 'SMSMessage': 'string', 'EmailMessage': 'string', 'EmailSubject': 'string' } }, Schema=[ { 'Name': 'string', 'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean', 'DeveloperOnlyAttribute': True|False, 'Mutable': True|False, 'Required': True|False, 'NumberAttributeConstraints': { 'MinValue': 'string', 'MaxValue': 'string' }, 'StringAttributeConstraints': { 'MinLength': 'string', 'MaxLength': 'string' } }, ] ) :type PoolName: string :param PoolName: [REQUIRED] A string used to name the user pool. :type Policies: dict :param Policies: The policies associated with the new user pool. PasswordPolicy (dict) --A container for information about the user pool password policy. MinimumLength (integer) --The minimum length of the password policy that you have set. Cannot be less than 6. RequireUppercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password. RequireLowercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password. RequireNumbers (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one number in their password. RequireSymbols (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password. :type LambdaConfig: dict :param LambdaConfig: The Lambda trigger configuration information for the new user pool. PreSignUp (string) --A pre-registration AWS Lambda trigger. CustomMessage (string) --A custom Message AWS Lambda trigger. PostConfirmation (string) --A post-confirmation AWS Lambda trigger. PreAuthentication (string) --A pre-authentication AWS Lambda trigger. PostAuthentication (string) --A post-authentication AWS Lambda trigger. DefineAuthChallenge (string) --Defines the authentication challenge. CreateAuthChallenge (string) --Creates an authentication challenge. VerifyAuthChallengeResponse (string) --Verifies the authentication challenge response. 
:type AutoVerifiedAttributes: list :param AutoVerifiedAttributes: The attributes to be auto-verified. Possible values: email , phone_number . (string) -- :type AliasAttributes: list :param AliasAttributes: Attributes supported as an alias for this user pool. Possible values: phone_number , email , or preferred_username . (string) -- :type SmsVerificationMessage: string :param SmsVerificationMessage: A string representing the SMS verification message. :type EmailVerificationMessage: string :param EmailVerificationMessage: A string representing the email verification message. :type EmailVerificationSubject: string :param EmailVerificationSubject: A string representing the email verification subject. :type SmsAuthenticationMessage: string :param SmsAuthenticationMessage: A string representing the SMS authentication message. :type MfaConfiguration: string :param MfaConfiguration: Specifies MFA configuration details. :type DeviceConfiguration: dict :param DeviceConfiguration: The device configuration. ChallengeRequiredOnNewDevice (boolean) --Indicates whether a challenge is required on a new device. Only applicable to a new device. DeviceOnlyRememberedOnUserPrompt (boolean) --If true, a device is only remembered on user prompt. :type EmailConfiguration: dict :param EmailConfiguration: The email configuration. SourceArn (string) --The Amazon Resource Name (ARN) of the email source. ReplyToEmailAddress (string) --The REPLY-TO email address. :type SmsConfiguration: dict :param SmsConfiguration: The SMS configuration. SnsCallerArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. ExternalId (string) --The external ID. :type UserPoolTags: dict :param UserPoolTags: The cost allocation tags for the user pool. For more information, see Adding Cost Allocation Tags to Your User Pool (string) -- (string) -- :type AdminCreateUserConfig: dict :param AdminCreateUserConfig: The configuration for AdminCreateUser requests. AllowAdminCreateUserOnly (boolean) --Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app. UnusedAccountValidityDays (integer) --The user account expiration limit, in days, after which the account is no longer usable. To reset the account after that time limit, you must call AdminCreateUser again, specifying 'RESEND' for the MessageAction parameter. The default value for this parameter is 7. InviteMessageTemplate (dict) --The message template to be used for the welcome message to new users. SMSMessage (string) --The message template for SMS messages. EmailMessage (string) --The message template for email messages. EmailSubject (string) --The subject line for email messages. :type Schema: list :param Schema: An array of schema attributes for the new user pool. These attributes can be standard or custom attributes. (dict) --Contains information about the schema attribute. Name (string) --A schema attribute of the name type. AttributeDataType (string) --The attribute data type. DeveloperOnlyAttribute (boolean) --Specifies whether the attribute type is developer only. Mutable (boolean) --Specifies whether the attribute can be changed once it has been created. Required (boolean) --Specifies whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail. NumberAttributeConstraints (dict) --Specifies the constraints for an attribute of the number type. 
MinValue (string) --The minimum value of an attribute that is of the number data type. MaxValue (string) --The maximum value of an attribute that is of the number data type. StringAttributeConstraints (dict) --Specifies the constraints for an attribute of the string type. MinLength (string) --The minimum length of an attribute value of the string type. MaxLength (string) --The maximum length of an attribute value of the string type. :rtype: dict :return: { 'UserPool': { 'Id': 'string', 'Name': 'string', 'Policies': { 'PasswordPolicy': { 'MinimumLength': 123, 'RequireUppercase': True|False, 'RequireLowercase': True|False, 'RequireNumbers': True|False, 'RequireSymbols': True|False } }, 'LambdaConfig': { 'PreSignUp': 'string', 'CustomMessage': 'string', 'PostConfirmation': 'string', 'PreAuthentication': 'string', 'PostAuthentication': 'string', 'DefineAuthChallenge': 'string', 'CreateAuthChallenge': 'string', 'VerifyAuthChallengeResponse': 'string' }, 'Status': 'Enabled'|'Disabled', 'LastModifiedDate': datetime(2015, 1, 1), 'CreationDate': datetime(2015, 1, 1), 'SchemaAttributes': [ { 'Name': 'string', 'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean', 'DeveloperOnlyAttribute': True|False, 'Mutable': True|False, 'Required': True|False, 'NumberAttributeConstraints': { 'MinValue': 'string', 'MaxValue': 'string' }, 'StringAttributeConstraints': { 'MinLength': 'string', 'MaxLength': 'string' } }, ], 'AutoVerifiedAttributes': [ 'phone_number'|'email', ], 'AliasAttributes': [ 'phone_number'|'email'|'preferred_username', ], 'SmsVerificationMessage': 'string', 'EmailVerificationMessage': 'string', 'EmailVerificationSubject': 'string', 'SmsAuthenticationMessage': 'string', 'MfaConfiguration': 'OFF'|'ON'|'OPTIONAL', 'DeviceConfiguration': { 'ChallengeRequiredOnNewDevice': True|False, 'DeviceOnlyRememberedOnUserPrompt': True|False }, 'EstimatedNumberOfUsers': 123, 'EmailConfiguration': { 'SourceArn': 'string', 'ReplyToEmailAddress': 'string' }, 'SmsConfiguration': { 'SnsCallerArn': 'string', 'ExternalId': 'string' }, 'UserPoolTags': { 'string': 'string' }, 'SmsConfigurationFailure': 'string', 'EmailConfigurationFailure': 'string', 'AdminCreateUserConfig': { 'AllowAdminCreateUserOnly': True|False, 'UnusedAccountValidityDays': 123, 'InviteMessageTemplate': { 'SMSMessage': 'string', 'EmailMessage': 'string', 'EmailSubject': 'string' } } } } :returns: (string) --
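A minimal boto3 call sketch; the region, pool name, and policy values are placeholders, and a real call needs valid AWS credentials.

import boto3

client = boto3.client('cognito-idp', region_name='us-east-1')   # region is an example
response = client.create_user_pool(
    PoolName='example-pool',
    Policies={'PasswordPolicy': {'MinimumLength': 8, 'RequireNumbers': True}},
    AutoVerifiedAttributes=['email'],
)
print(response['UserPool']['Id'])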
def read_mol2(self, path, columns=None):
    """Reads Mol2 files (unzipped or gzipped) from local drive

    Note that if your mol2 file contains more than one molecule,
    only the first molecule is loaded into the DataFrame

    Attributes
    ----------
    path : str
        Path to the Mol2 file in .mol2 format or gzipped format (.mol2.gz)

    columns : dict or None (default: None)
        If None, this method expects a 9-column ATOM section that contains
        the following columns:
        {0:('atom_id', int), 1:('atom_name', str), 2:('x', float),
         3:('y', float), 4:('z', float), 5:('atom_type', str),
         6:('subst_id', int), 7:('subst_name', str), 8:('charge', float)}
        If your Mol2 files are formatted differently, you can provide your
        own column_mapping dictionary in a format similar to the one above.
        However, note that not all methods may be supported then.

    Returns
    ---------
    self

    """
    mol2_code, mol2_lines = next(split_multimol2(path))
    self._load_mol2(mol2_lines, mol2_code, columns)
    self.mol2_path = path
    return self
Reads Mol2 files (unzipped or gzipped) from local drive Note that if your mol2 file contains more than one molecule, only the first molecule is loaded into the DataFrame Attributes ---------- path : str Path to the Mol2 file in .mol2 format or gzipped format (.mol2.gz) columns : dict or None (default: None) If None, this method expects a 9-column ATOM section that contains the following columns: {0:('atom_id', int), 1:('atom_name', str), 2:('x', float), 3:('y', float), 4:('z', float), 5:('atom_type', str), 6:('subst_id', int), 7:('subst_name', str), 8:('charge', float)} If your Mol2 files are formatted differently, you can provide your own column_mapping dictionary in a format similar to the one above. However, note that not all methods may be supported then. Returns --------- self
def resetVector(x1, x2): """ Copies the contents of vector x1 into vector x2. @param x1 (array) binary vector to be copied @param x2 (array) binary vector where x1 is copied """ size = len(x1) for i in range(size): x2[i] = x1[i]
Copies the contents of vector x1 into vector x2. @param x1 (array) binary vector to be copied @param x2 (array) binary vector where x1 is copied
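A usage sketch, assuming `resetVector` above is in scope; for NumPy arrays of equal length the loop is equivalent to `x2[:] = x1`.

import numpy as np

src = np.array([0, 1, 1, 0])
dst = np.zeros(4, dtype=src.dtype)
resetVector(src, dst)
print(dst)   # [0 1 1 0]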
def prepare_rows(table): """ Prepare the rows so they're all strings, and all the same length. :param table: A 2D grid of anything. :type table: [[``object``]] :return: A table of strings, where every row is the same length. :rtype: [[``str``]] """ num_columns = max(len(row) for row in table) for row in table: while len(row) < num_columns: row.append('') for i in range(num_columns): row[i] = str(row[i]) if row[i] is not None else '' return table
Prepare the rows so they're all strings, and all the same length. :param table: A 2D grid of anything. :type table: [[``object``]] :return: A table of strings, where every row is the same length. :rtype: [[``str``]]
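A usage sketch, assuming `prepare_rows` above is in scope; note that it mutates the rows in place as well as returning the table.

table = [[1, 'a'], ['b'], [None, 2, 3]]
print(prepare_rows(table))
# [['1', 'a', ''], ['b', '', ''], ['', '2', '3']]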
def valuegetter(*fieldspecs, **kwargs): """ Modelled after `operator.itemgetter`. Takes a variable number of specs and returns a function, which applied to any `pymarc.Record` returns the matching values. Specs are in the form `field` or `field.subfield`, e.g. `020` or `020.9`. Example: >>> from marcx import Record, valuegetter >>> from urllib import urlopen >>> record = Record(data=urlopen("http://goo.gl/lfJnw9").read()) In two steps: >>> getter = valuegetter('020.a') >>> getter(record) <generator object values at 0x2d97690> >>> set(getter(record)) set(['020161622X']) Or in one step: >>> set(valuegetter('020.a')(record)) set(['020161622X']) A variable number of specs can be passed: >>> set(valuegetter('001', '005', '700.a')(record)) set(['20040816084925.0', 'Thomas, David,', '11778504']) Non-existent field tags can be passed - they are ignored: >>> set(valuegetter('002')(record)) set([]) @see also: `Record.itervalues` """ combine_subfields = kwargs.get('combine_subfields', False) pattern = r'(?P<field>[^.]+)(.(?P<subfield>[^.]+))?' def values(record): for s in fieldspecs: match = re.match(pattern, s) if not match: continue gd = match.groupdict() for field in record.get_fields(gd['field']): if gd['subfield']: for value in field.get_subfields(gd['subfield']): yield value else: if combine_subfields: yield field.value() else: if int(gd['field']) < 10: yield field.value() else: for value in field.subfields[1::2]: yield value values.__doc__ = 'returns a value generator over %s' % ( ', '.join(fieldspecs)) return values
Modelled after `operator.itemgetter`. Takes a variable number of specs and returns a function, which applied to any `pymarc.Record` returns the matching values. Specs are in the form `field` or `field.subfield`, e.g. `020` or `020.9`. Example: >>> from marcx import Record, valuegetter >>> from urllib import urlopen >>> record = Record(data=urlopen("http://goo.gl/lfJnw9").read()) In two steps: >>> getter = valuegetter('020.a') >>> getter(record) <generator object values at 0x2d97690> >>> set(getter(record)) set(['020161622X']) Or in one step: >>> set(valuegetter('020.a')(record)) set(['020161622X']) A variable number of specs can be passed: >>> set(valuegetter('001', '005', '700.a')(record)) set(['20040816084925.0', 'Thomas, David,', '11778504']) Non-existent field tags can be passed - they are ignored: >>> set(valuegetter('002')(record)) set([]) @see also: `Record.itervalues`
def get_org(self, organization_name=''):
    """
    Retrieves an organization via the given org name.
    If given an empty string, prompts the user for an org name.
    """
    self.organization_name = organization_name
    if organization_name == '':
        self.organization_name = raw_input('Organization: ')
    print 'Getting organization.'
    # Use the (possibly prompted-for) stored name rather than the original argument,
    # which would still be the empty string after prompting.
    self.org_retrieved = self.logged_in_gh.organization(self.organization_name)
Retrieves an organization via the given org name. If given an empty string, prompts the user for an org name.
def stream(command, stdin=None, env=os.environ, timeout=None): """ Yields a generator of a command's output. For line oriented commands only. Args: command (str or list): a command without pipes. If it's not a list, ``shlex.split`` is applied. stdin (file like object): stream to use as the command's standard input. env (dict): The environment in which to execute the command. PATH should be defined. timeout (int): Amount of time in seconds to give the command to complete. The ``timeout`` utility must be installed to use this feature. Yields: The output stream for the command. It should typically be wrapped in a ``reader``. """ if not isinstance(command, list): command = shlex.split(command) cmd = which(command[0]) if cmd is None: path = env.get("PATH", "") raise Exception("Command [%s] not in PATH [%s]" % (command[0], path)) command[0] = cmd if timeout: if not timeout_command[0]: raise Exception("Timeout specified but timeout command not available.") command = timeout_command + [str(timeout)] + command output = None try: output = Popen(command, env=env, stdin=stdin, **stream_options) yield output.stdout finally: if output: output.wait()
Yields a generator of a command's output. For line oriented commands only. Args: command (str or list): a command without pipes. If it's not a list, ``shlex.split`` is applied. stdin (file like object): stream to use as the command's standard input. env (dict): The environment in which to execute the command. PATH should be defined. timeout (int): Amount of time in seconds to give the command to complete. The ``timeout`` utility must be installed to use this feature. Yields: The output stream for the command. It should typically be wrapped in a ``reader``.
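A usage sketch, assuming `stream` and its module-level helpers (`which`, `timeout_command`, `stream_options`) are in scope; whether lines come back as bytes or text depends on how `stream_options` configures the pipe.

for out in stream('echo hello'):
    for line in out:
        print(line.rstrip())   # hello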
def _from_dict(cls, _dict): """Initialize a SpeechRecognitionResult object from a json dictionary.""" args = {} if 'final' in _dict or 'final_results' in _dict: args['final_results'] = _dict.get('final') or _dict.get( 'final_results') else: raise ValueError( 'Required property \'final\' not present in SpeechRecognitionResult JSON' ) if 'alternatives' in _dict: args['alternatives'] = [ SpeechRecognitionAlternative._from_dict(x) for x in (_dict.get('alternatives')) ] else: raise ValueError( 'Required property \'alternatives\' not present in SpeechRecognitionResult JSON' ) if 'keywords_result' in _dict: args['keywords_result'] = _dict.get('keywords_result') if 'word_alternatives' in _dict: args['word_alternatives'] = [ WordAlternativeResults._from_dict(x) for x in (_dict.get('word_alternatives')) ] return cls(**args)
Initialize a SpeechRecognitionResult object from a json dictionary.
def refkeys(self,fields): "returns {ModelClass:list_of_pkey_tuples}. see syncschema.RefKey. Don't use this yet." # todo doc: better explanation of what refkeys are and how fields plays in dd=collections.defaultdict(list) if any(f not in self.REFKEYS for f in fields): raise ValueError(fields,'not all in',self.REFKEYS.keys()) for f in fields: rk=self.REFKEYS[f] for model in rk.refmodels: dd[model].extend(rk.pkeys(self,f)) return dd
returns {ModelClass:list_of_pkey_tuples}. see syncschema.RefKey. Don't use this yet.
def resize_pty(self, width=80, height=24, width_pixels=0, height_pixels=0): """ Resize the pseudo-terminal. This can be used to change the width and height of the terminal emulation created in a previous `get_pty` call. :param int width: new width (in characters) of the terminal screen :param int height: new height (in characters) of the terminal screen :param int width_pixels: new width (in pixels) of the terminal screen :param int height_pixels: new height (in pixels) of the terminal screen :raises SSHException: if the request was rejected or the channel was closed """ if self.closed or self.eof_received or self.eof_sent or not self.active: raise SSHException('Channel is not open') m = Message() m.add_byte(cMSG_CHANNEL_REQUEST) m.add_int(self.remote_chanid) m.add_string('window-change') m.add_boolean(False) m.add_int(width) m.add_int(height) m.add_int(width_pixels) m.add_int(height_pixels) self.transport._send_user_message(m)
Resize the pseudo-terminal. This can be used to change the width and height of the terminal emulation created in a previous `get_pty` call. :param int width: new width (in characters) of the terminal screen :param int height: new height (in characters) of the terminal screen :param int width_pixels: new width (in pixels) of the terminal screen :param int height_pixels: new height (in pixels) of the terminal screen :raises SSHException: if the request was rejected or the channel was closed
def to_result(self): """Convert to the Linter.run return value""" text = [self.text] if self.note: text.append(self.note) return { 'lnum': self.line_num, 'col': self.column, 'text': ' - '.join(text), 'type': self.types.get(self.message_type, '') }
Convert to the Linter.run return value
def get_item_list_by_name(self, item_list_name, category='own'): """ Retrieve an item list from the server as an ItemList object :type item_list_name: String :param item_list_name: name of the item list to retrieve :type category: String :param category: the category of lists to fetch. At the time of writing, supported values are "own" and "shared" :rtype: ItemList :returns: The ItemList :raises: APIError if the request was not successful """ resp = self.api_request('/item_lists') for item_list in resp[category]: if item_list['name'] == item_list_name: return self.get_item_list(item_list['item_list_url']) raise ValueError('List does not exist: ' + item_list_name)
Retrieve an item list from the server as an ItemList object :type item_list_name: String :param item_list_name: name of the item list to retrieve :type category: String :param category: the category of lists to fetch. At the time of writing, supported values are "own" and "shared" :rtype: ItemList :returns: The ItemList :raises: APIError if the request was not successful
def trim_docstring(docstring): """Removes indentation from triple-quoted strings. This is the function specified in PEP 257 to handle docstrings: https://www.python.org/dev/peps/pep-0257/. Args: docstring: str, a python docstring. Returns: str, docstring with indentation removed. """ if not docstring: return '' # If you've got a line longer than this you have other problems... max_indent = 1 << 29 # Convert tabs to spaces (following the normal Python rules) # and split into a list of lines: lines = docstring.expandtabs().splitlines() # Determine minimum indentation (first line doesn't count): indent = max_indent for line in lines[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) # Remove indentation (first line is special): trimmed = [lines[0].strip()] if indent < max_indent: for line in lines[1:]: trimmed.append(line[indent:].rstrip()) # Strip off trailing and leading blank lines: while trimmed and not trimmed[-1]: trimmed.pop() while trimmed and not trimmed[0]: trimmed.pop(0) # Return a single string: return '\n'.join(trimmed)
Removes indentation from triple-quoted strings. This is the function specified in PEP 257 to handle docstrings: https://www.python.org/dev/peps/pep-0257/. Args: docstring: str, a python docstring. Returns: str, docstring with indentation removed.
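A worked example of the trimming behaviour; the output shown follows directly from the algorithm above (common leading indentation after the first line is removed, blank edges are dropped):

doc = """Summary line.

        Details are indented
            and nested.
    """
print(trim_docstring(doc))
# Summary line.
#
# Details are indented
#     and nested.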
def _getFirstOnBit(self, input): """ Return the bit offset of the first bit to be set in the encoder output. For periodic encoders, this can be a negative number when the encoded output wraps around. """ if input == SENTINEL_VALUE_FOR_MISSING_DATA: return [None] else: if input < self.minval: # Don't clip periodic inputs. Out-of-range input is always an error if self.clipInput and not self.periodic: if self.verbosity > 0: print("Clipped input %s=%.2f to minval %.2f" % (self.name, input, self.minval)) input = self.minval else: raise Exception('input (%s) less than range (%s - %s)' % (str(input), str(self.minval), str(self.maxval))) if self.periodic: # Don't clip periodic inputs. Out-of-range input is always an error if input >= self.maxval: raise Exception('input (%s) greater than periodic range (%s - %s)' % (str(input), str(self.minval), str(self.maxval))) else: if input > self.maxval: if self.clipInput: if self.verbosity > 0: print("Clipped input %s=%.2f to maxval %.2f" % (self.name, input, self.maxval)) input = self.maxval else: raise Exception('input (%s) greater than range (%s - %s)' % (str(input), str(self.minval), str(self.maxval))) if self.periodic: centerbin = int((input - self.minval) * self.nInternal / self.range) \
+ self.padding else: centerbin = int(((input - self.minval) + self.resolution/2) \
/ self.resolution ) + self.padding # We use the first bit to be set in the encoded output as the bucket index minbin = centerbin - self.halfwidth return [minbin]
Return the bit offset of the first bit to be set in the encoder output. For periodic encoders, this can be a negative number when the encoded output wraps around.
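An arithmetic illustration of the non-periodic branch; the encoder parameters below are assumed values chosen for the example, not defaults of any particular encoder:

# Assumed parameters: minval=0, resolution=1.0, padding=0, halfwidth=2, input=3.4
# centerbin = int((3.4 - 0 + 1.0 / 2) / 1.0) + 0 = 3
# minbin    = centerbin - halfwidth = 3 - 2 = 1   -> returned bucket index is [1]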
def missing_datetimes(self, finite_datetimes): """ Override in subclasses to do bulk checks. Returns a sorted list. This is a conservative base implementation that brutally checks completeness, instance by instance. Inadvisable as it may be slow. """ return [d for d in finite_datetimes if not self._instantiate_task_cls(self.datetime_to_parameter(d)).complete()]
Override in subclasses to do bulk checks. Returns a sorted list. This is a conservative base implementation that brutally checks completeness, instance by instance. Inadvisable as it may be slow.
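A sketch of the kind of bulk override the docstring suggests: one cheap listing of finished partitions replaces the per-instance complete() checks. existing_dates() is a hypothetical helper, not part of the snippet above:

# Hypothetical bulk override under the assumption that finished partitions can be
# listed in one call; only the method name and signature come from the base class.
def missing_datetimes(self, finite_datetimes):
    done = set(self.existing_dates())
    return sorted(d for d in finite_datetimes if d not in done)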
def make_ranges(self, file_url):
        """
        Divides file_url size into an array of ranges to be downloaded by workers.
        :param file_url: ProjectFileUrl: file url to download
        :return: [(int,int)]: array of (start, end) tuples
        """
        size = file_url.size
        bytes_per_chunk = self.determine_bytes_per_chunk(size)
        start = 0
        ranges = []
        while size > 0:
            amount = bytes_per_chunk
            if amount > size:
                amount = size
            ranges.append((start, start + amount - 1))
            start += amount
            size -= amount
        return ranges
Divides file_url size into an array of ranges to be downloaded by workers.
:param file_url: ProjectFileUrl: file url to download
:return: [(int,int)]: array of (start, end) tuples
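A worked example; the chunk size is assumed, and the ranges follow directly from the loop above:

# For a 10-byte file with determine_bytes_per_chunk() returning 4, the loop yields
# inclusive byte ranges [(0, 3), (4, 7), (8, 9)] -- suitable for HTTP Range headers
# such as "bytes=0-3", covering the whole file with no gaps or overlap.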
def standardDeviation2d(img, ksize=5, blurred=None):
    '''
    calculate the spatially resolved standard deviation
    for a given 2d array

    ksize -> kernel size

    blurred(optional) -> with same ksize gaussian filtered image
        setting this parameter reduces processing time
    '''
    # isinstance check (the original `ksize not in (list, tuple)` compared against
    # the type objects themselves, so a tuple ksize would be wrapped a second time)
    if not isinstance(ksize, (list, tuple)):
        ksize = (ksize, ksize)

    if blurred is None:
        blurred = gaussian_filter(img, ksize)
    else:
        assert blurred.shape == img.shape

    std = np.empty_like(img)

    _calc(img, ksize[0], ksize[1], blurred, std)

    return std
calculate the spatially resolved standard deviation for a given 2d array

ksize -> kernel size

blurred(optional) -> with same ksize gaussian filtered image setting this parameter reduces processing time
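A minimal usage sketch; the imports are assumptions about what the snippet relies on (numpy and scipy's gaussian_filter), and _calc is an unseen helper the function still requires:

import numpy as np
from scipy.ndimage import gaussian_filter

img = np.random.rand(256, 256)
# Reusing a pre-computed blur avoids filtering the image twice when the caller
# needs the smoothed version anyway.
smoothed = gaussian_filter(img, (5, 5))
local_std = standardDeviation2d(img, ksize=5, blurred=smoothed)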