def iterprogress(sized_iterable):
    """ Iterate something, printing a progress bar to stderr """
    pb = ProgressBar(0, len(sized_iterable))
    for i, value in enumerate(sized_iterable):
        yield value
        pb.update_and_print(i, sys.stderr)

def session(self):
    """
    Get session object to benefit from connection pooling.

    http://docs.python-requests.org/en/master/user/advanced/#session-objects

    :rtype: requests.Session
    """
    if self._session is None:
        self._session = requests.Session()
        self._session.headers.update(self._headers)
    return self._session

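# A minimal, self-contained sketch of the same lazy-session pattern shown
# above (the class name and header values here are illustrative, not from
# the source):
import requests

class PooledClient:
    def __init__(self, headers=None):
        self._headers = headers or {}
        self._session = None

    @property
    def session(self):
        # Created once, then reused so TCP connections are pooled.
        if self._session is None:
            self._session = requests.Session()
            self._session.headers.update(self._headers)
        return self._session

# client = PooledClient(headers={"User-Agent": "demo/1.0"})
# client.session.get("https://example.com")   # opens a connection
# client.session.get("https://example.com")   # reuses the pooled one
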
def remove_device(self, device: Union[DeviceType, str]) -> None:
    """Remove the given |Node| or |Element| object from the actual
    |Nodes| or |Elements| object.

    You can pass either a string or a device:

    >>> from hydpy import Node, Nodes
    >>> nodes = Nodes('node_x', 'node_y')
    >>> node_x, node_y = nodes
    >>> nodes.remove_device(Node('node_y'))
    >>> nodes
    Nodes("node_x")
    >>> nodes.remove_device(Node('node_x'))
    >>> nodes
    Nodes()
    >>> nodes.remove_device(Node('node_z'))
    Traceback (most recent call last):
    ...
    ValueError: While trying to remove the device `node_z` from a \
Nodes object, the following error occurred: The actual Nodes object does \
not handle such a device.

    Method |Devices.remove_device| is disabled for immutable |Nodes|
    and |Elements| objects:

    >>> nodes.mutable = False
    >>> nodes.remove_device('node_z')
    Traceback (most recent call last):
    ...
    RuntimeError: While trying to remove the device `node_z` from a \
Nodes object, the following error occurred: Removing devices from \
immutable Nodes objects is not allowed.
    """
    try:
        if self.mutable:
            _device = self.get_contentclass()(device)
            try:
                del self._name2device[_device.name]
            except KeyError:
                raise ValueError(
                    f'The actual {objecttools.classname(self)} '
                    f'object does not handle such a device.')
            del _id2devices[_device][id(self)]
        else:
            raise RuntimeError(
                f'Removing devices from immutable '
                f'{objecttools.classname(self)} objects is not allowed.')
    except BaseException:
        objecttools.augment_excmessage(
            f'While trying to remove the device `{device}` from a '
            f'{objecttools.classname(self)} object')

def get_dsn(d):
    """
    Get the dataset name from a record

    :param dict d: Metadata
    :return str: Dataset name
    """
    try:
        return d["dataSetName"]
    except Exception as e:
        logger_misc.warn("get_dsn: Exception: No dataset name found, unable to continue: {}".format(e))
        exit(1)

def check_ontology(fname):
    """Reads the ontology yaml file and does basic verification."""
    with open(fname, 'r') as stream:
        y = yaml.safe_load(stream)
    import pprint
    pprint.pprint(y)

def normalize_path(path):
    """
    Convert a path to its canonical, case-normalized, absolute version.
    """
    return os.path.normcase(os.path.realpath(os.path.expanduser(path)))

def _authn_response(self, in_response_to, consumer_url,
                    sp_entity_id, identity=None, name_id=None,
                    status=None, authn=None, issuer=None, policy=None,
                    sign_assertion=False, sign_response=False,
                    best_effort=False, encrypt_assertion=False,
                    encrypt_cert_advice=None, encrypt_cert_assertion=None,
                    authn_statement=None,
                    encrypt_assertion_self_contained=False,
                    encrypted_advice_attributes=False, pefim=False,
                    sign_alg=None, digest_alg=None, farg=None,
                    session_not_on_or_after=None):
    """ Create a response. A layer of indirection.

    :param in_response_to: The session identifier of the request
    :param consumer_url: The URL which should receive the response
    :param sp_entity_id: The entity identifier of the SP
    :param identity: A dictionary with attributes and values that are
        expected to be the bases for the assertion in the response.
    :param name_id: The identifier of the subject
    :param status: The status of the response
    :param authn: A dictionary containing information about the
        authn context.
    :param issuer: The issuer of the response
    :param policy:
    :param sign_assertion: Whether the assertion should be signed or not
    :param sign_response: Whether the response should be signed or not
    :param best_effort: Even if not all the SP's demands can be met,
        send a response.
    :param encrypt_assertion: True if assertions should be encrypted.
    :param encrypt_assertion_self_contained: True if all encrypted
        assertions should have all namespaces self-contained.
    :param encrypted_advice_attributes: True if assertions in the advice
        element should be encrypted.
    :param encrypt_cert_advice: Certificate to be used for encryption of
        assertions in the advice element.
    :param encrypt_cert_assertion: Certificate to be used for encryption
        of assertions.
    :param authn_statement: Authentication statement.
    :param pefim: True if a response according to the PEFIM profile
        should be created.
    :param farg: Argument to pass on to the assertion constructor
    :return: A response instance
    """
    if farg is None:
        assertion_args = {}

    args = {}
    # if identity:
    _issuer = self._issuer(issuer)

    # if encrypt_assertion and show_nameid:
    #    tmp_name_id = name_id
    #    name_id = None
    #    name_id = None
    #    tmp_authn = authn
    #    authn = None
    #    tmp_authn_statement = authn_statement
    #    authn_statement = None

    if pefim:
        encrypted_advice_attributes = True
        encrypt_assertion_self_contained = True
        assertion_attributes = self.setup_assertion(
            None, sp_entity_id, None, None, None, policy, None, None,
            identity, best_effort, sign_response, farg=farg)
        assertion = self.setup_assertion(
            authn, sp_entity_id, in_response_to, consumer_url, name_id,
            policy, _issuer, authn_statement, [], True, sign_response,
            farg=farg, session_not_on_or_after=session_not_on_or_after)
        assertion.advice = saml.Advice()

        # assertion.advice.assertion_id_ref.append(saml.AssertionIDRef())
        # assertion.advice.assertion_uri_ref.append(saml.AssertionURIRef())
        assertion.advice.assertion.append(assertion_attributes)
    else:
        assertion = self.setup_assertion(
            authn, sp_entity_id, in_response_to, consumer_url, name_id,
            policy, _issuer, authn_statement, identity, True,
            sign_response, farg=farg,
            session_not_on_or_after=session_not_on_or_after)

    to_sign = []
    if not encrypt_assertion:
        if sign_assertion:
            assertion.signature = pre_signature_part(assertion.id,
                                                     self.sec.my_cert, 2,
                                                     sign_alg=sign_alg,
                                                     digest_alg=digest_alg)
            to_sign.append((class_name(assertion), assertion.id))

    args["assertion"] = assertion

    if (self.support_AssertionIDRequest() or self.support_AuthnQuery()):
        self.session_db.store_assertion(assertion, to_sign)

    return self._response(
        in_response_to, consumer_url, status, issuer, sign_response,
        to_sign, sp_entity_id=sp_entity_id,
        encrypt_assertion=encrypt_assertion,
        encrypt_cert_advice=encrypt_cert_advice,
        encrypt_cert_assertion=encrypt_cert_assertion,
        encrypt_assertion_self_contained=encrypt_assertion_self_contained,
        encrypted_advice_attributes=encrypted_advice_attributes,
        sign_assertion=sign_assertion,
        pefim=pefim, sign_alg=sign_alg, digest_alg=digest_alg, **args)

def only_newer(copy_func):
    """
    Wrap a copy function (like shutil.copy2) to return
    the dst if it's newer than the source.
    """
    @functools.wraps(copy_func)
    def wrapper(src, dst, *args, **kwargs):
        is_newer_dst = (
            dst.exists()
            and dst.getmtime() >= src.getmtime()
        )
        if is_newer_dst:
            return dst
        return copy_func(src, dst, *args, **kwargs)
    return wrapper

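# Usage sketch: wrap shutil.copy2 so a repeated sync skips up-to-date files.
# Note the wrapper calls dst.exists()/dst.getmtime(), so it assumes
# path.py-style Path arguments rather than plain strings (an assumption
# inferred from the method names above, not stated in the source).
import functools
import shutil
from path import Path  # third-party "path.py" package, assumed available

copy_if_newer = only_newer(shutil.copy2)
# copy_if_newer(Path("src.txt"), Path("backup/src.txt"))  # copies
# copy_if_newer(Path("src.txt"), Path("backup/src.txt"))  # returns dst, no copy
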
def canonicalize_edge(edge_data: EdgeData) -> Tuple[str, Optional[Tuple], Optional[Tuple]]:
    """Canonicalize the edge to a tuple based on the relation, subject
    modifications, and object modifications."""
    return (
        edge_data[RELATION],
        _canonicalize_edge_modifications(edge_data.get(SUBJECT)),
        _canonicalize_edge_modifications(edge_data.get(OBJECT)),
    )

def plugin_for(cls, model):
    '''
    Find and return a plugin for this model.
    Uses inheritance to find a model where the plugin is registered.
    '''
    logger.debug("Getting a plugin for: %s", model)
    if not issubclass(model, Model):
        return
    if model in cls.plugins:
        return cls.plugins[model]
    for b in model.__bases__:
        p = cls.plugin_for(b)
        if p:
            return p

def LoadFromXml(self, node, handle):
    """Method updates the object from the xml representation of the
    managed object."""
    self.SetHandle(handle)
    if node.hasAttributes():
        # attributes = node._get_attributes()
        # attCount = attributes._get_length()
        attributes = node.attributes
        attCount = len(attributes)
        for i in range(attCount):
            attNode = attributes.item(i)
            # attr = UcsUtils.WordU(attNode._get_name())
            attr = UcsUtils.WordU(attNode.localName)
            if UcsUtils.FindClassIdInMoMetaIgnoreCase(self.classId) is not None:
                if attr in UcsUtils.GetUcsPropertyMetaAttributeList(self.classId):
                    # self.setattr(attr, str(attNode.nodeValue))
                    self.setattr(attr, str(attNode.value))
                else:
                    # self.setattr(UcsUtils.WordU(attr), str(attNode.nodeValue))
                    self.setattr(UcsUtils.WordU(attr), str(attNode.value))
            else:
                # self.setattr(UcsUtils.WordU(attr), str(attNode.nodeValue))
                self.setattr(UcsUtils.WordU(attr), str(attNode.value))

    if self.getattr("Rn") is None and self.getattr("Dn") is not None:
        self.setattr("Rn", str(re.sub(r'^.*/', '', self.getattr("Dn"))))

    if node.hasChildNodes():
        # childList = node._get_childNodes()
        # childCount = childList._get_length()
        childList = node.childNodes
        childCount = len(childList)
        for i in range(childCount):
            childNode = childList.item(i)
            if childNode.nodeType != Node.ELEMENT_NODE:
                continue
            if childNode.localName in self.propMoMeta.fieldNames:
                # .LoadFromXml(childNode, handle)
                pass  # TODO: Need code analysis.
            # if childNode.localName in self.propMoMeta.childFieldNames:
            c = ManagedObject(UcsUtils.WordU(childNode.localName))
            self.child.append(c)
            c.LoadFromXml(childNode, handle)

def get_src_or_dst(mode, path_type):
    """
    User sets the path to a LiPD source location

    :param str mode: "read" or "write" mode
    :param str path_type: "directory" or "file"
    :return str path: dir path to files
    :return list files: files chosen
    """
    logger_directory.info("enter set_src_or_dst")
    _path = ""
    _files = ""
    invalid = True
    count = 0
    # Did you forget to enter a mode?
    if not mode:
        invalid = False
    # Special case for gui reading a single or multi-select file(s).
    elif mode == "read" and path_type == "file":
        _path, _files = browse_dialog_file()
        # Return early to skip prompts, since they're not needed
        return _path, _files
    # All other cases, prompt user to choose directory
    else:
        prompt = get_src_or_dst_prompt(mode)
        # Loop max of 3 times, then default to Downloads folder if too many failed attempts
        while invalid and prompt:
            # Prompt user to choose target path
            _path, count = get_src_or_dst_path(prompt, count)
            if _path:
                invalid = False
    logger_directory.info("exit set_src_or_dst")
    return _path, _files

def main(master_dsn, slave_dsn, tables, blocking=False):
    """DB Replication app.

    This script will replicate data from a mysql master to other databases
    (including mysql, postgres, sqlite).

    This script only supports a very limited replication:

    1. data only. The script only replicates data, so you have to make sure
       the tables already exist in the slave db.
    2. pk only. The script replicates data by pk; when a row's pk changes,
       it retrieves the row from the master and writes it into the slave.

    :param master_dsn: mysql dsn with row-based binlog enabled.
    :param slave_dsn: slave dsn, most databases supported including mysql,
        postgres, sqlite etc.
    :param tables: the tables that need to be replicated
    :param blocking: by default, the script only reads the existing binlog,
        replicates it and exits. If set to True, this script will run as a
        daemon, waiting for more mysql binlog to replicate.
    """
    # currently only supports mysql master
    assert master_dsn.startswith("mysql")
    logger = logging.getLogger(__name__)
    logger.info("replicating tables: %s" % ", ".join(tables))
    repl_db_sub(master_dsn, slave_dsn, tables)
    mysql_pub(master_dsn, blocking=blocking)

def main(verbose=True):
    """Build and debug an application programmatically

    For a list of GDB MI commands, see
    https://www.sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI.html
    """
    # Build C program
    if not find_executable(MAKE_CMD):
        print(
            'Could not find executable "%s". Ensure it is installed and on your $PATH.'
            % MAKE_CMD
        )
        exit(1)
    subprocess.check_output([MAKE_CMD, "-C", SAMPLE_C_CODE_DIR, "--quiet"])

    # Initialize object that manages gdb subprocess
    gdbmi = GdbController(verbose=verbose)

    # Send gdb commands. Gdb machine interface commands are easier to script around,
    # hence the name "machine interface".
    # Responses are automatically printed as they are received if verbose is True.
    # Responses are returned after writing, by default.

    # Load the file
    responses = gdbmi.write("-file-exec-and-symbols %s" % SAMPLE_C_BINARY)
    # Get list of source files used to compile the binary
    responses = gdbmi.write("-file-list-exec-source-files")
    # Add breakpoint
    responses = gdbmi.write("-break-insert main")
    # Run
    responses = gdbmi.write("-exec-run")
    responses = gdbmi.write("-exec-next")
    responses = gdbmi.write("-exec-next")
    responses = gdbmi.write("-exec-continue")  # noqa: F841

    # gdbmi.gdb_process will be None because the gdb subprocess (and its inferior
    # program) will be terminated
    gdbmi.exit()

def deserialize_properties(props_struct: struct_pb2.Struct) -> Any:
    """
    Deserializes a protobuf `struct_pb2.Struct` into a Python dictionary
    containing normal Python types.
    """
    # Check out this link for details on what sort of types Protobuf is going to generate:
    # https://developers.google.com/protocol-buffers/docs/reference/python-generated
    #
    # We assume that we are deserializing properties that we got from a Resource RPC endpoint,
    # which has type `Struct` in our gRPC proto definition.
    if _special_sig_key in props_struct:
        if props_struct[_special_sig_key] == _special_asset_sig:
            # This is an asset. Re-hydrate this object into an Asset.
            if "path" in props_struct:
                return known_types.new_file_asset(props_struct["path"])
            if "text" in props_struct:
                return known_types.new_string_asset(props_struct["text"])
            if "uri" in props_struct:
                return known_types.new_remote_asset(props_struct["uri"])
            raise AssertionError("Invalid asset encountered when unmarshaling resource property")
        elif props_struct[_special_sig_key] == _special_archive_sig:
            # This is an archive. Re-hydrate this object into an Archive.
            if "assets" in props_struct:
                return known_types.new_asset_archive(deserialize_property(props_struct["assets"]))
            if "path" in props_struct:
                return known_types.new_file_archive(props_struct["path"])
            if "uri" in props_struct:
                return known_types.new_remote_archive(props_struct["uri"])
        elif props_struct[_special_sig_key] == _special_secret_sig:
            raise AssertionError("this version of the Pulumi SDK does not support first-class secrets")

        raise AssertionError("Unrecognized signature when unmarshaling resource property")

    # Struct is duck-typed like a dictionary, so we can iterate over it in the normal ways.
    output = {}
    for k, v in list(props_struct.items()):
        value = deserialize_property(v)
        # We treat values that deserialize to "None" as if they don't exist.
        if value is not None:
            output[k] = value

    return output

def do_pot(self):
    """
    Sync the template with the python code.
    """
    files_to_translate = []
    log.debug("Collecting python sources for pot ...")
    for source_path in self._source_paths:
        for source_path in self._iter_suffix(path=source_path, suffix=".py"):
            log.debug("... add to pot: {source}".format(source=str(source_path)))
            files_to_translate.append(str(source_path))
    for system_file in self.SYSTEM_SOURCE_FILES:
        files_to_translate.append(str(self._system_path / system_file))
    # FIXME: use separate domain for system source translations? Merge them when generating mo's?
    log.debug("Finished collecting sources.")
    pot_path = (self._po_path / self._basename).with_suffix(".pot")
    command = ["xgettext", "--keyword=_", "--keyword=_translate",
               "--output={output}".format(output=str(pot_path))]
    command.extend(files_to_translate)
    check_call(command)
    log.debug("pot file \"{pot}\" created!".format(pot=str(pot_path)))
    pot_copy_path = self._mo_path / pot_path.name
    log.debug("Copying pot file to mo path: {pot_copy_path}".format(pot_copy_path=str(pot_copy_path)))
    shutil.copy(str(pot_path), str(pot_copy_path))

def IDENTITY(val):
    '''
    This is a basic "equality" index keygen, primarily meant to be used
    for things like::

        Model.query.filter(col='value')

    Where ``FULL_TEXT`` would transform a sentence like "A Simple Sentence"
    into an inverted index searchable by the words "a", "simple", and/or
    "sentence", ``IDENTITY`` will only be searchable by the original full
    sentence with the same capitalization - "A Simple Sentence". See
    ``IDENTITY_CI`` for the same function, only case-insensitive.
    '''
    if not val:
        return None
    if not isinstance(val, six.string_types_ex):
        val = str(val)
    return [val]

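# Illustrative behaviour of the keygen above (assuming six.string_types_ex
# matches str, as in the library's six shim):
assert IDENTITY("A Simple Sentence") == ["A Simple Sentence"]
assert IDENTITY("") is None            # falsy values index to nothing
assert IDENTITY(42) == ["42"]          # non-strings are coerced with str()
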
def near_reduce(self, coords_set, threshold=1e-4):
    """
    Prunes coordinate set for coordinates that are within threshold

    Args:
        coords_set (Nx3 array-like): list or array of coordinates
        threshold (float): threshold value for distance
    """
    unique_coords = []
    coords_set = [self.slab.lattice.get_fractional_coords(coords)
                  for coords in coords_set]
    for coord in coords_set:
        if not in_coord_list_pbc(unique_coords, coord, threshold):
            unique_coords += [coord]
    return [self.slab.lattice.get_cartesian_coords(coords)
            for coords in unique_coords]

def prepare_searchlight_mvpa_data(images, conditions, data_type=np.float32,
                                  random=RandomType.NORANDOM):
    """Obtain the data for activity-based voxel selection using Searchlight

    Average the activity within epochs and z-score within subject,
    while maintaining the 3D brain structure. In order to save memory,
    the data is processed subject by subject instead of reading all in
    before processing. Assuming all subjects live in the identical cube.

    Parameters
    ----------
    images: Iterable[SpatialImage]
        Data.
    conditions: List[UniqueLabelConditionSpec]
        Condition specification.
    data_type
        Type to cast image to.
    random: Optional[RandomType]
        Randomize the image data within subject or not.

    Returns
    -------
    processed_data: 4D array in shape [brain 3D + epoch]
        averaged epoch by epoch processed data
    labels: 1D array
        contains labels of the data
    """
    time1 = time.time()
    epoch_info = generate_epochs_info(conditions)
    num_epochs = len(epoch_info)
    processed_data = None
    logger.info(
        'there are %d subjects, and in total %d epochs' %
        (len(conditions), num_epochs)
    )
    labels = np.empty(num_epochs)
    # assign labels
    for idx, epoch in enumerate(epoch_info):
        labels[idx] = epoch[0]
    # counting the epochs per subject for z-scoring
    subject_count = np.zeros(len(conditions), dtype=np.int32)

    logger.info('start to apply masks and separate epochs')
    for sid, f in enumerate(images):
        data = f.get_data().astype(data_type)
        [d1, d2, d3, d4] = data.shape
        if random == RandomType.REPRODUCIBLE:
            data = data.reshape((d1 * d2 * d3, d4))
            _randomize_single_subject(data, seed=sid)
            data = data.reshape((d1, d2, d3, d4))
        elif random == RandomType.UNREPRODUCIBLE:
            data = data.reshape((d1 * d2 * d3, d4))
            _randomize_single_subject(data)
            data = data.reshape((d1, d2, d3, d4))
        if processed_data is None:
            processed_data = np.empty([d1, d2, d3, num_epochs],
                                      dtype=data_type)
        # averaging
        for idx, epoch in enumerate(epoch_info):
            if sid == epoch[1]:
                subject_count[sid] += 1
                processed_data[:, :, :, idx] = \
                    np.mean(data[:, :, :, epoch[2]:epoch[3]], axis=3)

        logger.debug(
            'file %s is loaded and processed, with data shape %s',
            f.get_filename(), data.shape
        )
    # z-scoring
    cur_epoch = 0
    for i in subject_count:
        if i > 1:
            processed_data[:, :, :, cur_epoch:cur_epoch + i] = \
                zscore(processed_data[:, :, :, cur_epoch:cur_epoch + i],
                       axis=3, ddof=0)
        cur_epoch += i
    # if zscore fails (standard deviation is zero),
    # set all values to be zero
    processed_data = np.nan_to_num(processed_data)
    time2 = time.time()
    logger.info(
        'data processed for activity-based voxel selection, takes %.2f s' %
        (time2 - time1)
    )

    return processed_data, labels

async def synchronize(self, pid, vendor_specific=None):
    """Send an object synchronization request to the CN."""
    return await self._request_pyxb(
        "post",
        ["synchronize", pid],
        {},
        mmp_dict={"pid": pid},
        vendor_specific=vendor_specific,
    )

def p_compilerDirective(p):
    """compilerDirective : '#' PRAGMA pragmaName '(' pragmaParameter ')'"""
    directive = p[3].lower()
    param = p[5]
    if directive == 'include':
        fname = param
        if p.parser.file:
            if os.path.dirname(p.parser.file):
                fname = os.path.join(os.path.dirname(p.parser.file), fname)
        p.parser.mofcomp.compile_file(fname,
                                      p.parser.handle.default_namespace)
    elif directive == 'namespace':
        p.parser.handle.default_namespace = param
        if param not in p.parser.qualcache:
            p.parser.qualcache[param] = NocaseDict()
    p[0] = None

def start(self):
    """
    Starts the #ThreadPool. Must be ended with #stop(). Use the
    context-manager interface to ensure starting and stopping the
    #ThreadPool.
    """
    if self.__running:
        raise RuntimeError('ThreadPool already running')
    for t in self.__threads:
        t.start()
    self.__running = True

def get_reply(self, message):
    """
    Get the Reply object for the given message.

    :param message: the message to process
    :return: the Reply object obtained
    """
    session_storage = self.session_storage

    id = None
    session = None
    if session_storage and hasattr(message, "source"):
        id = to_binary(message.source)
        session = session_storage[id]

    handlers = self.get_handlers(message.type)
    try:
        for handler, args_count in handlers:
            args = [message, session][:args_count]
            reply = handler(*args)
            if session_storage and id:
                session_storage[id] = session
            if reply:
                return process_function_reply(reply, message=message)
    except:
        self.logger.exception("Catch an exception")

def pip_r(self, requirements, raise_on_error=True):
    """
    Install all requirements contained in the given file path.
    Waits for command to finish.

    Parameters
    ----------
    requirements: str
        Path to requirements.txt
    raise_on_error: bool, default True
        If True then raise ValueError if stderr is not empty
    """
    cmd = "pip install -r %s" % requirements
    return self.wait(cmd, raise_on_error=raise_on_error)

def grid_widgets(self):
    """Put widgets in the grid"""
    sticky = {"sticky": "nswe"}
    self.label.grid(row=1, column=1, columnspan=2, **sticky)
    self.dropdown.grid(row=2, column=1, **sticky)
    self.entry.grid(row=2, column=2, **sticky)
    self.button.grid(row=3, column=1, columnspan=2, **sticky)
    self.radio_one.grid(row=4, column=1, **sticky)
    self.radio_two.grid(row=4, column=2, **sticky)
    self.checked.grid(row=5, column=1, **sticky)
    self.unchecked.grid(row=5, column=2, **sticky)
    self.scroll.grid(row=1, column=3, rowspan=8, padx=5, **sticky)
    self.tree.grid(row=6, column=1, columnspan=2, **sticky)
    self.scale_entry.grid(row=7, column=1, columnspan=2, **sticky)
    self.combo.grid(row=8, column=1, columnspan=2, **sticky)
    self.progress.grid(row=9, column=1, columnspan=2, padx=5, pady=5, **sticky)

def __clear_covers(self):
    """Clear all covered matrix cells"""
    for i in range(self.n):
        self.row_covered[i] = False
        self.col_covered[i] = False

def get_rest_token(self):
    """
    Returns an auth token for making calls to the eventhub REST API.

    :rtype: str
    """
    uri = urllib.parse.quote_plus(
        "https://{}.{}/{}".format(self.sb_name, self.namespace_suffix, self.eh_name))
    sas = self.sas_key.encode('utf-8')
    expiry = str(int(time.time() + 10000))
    string_to_sign = ('{}\n{}'.format(uri, expiry)).encode('utf-8')
    signed_hmac_sha256 = hmac.HMAC(sas, string_to_sign, hashlib.sha256)
    signature = urllib.parse.quote(base64.b64encode(signed_hmac_sha256.digest()))
    return 'SharedAccessSignature sr={}&sig={}&se={}&skn={}' \
        .format(uri, signature, expiry, self.policy)

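# Self-contained sketch of the same SAS-token computation with the instance
# attributes replaced by plain parameters (the function and parameter names
# here are illustrative, not part of the source API):
import base64
import hashlib
import hmac
import time
import urllib.parse

def make_sas_token(resource_uri, policy_name, key, ttl_seconds=10000):
    uri = urllib.parse.quote_plus(resource_uri)
    expiry = str(int(time.time() + ttl_seconds))
    string_to_sign = '{}\n{}'.format(uri, expiry).encode('utf-8')
    digest = hmac.HMAC(key.encode('utf-8'), string_to_sign,
                       hashlib.sha256).digest()
    signature = urllib.parse.quote(base64.b64encode(digest))
    return 'SharedAccessSignature sr={}&sig={}&se={}&skn={}'.format(
        uri, signature, expiry, policy_name)

# token = make_sas_token('https://myns.servicebus.windows.net/myhub',
#                        'RootManageSharedAccessKey', 'policy-key-string')
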
def parse_data(data, type, **kwargs):
    """
    Return an OSM networkx graph from the input OSM data

    Parameters
    ----------
    data : string
    type : string ('xml' or 'pbf')

    >>> graph = parse_data(data, 'xml')
    """
    suffixes = {
        'xml': '.osm',
        'pbf': '.pbf',
    }
    try:
        suffix = suffixes[type]
    except KeyError:
        raise ValueError('Unknown data type "%s"' % type)

    fd, filename = tempfile.mkstemp(suffix=suffix)
    try:
        os.write(fd, data)
        os.close(fd)
        return parse_file(filename, **kwargs)
    finally:
        os.remove(filename)

def get_visible_elements(self, locator, params=None, timeout=None):
    """
    Get elements both present AND visible in the DOM.

    If timeout is 0 (zero) return a list of WebElement instances or None,
    else wait and retry for timeout and raise TimeoutException should the
    elements not be found.

    :param locator: locator tuple
    :param params: (optional) locator params
    :param timeout: (optional) time to wait for elements (default: self._explicit_wait)
    :return: WebElement instances
    """
    return self.get_present_elements(locator, params, timeout, True)

def visible_to_user(self, user):
    """Get a list of visible polls for a given user (usually request.user).

    These visible polls will be those that either have no groups assigned
    to them (and are therefore public) or those in which the user is a
    member.
    """
    return Poll.objects.filter(
        Q(groups__in=user.groups.all()) | Q(groups__isnull=True))

def get_version_from_tag(tag_name: str) -> Optional[str]:
    """Get git hash from tag

    :param tag_name: Name of the git tag (i.e. 'v1.0.0')
    :return: sha1 hash of the commit
    """
    debug('get_version_from_tag({})'.format(tag_name))
    check_repo()
    for i in repo.tags:
        if i.name == tag_name:
            return i.commit.hexsha
    return None

def _gamma_difference_hrf(tr, oversampling=50, time_length=32., onset=0.,
                          delay=6, undershoot=16., dispersion=1.,
                          u_dispersion=1., ratio=0.167):
    """ Compute an hrf as the difference of two gamma functions

    Parameters
    ----------
    tr : float
        scan repeat time, in seconds

    oversampling : int, optional (default=50)
        temporal oversampling factor

    time_length : float, optional (default=32)
        hrf kernel length, in seconds

    onset : float
        onset time of the hrf

    delay : float, optional
        delay parameter of the hrf (in s.)

    undershoot : float, optional
        undershoot parameter of the hrf (in s.)

    dispersion : float, optional
        dispersion parameter for the first gamma function

    u_dispersion : float, optional
        dispersion parameter for the second gamma function

    ratio : float, optional
        ratio of the two gamma components

    Returns
    -------
    hrf : array of shape(length / tr * oversampling, dtype=float)
        hrf sampling on the oversampled time grid
    """
    from scipy.stats import gamma
    dt = tr / oversampling
    time_stamps = np.linspace(0, time_length,
                              np.rint(float(time_length) / dt).astype(int))
    time_stamps -= onset
    hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) - \
        ratio * gamma.pdf(
            time_stamps, undershoot / u_dispersion, dt / u_dispersion)
    hrf /= hrf.sum()
    return hrf

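# Usage sketch: sample an HRF kernel on a fine grid and inspect it.
# The values below just restate the function's defaults; nothing is tuned.
import numpy as np

hrf = _gamma_difference_hrf(tr=1.0, oversampling=50, time_length=32.0)
print(hrf.shape)  # (1600,) -> 32 s sampled every 1.0 / 50 = 0.02 s
print(hrf.sum())  # ~1.0, since the kernel is normalized in place
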
def load(self, path):
    """
    Load the catalog from file

    Parameters
    ----------
    path: str
        The path to the file
    """
    # Get the object
    DB = joblib.load(path)

    # Load the attributes
    self.catalog = DB.catalog
    self.n_sources = DB.n_sources
    self.name = DB.name
    self.history = DB.history

    del DB

def clean_single_word(word, lemmatizing="wordnet"):
    """
    Performs stemming or lemmatizing on a single word.

    If we are to search for a word in a clean bag-of-words, we need to
    search it after the same kind of preprocessing.

    Inputs:  - word: A string containing the source word.
             - lemmatizing: A string containing one of the following:
               "porter", "snowball" or "wordnet".

    Output:  - lemma: The resulting clean lemma or stem.
    """
    if lemmatizing == "porter":
        porter = PorterStemmer()
        lemma = porter.stem(word)
    elif lemmatizing == "snowball":
        snowball = SnowballStemmer('english')
        lemma = snowball.stem(word)
    elif lemmatizing == "wordnet":
        wordnet = WordNetLemmatizer()
        lemma = wordnet.lemmatize(word)
    else:
        raise RuntimeError("Invalid lemmatizer argument: %r" % lemmatizing)

    return lemma

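# Illustrative outputs, assuming NLTK and its wordnet corpus are installed
# (expected values from NLTK's stemmers; treat as indicative, not guaranteed):
print(clean_single_word("running", lemmatizing="porter"))    # run
print(clean_single_word("running", lemmatizing="snowball"))  # run
print(clean_single_word("geese", lemmatizing="wordnet"))     # goose
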
def get_index_list(self, as_json=False):
    """
    get list of indices and codes

    params:
        as_json: True | False

    returns: a list | json of index codes
    """
    url = self.index_url
    req = Request(url, None, self.headers)
    # raises URLError or HTTPError
    resp = self.opener.open(req)
    resp = byte_adaptor(resp)
    resp_list = json.load(resp)['data']
    index_list = [str(item['name']) for item in resp_list]
    return self.render_response(index_list, as_json)

def main():
    """Parse the command-line arguments and run the tool."""
    parser = argparse.ArgumentParser(description='XMPP version checker',
                                     parents=[XMPPSettings.get_arg_parser()])
    parser.add_argument('source', metavar='SOURCE',
                        help='Source JID')
    parser.add_argument('target', metavar='TARGET', nargs='?',
                        help='Target JID (default: domain of SOURCE)')
    parser.add_argument('--debug',
                        action='store_const', dest='log_level',
                        const=logging.DEBUG, default=logging.INFO,
                        help='Print debug messages')
    parser.add_argument('--quiet', const=logging.ERROR,
                        action='store_const', dest='log_level',
                        help='Print only error messages')
    args = parser.parse_args()

    settings = XMPPSettings()
    settings.load_arguments(args)

    if settings.get("password") is None:
        password = getpass("{0!r} password: ".format(args.source))
        if sys.version_info.major < 3:
            password = password.decode("utf-8")
        settings["password"] = password

    if sys.version_info.major < 3:
        args.source = args.source.decode("utf-8")
    source = JID(args.source)
    if args.target:
        if sys.version_info.major < 3:
            args.target = args.target.decode("utf-8")
        target = JID(args.target)
    else:
        target = JID(source.domain)

    logging.basicConfig(level=args.log_level)

    checker = VersionChecker(source, target, settings)
    try:
        checker.run()
    except KeyboardInterrupt:
        checker.disconnect()

def add_side_to_basket(self, item, quantity=1):
    '''
    Add a side to the current basket.

    :param Item item: Item from menu.
    :param int quantity: The quantity of side to be added.
    :return: A response having added a side to the current basket.
    :rtype: requests.Response
    '''
    item_variant = item[VARIANT.PERSONAL]
    params = {
        'productSkuId': item_variant['productSkuId'],
        'quantity': quantity,
        'ComplimentaryItems': []
    }
    return self.__post('/Basket/AddProduct', json=params)

def simple_cache(func):
    """
    Save results for the :meth:'path.using_module' classmethod.
    When Python 3.2 is available, use functools.lru_cache instead.
    """
    saved_results = {}

    def wrapper(cls, module):
        if module in saved_results:
            return saved_results[module]
        saved_results[module] = func(cls, module)
        return saved_results[module]
    return wrapper

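# Usage sketch (hypothetical classmethod; only the (cls, module) call shape
# matters). Note the cache key is the module alone, shared across classes:
class Demo:
    @classmethod
    @simple_cache
    def using_module(cls, module):
        print("computing for", module)  # printed only on the first call
        return (cls, module)

Demo.using_module("posixpath")  # computes and caches
Demo.using_module("posixpath")  # served from saved_results; no print
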
def check(actions, request, target=None):
    """Check user permission.

    Check if the user has permission to the action according
    to policy setting.

    :param actions: list of scope and action to do policy checks on,
        the composition of which is (scope, action). Multiple actions
        are treated as a logical AND.

        * scope: service type managing the policy for action
        * action: string representing the action to be checked

          this should be colon separated for clarity, i.e.

            | compute:create_instance
            | compute:attach_volume
            | volume:attach_volume

        for a policy action that requires a single action, actions
        should look like

            | "(("compute", "compute:create_instance"),)"

        for a multiple action check, actions should look like

            | "(("identity", "identity:list_users"),
            |   ("identity", "identity:list_roles"))"

    :param request: django http request object. If not specified,
        credentials must be passed.
    :param target: dictionary representing the object of the action
        for object creation this should be a dictionary representing the
        location of the object e.g. {'project_id': object.project_id}
    :returns: boolean if the user has permission or not for the actions.
    """
    if target is None:
        target = {}
    user = auth_utils.get_user(request)

    # Several service policy engines default to a project id check for
    # ownership. Since the user is already scoped to a project, if a
    # different project id has not been specified use the currently scoped
    # project's id.
    #
    # The reason is the operator can edit the local copies of the service
    # policy file. If a rule is removed, then the default rule is used. We
    # don't want to block all actions because the operator did not fully
    # understand the implication of editing the policy file. Additionally,
    # the service APIs will correct us if we are too permissive.
    if target.get('project_id') is None:
        target['project_id'] = user.project_id
    if target.get('tenant_id') is None:
        target['tenant_id'] = target['project_id']
    # same for user_id
    if target.get('user_id') is None:
        target['user_id'] = user.id

    domain_id_keys = [
        'domain_id',
        'project.domain_id',
        'user.domain_id',
        'group.domain_id'
    ]
    # populates domain id keys with user's current domain id
    for key in domain_id_keys:
        if target.get(key) is None:
            target[key] = user.user_domain_id

    credentials = _user_to_credentials(user)
    domain_credentials = _domain_to_credentials(request, user)
    # if there is a domain token use the domain_id instead of the
    # user's domain
    if domain_credentials:
        credentials['domain_id'] = domain_credentials.get('domain_id')

    enforcer = _get_enforcer()

    for action in actions:
        scope, action = action[0], action[1]
        if scope in enforcer:
            # this is for handling the v3 policy file and will only be
            # needed when a domain scoped token is present
            if scope == 'identity' and domain_credentials:
                # use domain credentials
                if not _check_credentials(enforcer[scope], action,
                                          target, domain_credentials):
                    return False
            # use project credentials
            if not _check_credentials(enforcer[scope], action,
                                      target, credentials):
                return False
        # if no policy for scope, allow action, underlying API will
        # ultimately block the action if not permitted, treat as though
        # allowed
    return True

def unpack_directory(filename, extract_dir, progress_filter=default_filter):
    """"Unpack" a directory, using the same interface as for archives

    Raises ``UnrecognizedFormat`` if `filename` is not a directory
    """
    if not os.path.isdir(filename):
        raise UnrecognizedFormat("%s is not a directory" % (filename,))

    paths = {filename: ('', extract_dir)}
    for base, dirs, files in os.walk(filename):
        src, dst = paths[base]
        for d in dirs:
            paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
        for f in files:
            name = src + f
            target = os.path.join(dst, f)
            target = progress_filter(src + f, target)
            if not target:
                continue  # skip non-files
            ensure_directory(target)
            f = os.path.join(base, f)
            shutil.copyfile(f, target)
            shutil.copystat(f, target)

def add_plugins(self, page, placeholder):
    """
    Add a "TextPlugin" in all languages.
    """
    for language_code, lang_name in iter_languages(self.languages):
        for no in range(1, self.dummy_text_count + 1):
            add_plugin_kwargs = self.get_add_plugin_kwargs(
                page, no, placeholder, language_code, lang_name)
            log.info(
                'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
                placeholder, placeholder.pk, lang_name, no)
            plugin = add_plugin(
                placeholder=placeholder,
                language=language_code,
                **add_plugin_kwargs)
            log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
    placeholder.save()

def register(self, receiver_id, receiver):
    """Register a receiver."""
    assert receiver_id not in self.receivers
    self.receivers[receiver_id] = receiver(receiver_id)

def wrap(ptr, base=None):
    """Wrap the given pointer with shiboken and return the appropriate QObject

    :returns: if ptr is not None, returns a QObject that is cast to the
        appropriate class
    :rtype: QObject | None
    :raises: None
    """
    if ptr is None:
        return None
    ptr = long(ptr)  # Ensure type
    if base is None:
        qObj = shiboken.wrapInstance(long(ptr), QtCore.QObject)
        metaObj = qObj.metaObject()
        cls = metaObj.className()
        superCls = metaObj.superClass().className()
        if hasattr(QtGui, cls):
            base = getattr(QtGui, cls)
        elif hasattr(QtGui, superCls):
            base = getattr(QtGui, superCls)
        else:
            base = QtGui.QWidget
    return shiboken.wrapInstance(long(ptr), base)

def indian_punctuation_tokenize_regex(self, untokenized_string: str):
    """A trivial tokenizer which just tokenizes on the punctuation
    boundaries. This also includes punctuation, namely the purna virama
    ("|") and deergha virama ("॥"), for Indian language scripts.

    :type untokenized_string: str
    :param untokenized_string: A string containing one or more sentences.
    :rtype : list of strings
    """
    # The replace deletes the '|' from the punctuation string provided
    # by the library
    modified_punctuations = string.punctuation.replace("|", "")
    indian_punctuation_pattern = re.compile(
        '([' + modified_punctuations + '\u0964\u0965' + ']|\|+)')
    tok_str = indian_punctuation_pattern.sub(
        r' \1 ', untokenized_string.replace('\t', ' '))
    return re.sub(r'[ ]+', u' ', tok_str).strip(' ').split(' ')

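# Usage sketch: `self` is unused above, so the method can be exercised
# directly for illustration (not the library's public calling convention):
import re
import string

tokens = indian_punctuation_tokenize_regex(None, "राम आया। सीता भी आई।")
print(tokens)  # expected: ['राम', 'आया', '।', 'सीता', 'भी', 'आई', '।']
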
def destroy(self):
    """Custom destructor that deletes the fragment and removes
    itself from the adapter it was added to.
    """
    #: Destroy fragment
    fragment = self.fragment
    if fragment:
        #: Stop listening
        fragment.setFragmentListener(None)

        #: Cleanup from fragment
        if self.adapter is not None:
            self.adapter.removeFragment(self.fragment)

        del self.fragment
    super(AndroidFragment, self).destroy()

def fake_getaddrinfo(
        host, port, family=None, socktype=None, proto=None, flags=None):
    """drop-in replacement for :py:func:`socket.getaddrinfo`"""
    return [(socket.AF_INET, socket.SOCK_STREAM,
             socket.IPPROTO_TCP, '', (host, port))]

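# Usage sketch: monkey-patch name resolution in a test, restoring the
# original afterwards (the socket module is also what the snippet above uses):
import socket

_real_getaddrinfo = socket.getaddrinfo
socket.getaddrinfo = fake_getaddrinfo
try:
    print(socket.getaddrinfo("anything.invalid", 80))
    # [(AF_INET, SOCK_STREAM, IPPROTO_TCP, '', ('anything.invalid', 80))]
finally:
    socket.getaddrinfo = _real_getaddrinfo
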
def _load_multipolygon(tokens, string):
    """
    Has similar inputs and return value to :func:`_load_point`, except
    is for handling MULTIPOLYGON geometry.

    :returns:
        A GeoJSON `dict` MultiPolygon representation of the WKT ``string``.
    """
    open_paren = next(tokens)
    if not open_paren == '(':
        raise ValueError(INVALID_WKT_FMT % string)

    polygons = []
    while True:
        try:
            poly = _load_polygon(tokens, string)
            polygons.append(poly['coordinates'])
            t = next(tokens)
            if t == ')':
                # we're done; no more polygons.
                break
        except StopIteration:
            # If we reach this, the WKT is not valid.
            raise ValueError(INVALID_WKT_FMT % string)

    return dict(type='MultiPolygon', coordinates=polygons)

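# Usage sketch: the parser expects a token iterator positioned just past the
# "MULTIPOLYGON" keyword. The regex tokenizer below is a simplification, and
# _load_polygon / INVALID_WKT_FMT come from the surrounding module:
import re

wkt = "MULTIPOLYGON (((0 0, 1 0, 1 1, 0 0)))"
tokens = iter(re.findall(r"[()]|[-+]?\d*\.?\d+|,", wkt))
# result = _load_multipolygon(tokens, wkt)
# -> {'type': 'MultiPolygon', 'coordinates': [...]}
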
def next_color(self):
    """
    Returns the next color. Currently returns a random color from the
    Colorbrewer 11-class diverging BrBG palette.

    Returns
    -------
    next_rgb_color: tuple of ImageColor
    """
    next_rgb_color = ImageColor.getrgb(random.choice(BrBG_11.hex_colors))
    return next_rgb_color

def create_client(self, client_id, client_secret):
    """
    Create a new client for use by applications.
    """
    assert self.is_admin, "Must authenticate() as admin to create client"
    return self.uaac.create_client(client_id, client_secret)

def import_signed(cls, name, certificate, private_key):
    """
    Import a signed certificate and private key as a client protection CA.

    This is a shortcut method to the 3 step process:

        * Create CA with name
        * Import certificate
        * Import private key

    Create the CA::

        ClientProtectionCA.import_signed(
            name='myclientca',
            certificate='/pathto/server.crt',
            private_key='/pathto/server.key')

    :param str name: name of client protection CA
    :param str certificate: fully qualified path or string of certificate
    :param str private_key: fully qualified path or string of private key
    :raises CertificateImportError: failure during import
    :raises IOError: failure to find certificate files specified
    :rtype: ClientProtectionCA
    """
    ca = ClientProtectionCA.create(name=name)
    try:
        ca.import_certificate(certificate)
        ca.import_private_key(private_key)
    except CertificateImportError:
        ca.delete()
        raise
    return ca

def add_function(self, func):
    """ Record line profiling information for the given Python function.
    """
    try:
        # func_code does not exist in Python3
        code = func.__code__
    except AttributeError:
        import warnings
        warnings.warn("Could not extract a code object for the object %r"
                      % (func,))
        return
    if code not in self.code_map:
        self.code_map[code] = {}
        self.functions.append(func)

def pipeline_name(self):
    """
    Get pipeline name of current stage instance.

    Because instantiating a stage instance could be performed in different
    ways, and those return different results, we have to check where to
    get the name of the pipeline from.

    :return: pipeline name.
    """
    if 'pipeline_name' in self.data:
        return self.data.get('pipeline_name')
    elif self.pipeline is not None:
        return self.pipeline.data.name

def rectify_acquaintance_strategy(
        circuit: circuits.Circuit,
        acquaint_first: bool = True
        ) -> None:
    """Splits moments so that they contain either only acquaintance gates
    or only permutation gates. Orders resulting moments so that the first
    one is of the same type as the previous one.

    Args:
        circuit: The acquaintance strategy to rectify.
        acquaint_first: Whether to make the acquaintance moment first when
            splitting the first mixed moment.
    """
    if not is_acquaintance_strategy(circuit):
        raise TypeError('not is_acquaintance_strategy(circuit)')

    rectified_moments = []
    for moment in circuit:
        gate_type_to_ops = collections.defaultdict(
            list)  # type: Dict[bool, List[ops.GateOperation]]
        for op in moment.operations:
            gate_type_to_ops[isinstance(
                op.gate, AcquaintanceOpportunityGate)].append(op)
        if len(gate_type_to_ops) == 1:
            rectified_moments.append(moment)
            continue
        for acquaint_first in sorted(gate_type_to_ops.keys(),
                                     reverse=acquaint_first):
            rectified_moments.append(
                ops.Moment(gate_type_to_ops[acquaint_first]))
    circuit._moments = rectified_moments

def get_tpm_status(d_info):
    """Get the TPM support status.

    Get the TPM support status of the node.

    :param d_info: the list of ipmitool parameters for accessing a node.
    :returns: TPM support status
    """
    # note:
    # Get TPM support status : ipmi cmd '0xF5', valid flags '0xC0'
    #
    # $ ipmitool raw 0x2E 0xF5 0x80 0x28 0x00 0x81 0xC0
    #
    # Raw response:
    # 80 28 00 C0 C0: True
    # 80 28 00 -- --: False (other values than 'C0 C0')

    ipmicmd = ipmi_command.Command(bmc=d_info['irmc_address'],
                                   userid=d_info['irmc_username'],
                                   password=d_info['irmc_password'])
    try:
        response = _send_raw_command(ipmicmd, GET_TPM_STATUS)
        if response['code'] != 0:
            raise IPMIFailure(
                "IPMI operation '%(operation)s' failed: %(error)s" %
                {'operation': "GET TMP status",
                 'error': response.get('error')})
        out = ' '.join('{:02X}'.format(x) for x in response['data'])
        return out is not None and out[-5:] == 'C0 C0'
    except ipmi_exception.IpmiException as e:
        raise IPMIFailure(
            "IPMI operation '%(operation)s' failed: %(error)s" %
            {'operation': "GET TMP status", 'error': e})

def get_wd(self, path):
    """
    Returns the watch descriptor associated to path. This method
    presents a prohibitive cost, always prefer to keep the WD
    returned by add_watch(). If the path is unknown it returns None.

    @param path: Path.
    @type path: str
    @return: WD or None.
    @rtype: int or None
    """
    path = self.__format_path(path)
    for iwd in self._wmd.items():
        if iwd[1].path == path:
            return iwd[0]

def quantile(self, q, dim=None, interpolation='linear', numeric_only=False,
             keep_attrs=None):
    """Compute the qth quantile of the data along the specified dimension.

    Returns the qth quantiles(s) of the array elements for each variable
    in the Dataset.

    Parameters
    ----------
    q : float in range of [0,1] (or sequence of floats)
        Quantile to compute, which must be between 0 and 1 inclusive.
    dim : str or sequence of str, optional
        Dimension(s) over which to apply quantile.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to
        use when the desired quantile lies between two data points
        ``i < j``:

            * linear: ``i + (j - i) * fraction``, where ``fraction`` is
              the fractional part of the index surrounded by ``i`` and
              ``j``.
            * lower: ``i``.
            * higher: ``j``.
            * nearest: ``i`` or ``j``, whichever is nearest.
            * midpoint: ``(i + j) / 2``.
    keep_attrs : bool, optional
        If True, the dataset's attributes (`attrs`) will be copied from
        the original object to the new one.  If False (default), the new
        object will be returned without attributes.
    numeric_only : bool, optional
        If True, only apply ``func`` to variables with a numeric dtype.

    Returns
    -------
    quantiles : Dataset
        If `q` is a single quantile, then the result is a scalar for each
        variable in data_vars. If multiple percentiles are given, first
        axis of the result corresponds to the quantile and a quantile
        dimension is added to the return Dataset. The other dimensions
        are the dimensions that remain after the reduction of the array.

    See Also
    --------
    numpy.nanpercentile, pandas.Series.quantile, DataArray.quantile
    """
    if isinstance(dim, str):
        dims = set([dim])
    elif dim is None:
        dims = set(self.dims)
    else:
        dims = set(dim)

    _assert_empty([d for d in dims if d not in self.dims],
                  'Dataset does not contain the dimensions: %s')

    q = np.asarray(q, dtype=np.float64)

    variables = OrderedDict()
    for name, var in self.variables.items():
        reduce_dims = [d for d in var.dims if d in dims]
        if reduce_dims or not var.dims:
            if name not in self.coords:
                if (not numeric_only or
                        np.issubdtype(var.dtype, np.number) or
                        var.dtype == np.bool_):
                    if len(reduce_dims) == var.ndim:
                        # prefer to aggregate over axis=None rather than
                        # axis=(0, 1) if they will be equivalent, because
                        # the former is often more efficient
                        reduce_dims = None
                    variables[name] = var.quantile(
                        q, dim=reduce_dims, interpolation=interpolation)
        else:
            variables[name] = var

    # construct the new dataset
    coord_names = set(k for k in self.coords if k in variables)
    if keep_attrs is None:
        keep_attrs = _get_keep_attrs(default=False)
    attrs = self.attrs if keep_attrs else None
    new = self._replace_vars_and_dims(variables, coord_names, attrs=attrs)
    if 'quantile' in new.dims:
        new.coords['quantile'] = Variable('quantile', q)
    else:
        new.coords['quantile'] = q
    return new

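# Usage sketch with a toy Dataset (xarray and numpy assumed importable):
import numpy as np
import xarray as xr

ds = xr.Dataset({"t": ("x", np.arange(10.0))})
print(ds.quantile(0.5, dim="x"))           # scalar median per variable
print(ds.quantile([0.25, 0.75], dim="x"))  # adds a "quantile" dimension
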
Compute the qth quantile of the data along the specified dimension. Returns the qth quantile(s) of the array elements for each variable in the Dataset. Parameters ---------- q : float in range of [0,1] (or sequence of floats) Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply quantile. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: * linear: ``i + (j - i) * fraction``, where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. * lower: ``i``. * higher: ``j``. * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. keep_attrs : bool, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. numeric_only : bool, optional If True, only apply ``func`` to variables with a numeric dtype. Returns ------- quantiles : Dataset If `q` is a single quantile, then the result is a scalar for each variable in data_vars. If multiple quantiles are given, first axis of the result corresponds to the quantile and a quantile dimension is added to the return Dataset. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanpercentile, pandas.Series.quantile, DataArray.quantile
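A minimal usage sketch for the Dataset.quantile method above; the dataset contents and the 'time' dimension name are illustrative, not from the source.

import numpy as np
import xarray as xr

# small dataset with a 'time' dimension (illustrative data)
ds = xr.Dataset({'temperature': (('time',), np.random.rand(100))})

# a single q yields scalar variables
median = ds.quantile(0.5, dim='time')

# a sequence of quantiles adds a 'quantile' dimension to the result
spread = ds.quantile([0.25, 0.75], dim='time')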
def QA_SU_save_index_list(engine, client=DATABASE): """save index_list Arguments: engine {[type]} -- [description] Keyword Arguments: client {[type]} -- [description] (default: {DATABASE}) """ engine = select_save_engine(engine) engine.QA_SU_save_index_list(client=client)
save index_list Arguments: engine {[type]} -- [description] Keyword Arguments: client {[type]} -- [description] (default: {DATABASE})
def create_cluster(cluster_dict, datacenter=None, cluster=None, service_instance=None): ''' Creates a cluster. Note: cluster_dict['name'] will be overridden by the cluster param value cluster_dict Dictionary with the config values of the new cluster. datacenter Name of datacenter containing the cluster. Ignored if already contained by proxy details. Default value is None. cluster Name of cluster. Ignored if already contained by proxy details. Default value is None. service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash # esxdatacenter proxy salt '*' vsphere.create_cluster cluster_dict=$cluster_dict cluster=cl1 # esxcluster proxy salt '*' vsphere.create_cluster cluster_dict=$cluster_dict ''' # Validate cluster dictionary schema = ESXClusterConfigSchema.serialize() try: jsonschema.validate(cluster_dict, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) # Get required details from the proxy proxy_type = get_proxy_type() if proxy_type == 'esxdatacenter': datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] dc_ref = _get_proxy_target(service_instance) if not cluster: raise ArgumentValueError('\'cluster\' needs to be specified') elif proxy_type == 'esxcluster': datacenter = __salt__['esxcluster.get_details']()['datacenter'] dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) cluster = __salt__['esxcluster.get_details']()['cluster'] if cluster_dict.get('vsan') and not \ salt.utils.vsan.vsan_supported(service_instance): raise VMwareApiError('VSAN operations are not supported') si = service_instance cluster_spec = vim.ClusterConfigSpecEx() vsan_spec = None ha_config = None vsan_61 = None # enable_ha must be initialized here, otherwise the later check can hit a NameError enable_ha = False if cluster_dict.get('vsan'): # XXX The correct way of retrieving the VSAN data (on the if branch) # is not supported before 60u2 vcenter vcenter_info = salt.utils.vmware.get_service_info(si) if float(vcenter_info.apiVersion) >= 6.0 and \ int(vcenter_info.build) >= 3634794: # 60u2 vsan_spec = vim.vsan.ReconfigSpec(modify=True) vsan_61 = False # We need to keep HA disabled and enable it afterwards if cluster_dict.get('ha', {}).get('enabled'): enable_ha = True ha_config = cluster_dict['ha'] del cluster_dict['ha'] else: vsan_61 = True # If VSAN is 6.1 the configuration of VSAN happens when configuring the # cluster via the regular endpoint _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec, vsan_61) salt.utils.vmware.create_cluster(dc_ref, cluster, cluster_spec) if not vsan_61: # Only available after VSAN 61 if vsan_spec: cluster_ref = salt.utils.vmware.get_cluster(dc_ref, cluster) salt.utils.vsan.reconfigure_cluster_vsan(cluster_ref, vsan_spec) if enable_ha: # Set HA after VSAN has been configured _apply_cluster_dict(cluster_spec, {'ha': ha_config}) salt.utils.vmware.update_cluster(cluster_ref, cluster_spec) # Set HA back on the object cluster_dict['ha'] = ha_config return {'create_cluster': True}
Creates a cluster. Note: cluster_dict['name'] will be overridden by the cluster param value cluster_dict Dictionary with the config values of the new cluster. datacenter Name of datacenter containing the cluster. Ignored if already contained by proxy details. Default value is None. cluster Name of cluster. Ignored if already contained by proxy details. Default value is None. service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash # esxdatacenter proxy salt '*' vsphere.create_cluster cluster_dict=$cluster_dict cluster=cl1 # esxcluster proxy salt '*' vsphere.create_cluster cluster_dict=$cluster_dict
def gdalwarp(src, dst, options): """ a simple wrapper for :osgeo:func:`gdal.Warp` Parameters ---------- src: str, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset` the input data set dst: str the output data set options: dict additional parameters passed to gdal.Warp; see :osgeo:func:`gdal.WarpOptions` Returns ------- None """ try: out = gdal.Warp(dst, src, options=gdal.WarpOptions(**options)) except RuntimeError as e: raise RuntimeError('{}:\n src: {}\n dst: {}\n options: {}'.format(str(e), src, dst, options)) out = None # dereference the Dataset handle so GDAL flushes and closes the output file
a simple wrapper for :osgeo:func:`gdal.Warp` Parameters ---------- src: str, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset` the input data set dst: str the output data set options: dict additional parameters passed to gdal.Warp; see :osgeo:func:`gdal.WarpOptions` Returns ------- None
def horz_offset(self, offset): """ Set the value of ./c:manualLayout/c:x@val to *offset* and ./c:manualLayout/c:xMode@val to "factor". Remove ./c:manualLayout if *offset* == 0. """ if offset == 0.0: self._remove_manualLayout() return manualLayout = self.get_or_add_manualLayout() manualLayout.horz_offset = offset
Set the value of ./c:manualLayout/c:x@val to *offset* and ./c:manualLayout/c:xMode@val to "factor". Remove ./c:manualLayout if *offset* == 0.
def add_lifecycle_delete_rule(self, **kw): """Add a "delete" rule to lifecycle rules configured for this bucket. See https://cloud.google.com/storage/docs/lifecycle and https://cloud.google.com/storage/docs/json_api/v1/buckets .. literalinclude:: snippets.py :start-after: [START add_lifecycle_delete_rule] :end-before: [END add_lifecycle_delete_rule] :type kw: dict :param kw: arguments passed to :class:`LifecycleRuleConditions`. """ rules = list(self.lifecycle_rules) rules.append(LifecycleRuleDelete(**kw)) self.lifecycle_rules = rules
Add a "delete" rule to lifestyle rules configured for this bucket. See https://cloud.google.com/storage/docs/lifecycle and https://cloud.google.com/storage/docs/json_api/v1/buckets .. literalinclude:: snippets.py :start-after: [START add_lifecycle_delete_rule] :end-before: [END add_lifecycle_delete_rule] :type kw: dict :params kw: arguments passed to :class:`LifecycleRuleConditions`.
def randomBinaryField(self): """ Return a random bytes value. """ lst = [ b"hello world", b"this is bytes", b"awesome django", b"djipsum is awesome", b"\x00\x01\x02\x03\x04\x05\x06\x07", b"\x0b\x0c\x0e\x0f" ] return self.randomize(lst)
Return a random bytes value.
def savefig(filename, path="figs", fig=None, ext='eps', verbose=False, **kwargs): """ Save the figure *fig* (optional, if not specified, latest figure in focus) to *filename* in the path *path* with extension *ext*. *\*\*kwargs* is passed to :meth:`matplotlib.figure.Figure.savefig`. """ filename = os.path.join(path, filename) final_filename = '{}.{}'.format(filename, ext).replace(" ", "").replace("\n", "") final_filename = os.path.abspath(final_filename) final_path = os.path.dirname(final_filename) if not os.path.exists(final_path): os.makedirs(final_path) if verbose: print('Saving file: {}'.format(final_filename)) if fig is not None: fig.savefig(final_filename, bbox_inches='tight', **kwargs) else: plt.savefig(final_filename, bbox_inches='tight', **kwargs)
Save the figure *fig* (optional, if not specified, latest figure in focus) to *filename* in the path *path* with extension *ext*. *\*\*kwargs* is passed to :meth:`matplotlib.figure.Figure.savefig`.
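A short usage sketch for savefig above; the figure data and filename are illustrative.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [1, 0])

# writes figs/decay.pdf, creating the directory if needed
savefig('decay', path='figs', fig=fig, ext='pdf', verbose=True)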
def _find(api_method, query, limit, return_handler, first_page_size, **kwargs): ''' Takes an API method handler (dxpy.api.find*) and calls it with *query*, and then wraps a generator around its output. Used by the methods below. Note that this function may only be used for /system/find* methods. ''' num_results = 0 if "limit" not in query: query["limit"] = first_page_size while True: resp = api_method(query, **kwargs) by_parent = resp.get('byParent') descriptions = resp.get('describe') def format_result(result): if return_handler: result = dxpy.get_handler(result['id'], project=result.get('project')) if by_parent is not None: return result, by_parent, descriptions else: return result for i in resp["results"]: if num_results == limit: return num_results += 1 yield format_result(i) # set up next query if resp["next"] is not None: query["starting"] = resp["next"] query["limit"] = min(query["limit"]*2, 1000) else: return
Takes an API method handler (dxpy.api.find*) and calls it with *query*, and then wraps a generator around its output. Used by the methods below. Note that this function may only be used for /system/find* methods.
def get_data_dir(module_name: str) -> str: """Ensure the appropriate Bio2BEL data directory exists for the given module, then return its path. :param module_name: The name of the module. Ex: 'chembl' :return: The module's data directory """ module_name = module_name.lower() data_dir = os.path.join(BIO2BEL_DIR, module_name) os.makedirs(data_dir, exist_ok=True) return data_dir
Ensure the appropriate Bio2BEL data directory exists for the given module, then return its path. :param module_name: The name of the module. Ex: 'chembl' :return: The module's data directory
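Illustrative call for get_data_dir above; the module name is hypothetical and the exact location depends on BIO2BEL_DIR.

# creates (if needed) and returns something like ~/.bio2bel/chembl
data_dir = get_data_dir('ChEMBL')  # the name is lower-cased internally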
def do_list_cap(self, line): """list_cap <peer> """ def f(p, args): for i in p.netconf.server_capabilities: print(i) self._request(line, f)
list_cap <peer>
def modified_Wilson_Tc(zs, Tcs, Aijs): r'''Calculates critical temperature of a mixture according to mixing rules in [1]_. Equation .. math:: T_{cm} = \sum_i x_i T_{ci} + C\sum_i x_i \ln \left(x_i + \sum_j x_j A_{ij}\right)T_{ref} For a binary mixture, this simplifies to: .. math:: T_{cm} = x_1 T_{c1} + x_2 T_{c2} + C[x_1 \ln(x_1 + x_2A_{12}) + x_2\ln(x_2 + x_1 A_{21})] Parameters ---------- zs : list[float] Mole fractions of all components Tcs : list[float] Critical temperatures of all components, [K] Aijs : matrix Interaction parameters Returns ------- Tcm : float Critical temperature of the mixture, [K] Notes ----- The equation and original article have been reviewed. [1]_ has 75 binary systems, and additional multicomponent mixture parameters. All parameters, even if zero, must be given to this function. The 2nd example is from [2]_, for: butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K. Its result is identical to that calculated in the article. Examples -------- >>> modified_Wilson_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], ... [[0, 1.174450, 1.274390], [0.835914, 0, 1.21038], ... [0.746878, 0.80677, 0]]) 450.0305966823031 References ---------- .. [1] Teja, Amyn S., Kul B. Garg, and Richard L. Smith. "A Method for the Calculation of Gas-Liquid Critical Temperatures and Pressures of Multicomponent Mixtures." Industrial & Engineering Chemistry Process Design and Development 22, no. 4 (1983): 672-76. .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati. "Prediction of True Critical Temperature of Multi-Component Mixtures: Extending Fast Estimation Methods." Fluid Phase Equilibria 392 (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001. ''' if not none_and_length_check([zs, Tcs]): raise Exception('Function inputs are incorrect format') C = -2500 Tcm = sum(zs[i]*Tcs[i] for i in range(len(zs))) for i in range(len(zs)): Tcm += C*zs[i]*log(zs[i] + sum(zs[j]*Aijs[i][j] for j in range(len(zs)))) return Tcm
r'''Calculates critical temperature of a mixture according to mixing rules in [1]_. Equation .. math:: T_{cm} = \sum_i x_i T_{ci} + C\sum_i x_i \ln \left(x_i + \sum_j x_j A_{ij}\right)T_{ref} For a binary mixture, this simplifies to: .. math:: T_{cm} = x_1 T_{c1} + x_2 T_{c2} + C[x_1 \ln(x_1 + x_2A_{12}) + x_2\ln(x_2 + x_1 A_{21})] Parameters ---------- zs : list[float] Mole fractions of all components Tcs : list[float] Critical temperatures of all components, [K] Aijs : matrix Interaction parameters Returns ------- Tcm : float Critical temperature of the mixture, [K] Notes ----- The equation and original article have been reviewed. [1]_ has 75 binary systems, and additional multicomponent mixture parameters. All parameters, even if zero, must be given to this function. The 2nd example is from [2]_, for: butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K. Its result is identical to that calculated in the article. Examples -------- >>> modified_Wilson_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], ... [[0, 1.174450, 1.274390], [0.835914, 0, 1.21038], ... [0.746878, 0.80677, 0]]) 450.0305966823031 References ---------- .. [1] Teja, Amyn S., Kul B. Garg, and Richard L. Smith. "A Method for the Calculation of Gas-Liquid Critical Temperatures and Pressures of Multicomponent Mixtures." Industrial & Engineering Chemistry Process Design and Development 22, no. 4 (1983): 672-76. .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati. "Prediction of True Critical Temperature of Multi-Component Mixtures: Extending Fast Estimation Methods." Fluid Phase Equilibria 392 (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.
def mavlink_packet(self, msg): '''handle an incoming mavlink packet''' if not isinstance(self.checklist, mp_checklist.CheckUI): return if not self.checklist.is_alive(): return type = msg.get_type() master = self.master if type == 'HEARTBEAT': '''beforeEngineList - APM booted''' if self.mpstate.status.heartbeat_error == True: self.checklist.set_check("Pixhawk Booted", 0) else: self.checklist.set_check("Pixhawk Booted", 1) '''beforeEngineList - Flight mode MANUAL''' if self.mpstate.status.flightmode == "MANUAL": self.checklist.set_check("Flight mode MANUAL", 1) else: self.checklist.set_check("Flight mode MANUAL", 0) if type in [ 'GPS_RAW', 'GPS_RAW_INT' ]: '''beforeEngineList - GPS lock''' if ((msg.fix_type >= 3 and master.mavlink10()) or (msg.fix_type == 2 and not master.mavlink10())): self.checklist.set_check("GPS lock", 1) else: self.checklist.set_check("GPS lock", 0) '''beforeEngineList - Radio Links > 6db margin TODO: figure out how to read db levels''' if type in ['RADIO', 'RADIO_STATUS']: if msg.rssi < msg.noise+6 or msg.remrssi < msg.remnoise+6: self.checklist.set_check("Radio Links > 6db margin", 0) else: self.checklist.set_check("Radio Links > 6db margin", 1) if type == 'HWSTATUS': '''beforeEngineList - Avionics Battery''' if msg.Vcc >= 4600 and msg.Vcc <= 5300: self.checklist.set_check("Avionics Power", 1) else: self.checklist.set_check("Avionics Power", 0) if type == 'POWER_STATUS': '''beforeEngineList - Servo Power''' if msg.Vservo >= 4900 and msg.Vservo <= 6500: self.checklist.set_check("Servo Power", 1) else: self.checklist.set_check("Servo Power", 0) '''beforeEngineList - Waypoints Loaded''' if type == 'HEARTBEAT': if self.module('wp').wploader.count() == 0: self.checklist.set_check("Waypoints Loaded", 0) else: self.checklist.set_check("Waypoints Loaded", 1) '''beforeTakeoffList - Compass active''' if type == 'GPS_RAW': if math.fabs(msg.hdg - master.field('VFR_HUD', 'heading', '-')) < 10 or math.fabs(msg.hdg - master.field('VFR_HUD', 'heading', '-')) > 355: self.checklist.set_check("Compass active", 1) else: self.checklist.set_check("Compass active", 0) '''beforeCruiseList - Airspeed > 10 m/s , Altitude > 30 m''' if type == 'VFR_HUD': rel_alt = master.field('GLOBAL_POSITION_INT', 'relative_alt', 0) * 1.0e-3 if rel_alt > 30: self.checklist.set_check("Altitude > 30 m", 1) else: self.checklist.set_check("Altitude > 30 m", 0) if msg.airspeed > 10 or msg.groundspeed > 10: self.checklist.set_check("Airspeed > 10 m/s", 1) else: self.checklist.set_check("Airspeed > 10 m/s", 0) '''beforeEngineList - IMU''' if type in ['SYS_STATUS']: sensors = { 'AS' : mavutil.mavlink.MAV_SYS_STATUS_SENSOR_DIFFERENTIAL_PRESSURE, 'MAG' : mavutil.mavlink.MAV_SYS_STATUS_SENSOR_3D_MAG, 'INS' : mavutil.mavlink.MAV_SYS_STATUS_SENSOR_3D_ACCEL | mavutil.mavlink.MAV_SYS_STATUS_SENSOR_3D_GYRO, 'AHRS' : mavutil.mavlink.MAV_SYS_STATUS_AHRS} bits = sensors['INS'] present = ((msg.onboard_control_sensors_enabled & bits) == bits) healthy = ((msg.onboard_control_sensors_health & bits) == bits) if not present or not healthy: self.checklist.set_check("IMU Check", 0) else: self.checklist.set_check("IMU Check", 1)
handle an incoming mavlink packet
def main(): """Provide main CLI entrypoint.""" if os.environ.get('DEBUG'): logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) # botocore info is spammy logging.getLogger('botocore').setLevel(logging.ERROR) cli_arguments = fix_hyphen_commands(docopt(__doc__, version=version)) # at least one of these must be enabled, i.e. the value is 'True'... but unfortunately # `docopt` doesn't give you the hierarchy... so given 'gen-sample cfn', there are # TWO enabled items in the list, 'gen-sample' and 'cfn' possible_commands = [command for command, enabled in cli_arguments.items() if enabled] command_class = find_command_class(possible_commands) if command_class: command_class(cli_arguments).execute() else: LOGGER.error("class not found for command '%s'", possible_commands)
Provide main CLI entrypoint.
def references_json_authors(ref_authors, ref_content): "build the authors for references json here for testability" all_authors = references_authors(ref_authors) if all_authors != {}: if ref_content.get("type") in ["conference-proceeding", "journal", "other", "periodical", "preprint", "report", "web"]: for author_type in ["authors", "authorsEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["book", "book-chapter"]: for author_type in ["authors", "authorsEtAl", "editors", "editorsEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["clinical-trial"]: # Always set as authors, once, then add the authorsType for author_type in ["authors", "collaborators", "sponsors"]: if "authorsType" not in ref_content and all_authors.get(author_type): set_if_value(ref_content, "authors", all_authors.get(author_type)) set_if_value(ref_content, "authorsEtAl", all_authors.get(author_type + "EtAl")) ref_content["authorsType"] = author_type elif ref_content.get("type") in ["data", "software"]: for author_type in ["authors", "authorsEtAl", "compilers", "compilersEtAl", "curators", "curatorsEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["patent"]: for author_type in ["inventors", "inventorsEtAl", "assignees", "assigneesEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["thesis"]: # Convert list to a non-list if all_authors.get("authors") and len(all_authors.get("authors")) > 0: ref_content["author"] = all_authors.get("authors")[0] return ref_content
build the authors for references json here for testability
def convtable2dict(convtable, locale, update=None): """ Convert a list of conversion dict to a dict for a certain locale. >>> sorted(convtable2dict([{'zh-hk': '列斯', 'zh-hans': '利兹', 'zh': '利兹', 'zh-tw': '里茲'}, {':uni': '巨集', 'zh-cn': '宏'}], 'zh-cn').items()) [('列斯', '利兹'), ('利兹', '利兹'), ('巨集', '宏'), ('里茲', '利兹')] """ rdict = update.copy() if update else {} for r in convtable: if ':uni' in r: if locale in r: rdict[r[':uni']] = r[locale] elif locale[:-1] == 'zh-han': if locale in r: for word in r.values(): rdict[word] = r[locale] else: v = fallback(locale, r) for word in r.values(): rdict[word] = v return rdict
Convert a list of conversion dict to a dict for a certain locale. >>> sorted(convtable2dict([{'zh-hk': '列斯', 'zh-hans': '利兹', 'zh': '利兹', 'zh-tw': '里茲'}, {':uni': '巨集', 'zh-cn': '宏'}], 'zh-cn').items()) [('列斯', '利兹'), ('利兹', '利兹'), ('巨集', '宏'), ('里茲', '利兹')]
def viewAt(self, point): """ Looks up the view at the given point. :param point | <QtCore.QPoint> :return <projexui.widgets.xviewwidget.XView> || None """ widget = self.childAt(point) if widget: return projexui.ancestor(widget, XView) else: return None
Looks up the view at the given point. :param point | <QtCore.QPoint> :return <projexui.widgets.xviewwidget.XView> || None
def insert_or_merge_entity(self, table_name, entity, timeout=None): ''' Merges an existing entity or inserts a new entity if it does not exist in the table. If insert_or_merge_entity is used to merge an entity, any properties from the previous entity will be retained if the request does not define or include them. :param str table_name: The name of the table in which to insert or merge the entity. :param entity: The entity to insert or merge. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`~azure.storage.table.models.Entity` :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str ''' _validate_not_none('table_name', table_name) request = _insert_or_merge_entity(entity) request.host = self._get_host() request.query += [('timeout', _int_to_str(timeout))] request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) response = self._perform_request(request) return _extract_etag(response)
Merges an existing entity or inserts a new entity if it does not exist in the table. If insert_or_merge_entity is used to merge an entity, any properties from the previous entity will be retained if the request does not define or include them. :param str table_name: The name of the table in which to insert or merge the entity. :param entity: The entity to insert or merge. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`~azure.storage.table.models.Entity` :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str
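A minimal sketch of calling insert_or_merge_entity through the legacy azure-storage TableService; the credentials, table name, and entity values are placeholders.

from azure.storage.table import TableService

table_service = TableService(account_name='myaccount', account_key='...')

# PartitionKey and RowKey are required, as the docstring above notes
entity = {'PartitionKey': 'tasks', 'RowKey': '0001', 'status': 'done'}
etag = table_service.insert_or_merge_entity('mytable', entity)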
def run_ppm_server(pdb_file, outfile, force_rerun=False): """Run the PPM server from OPM to predict transmembrane residues. Args: pdb_file (str): Path to PDB file outfile (str): Path to output HTML results file force_rerun (bool): Flag to rerun PPM if HTML results file already exists Returns: dict: Dictionary of information from the PPM run, including a link to download the membrane protein file """ if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun): url = 'http://sunshine.phar.umich.edu/upload_file.php' files = {'userfile': open(pdb_file, 'rb')} r = requests.post(url, files=files) info = r.text # Save results in raw HTML format with open(outfile, 'w') as f: f.write(info) else: # Utilize existing saved results with open(outfile, 'r') as f: info = f.read() # Clean up the HTML stuff t = info.replace('\n', '') tt = t.replace('\r', '') ttt = tt.replace('\t', '') soup = BeautifulSoup(ttt, "lxml") # Find all tables in the HTML code tables = soup.find_all("table", attrs={"class": "data"}) info_dict = {} # There are multiple tables with information table_index = 0 for t in tables: data_index = 0 # "row1" contains data for data in t.find_all('tr', attrs={"class": "row1"}): data_list = list(data.strings) if table_index == 0: info_dict['Depth/Hydrophobic Thickness'] = data_list[0] info_dict['deltaG_transfer'] = data_list[2] info_dict['Tilt Angle'] = data_list[3] if table_index == 1 and data_index == 0: info_dict['Embedded_residues_Tilt'] = data_list[0] info_dict['Embedded_residues'] = data_list[1] if table_index == 1 and data_index == 1: info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0] info_dict['Transmembrane_secondary_structure_segments'] = data_list[1] if table_index == 2: info_dict['Output Messages'] = data_list[1] if table_index == 3: baseurl = 'http://sunshine.phar.umich.edu/' a = data.find('a', href=True) download_url = baseurl + a['href'].replace('./', '') info_dict['Output file download link'] = download_url data_index += 1 table_index += 1 return info_dict
Run the PPM server from OPM to predict transmembrane residues. Args: pdb_file (str): Path to PDB file outfile (str): Path to output HTML results file force_rerun (bool): Flag to rerun PPM if HTML results file already exists Returns: dict: Dictionary of information from the PPM run, including a link to download the membrane protein file
def _use_tables(objs): ''' Whether a collection of Bokeh objects contains a TableWidget Args: objs (seq[Model or Document]) : Returns: bool ''' from ..models.widgets import TableWidget return _any(objs, lambda obj: isinstance(obj, TableWidget))
Whether a collection of Bokeh objects contains a TableWidget Args: objs (seq[Model or Document]) : Returns: bool
def update_replication_schedule(self, schedule_id, schedule): """ Update a replication schedule. @param schedule_id: The id of the schedule to update. @param schedule: The modified schedule. @return: The updated replication schedule. @since: API v3 """ return self._put("replications/%s" % schedule_id, ApiReplicationSchedule, data=schedule, api_version=3)
Update a replication schedule. @param schedule_id: The id of the schedule to update. @param schedule: The modified schedule. @return: The updated replication schedule. @since: API v3
def align_transcriptome(fastq_file, pair_file, ref_file, data): """ bowtie2 with settings for aligning to the transcriptome for eXpress/RSEM/etc """ work_bam = dd.get_work_bam(data) base, ext = os.path.splitext(work_bam) out_file = base + ".transcriptome" + ext if utils.file_exists(out_file): data = dd.set_transcriptome_bam(data, out_file) return data bowtie2 = config_utils.get_program("bowtie2", data["config"]) gtf_file = dd.get_gtf_file(data) gtf_index = index_transcriptome(gtf_file, ref_file, data) num_cores = data["config"]["algorithm"].get("num_cores", 1) fastq_cmd = "-1 %s" % fastq_file if pair_file else "-U %s" % fastq_file pair_cmd = "-2 %s " % pair_file if pair_file else "" cmd = ("{bowtie2} -p {num_cores} -a -X 600 --rdg 6,5 --rfg 6,5 --score-min L,-.6,-.4 --no-discordant --no-mixed -x {gtf_index} {fastq_cmd} {pair_cmd} ") with file_transaction(data, out_file) as tx_out_file: message = "Aligning %s and %s to the transcriptome." % (fastq_file, pair_file) cmd += "| " + postalign.sam_to_sortbam_cl(data, tx_out_file, name_sort=True) do.run(cmd.format(**locals()), message) data = dd.set_transcriptome_bam(data, out_file) return data
bowtie2 with settings for aligning to the transcriptome for eXpress/RSEM/etc
def proj_path(*path_parts): # type: (str) -> str """ Return absolute path to the repo dir (root project directory). Args: path_parts (str): Path components relative to the project root (pelconf.yaml). Returns: str: The given path converted to an absolute path. """ path_parts = path_parts or ['.'] # If path represented by path_parts is absolute, do not modify it. if not os.path.isabs(path_parts[0]): proj_path = _find_proj_root() if proj_path is not None: path_parts = [proj_path] + list(path_parts) return os.path.normpath(os.path.join(*path_parts))
Return absolute path to the repo dir (root project directory). Args: path_parts (str): Path components relative to the project root (pelconf.yaml). Returns: str: The given path converted to an absolute path.
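Illustrative usage of proj_path above, assuming the working directory sits inside a project containing pelconf.yaml; the path components are hypothetical.

# absolute path to <project root>/src/mypackage
pkg_dir = proj_path('src', 'mypackage')

# with no arguments, resolves the project root itself
root = proj_path()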
def between(arg, lower, upper): """ Check if the input expr falls between the lower/upper bounds passed. Bounds are inclusive. All arguments must be comparable. Returns ------- is_between : BooleanValue """ lower = as_value_expr(lower) upper = as_value_expr(upper) op = ops.Between(arg, lower, upper) return op.to_expr()
Check if the input expr falls between the lower/upper bounds passed. Bounds are inclusive. All arguments must be comparable. Returns ------- is_between : BooleanValue
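A small sketch with the ibis expression API; the table schema is hypothetical.

import ibis

t = ibis.table([('age', 'int64')], name='people')  # hypothetical schema

# rows where 18 <= age <= 65, bounds inclusive
expr = between(t.age, 18, 65)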
def all_tags_of_type(self, type_or_types, recurse_into_sprites = True): """ Generator for all tags of the given type_or_types. Generates in breadth-first order, optionally including all sub-containers. """ for t in self.tags: if isinstance(t, type_or_types): yield t if recurse_into_sprites: for t in self.tags: # recurse into nested sprites if isinstance(t, SWFTimelineContainer): for containedtag in t.all_tags_of_type(type_or_types): yield containedtag
Generator for all tags of the given type_or_types. Generates in breadth-first order, optionally including all sub-containers.
def _output(cls, fluents: Sequence[FluentPair]) -> Sequence[tf.Tensor]: '''Converts `fluents` to tensors with datatype tf.float32.''' output = [] for _, fluent in fluents: tensor = fluent.tensor if tensor.dtype != tf.float32: tensor = tf.cast(tensor, tf.float32) output.append(tensor) return tuple(output)
Converts `fluents` to tensors with datatype tf.float32.
def parse_file(self, fpath): ''' Read a file on the file system (relative to salt's base project dir) :returns: A list of the file's lines, as bytes. :raises IOError: If the file cannot be found or read. ''' sdir = os.path.abspath(os.path.join(os.path.dirname(salt.__file__), os.pardir)) with open(os.path.join(sdir, fpath), 'rb') as f: return f.readlines()
Read a file on the file system (relative to salt's base project dir) :returns: A list of the file's lines, as bytes. :raises IOError: If the file cannot be found or read.
def fost_hmac_url_signature( key, secret, host, path, query_string, expires): """ Return a signature that corresponds to the signed URL. """ if query_string: document = '%s%s?%s\n%s' % (host, path, query_string, expires) else: document = '%s%s\n%s' % (host, path, expires) signature = sha1_hmac(secret, document) return signature
Return a signature that corresponds to the signed URL.
def configuration(self): """ :rtype: twilio.rest.flex_api.v1.configuration.ConfigurationList """ if self._configuration is None: self._configuration = ConfigurationList(self) return self._configuration
:rtype: twilio.rest.flex_api.v1.configuration.ConfigurationList
def cleanup(self, ctime=None): ''' This method is called iteratively by the connection owning it. Its job is to control the size of cache and remove old entries. ''' ctime = ctime or time.time() if self.last_cleanup: self.average_cleanup_time.add_point(ctime - self.last_cleanup) self.last_cleanup = ctime log.debug('couchdb', "Running cache cleanup().") # first remove already invalidated entries, used this iteration to # build up the map of usage expire = list() # [(num_accessed / time_in_cache, size, ident)] usage = list() actual_size = 0 for ident, entry in self.iteritems(): if entry.state is EntryState.invalid: expire.append(ident) continue elif entry.state is EntryState.waiting: continue else: # EntryState.ready actual_size += entry.size time_in_cache = max([ctime - entry.cached_at, 1]) usage.append(( float(entry.num_accessed) / time_in_cache, -entry.size, ident)) self.average_size.add_point(actual_size) if self.average_size.get_value() > 3 * self.desired_size: log.warning("couchdb", "The average size of Cache is %.2f times " "bigger than the desired size of: %s. It might be " "a good idea to rethink the caching strategy.", self.average_size.get_value() / self.desired_size, self.desired_size) if actual_size > self.desired_size: log.debug('couchdb', "I will have to cleanup some data, " "the actual size is: %s, the desired limit is %s.", actual_size, self.desired_size) # The usage list is sorted in order of things I will # be removing first. The important factor is "density" of usages # in time. usage.sort() size_to_delete = 0 num_to_delete = 0 while (len(usage) > 1 and actual_size - size_to_delete > self.desired_size): _, negative_size, ident = usage.pop(0) size_to_delete += -negative_size num_to_delete += 1 expire.append(ident) log.debug('couchdb', "I will remove %d entries from cache of the " "size of %s to compensate the size.", num_to_delete, size_to_delete) for ident in expire: del self[ident]
This method is called iteratively by the connection owning it. Its job is to control the size of cache and remove old entries.
def _run_program(self, bin, fastafile, params=None): """ Run AMD and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) fgfile = os.path.join(self.tmpdir, "AMD.in.fa") outfile = fgfile + ".Matrix" shutil.copy(fastafile, fgfile) current_path = os.getcwd() os.chdir(self.tmpdir) stdout = "" stderr = "" cmd = "%s -F %s -B %s" % ( bin, fgfile, params["background"], ) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out,err = p.communicate() stdout += out.decode() stderr += err.decode() os.chdir(current_path) motifs = [] if os.path.exists(outfile): f = open(outfile) motifs = self.parse(f) f.close() return motifs, stdout, stderr
Run AMD and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
def enumerate_sources(self): """ Return a list of (source_id, source_name) tuples """ sources = [] for source_id in range(1, 17): try: name = yield from self.get_source_variable(source_id, 'name') if name: sources.append((source_id, name)) except CommandException: break return sources
Return a list of (source_id, source_name) tuples
def _tidy(self) -> None: """ Removes overlaps, etc., and sorts. """ if self.no_overlap: self.remove_overlap(self.no_contiguous) # will sort else: self._sort()
Removes overlaps, etc., and sorts.
def compile(self, options=[]): """ Compiles the program object to PTX using the compiler options specified in `options`. """ try: self._interface.nvrtcCompileProgram(self._program, options) ptx = self._interface.nvrtcGetPTX(self._program) return ptx except NVRTCException as e: log = self._interface.nvrtcGetProgramLog(self._program) raise ProgramException(log)
Compiles the program object to PTX using the compiler options specified in `options`.
def cmdline_split(s: str, platform: Union[int, str] = 'this') -> List[str]: """ As per https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex. Multi-platform variant of ``shlex.split()`` for command-line splitting. For use with ``subprocess``, for ``argv`` injection etc. Using fast REGEX. Args: s: string to split platform: - ``'this'`` = auto from current platform; - ``1`` = POSIX; - ``0`` = Windows/CMD - (other values reserved) """ # noqa if platform == 'this': platform = (sys.platform != 'win32') # RNC: includes 64-bit Windows if platform == 1: # POSIX re_cmd_lex = r'''"((?:\\["\\]|[^"])*)"|'([^']*)'|(\\.)|(&&?|\|\|?|\d?\>|[<])|([^\s'"\\&|<>]+)|(\s+)|(.)''' # noqa elif platform == 0: # Windows/CMD re_cmd_lex = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' # noqa else: raise AssertionError('unknown platform %r' % platform) args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(re_cmd_lex, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args
As per https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex. Multi-platform variant of ``shlex.split()`` for command-line splitting. For use with ``subprocess``, for ``argv`` injection etc. Using fast REGEX. Args: s: string to split platform: - ``'this'`` = auto from current platform; - ``1`` = POSIX; - ``0`` = Windows/CMD - (other values reserved)
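Expected behavior sketch for cmdline_split above; the outputs shown are what the regexes should produce, not exhaustively verified.

# POSIX-style splitting keeps the quoted argument intact
# and emits the pipe as its own token
cmdline_split('grep -r "a b" . | wc -l', platform=1)
# -> ['grep', '-r', 'a b', '.', '|', 'wc', '-l']

# Windows/CMD rules keep backslashes literal inside quoted paths
cmdline_split(r'dir "C:\Program Files"', platform=0)
# -> ['dir', 'C:\\Program Files']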
def insert(name, table='filter', family='ipv4', **kwargs): ''' .. versionadded:: 2014.1.0 Insert a rule into a chain name A user-defined name to call this rule by in another part of a state or formula. This should not be an actual rule. table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6 position The numerical representation of where the rule should be inserted into the chain. Note that ``-1`` is not a supported position value. All other arguments are passed in with the same name as the long option that would normally be used for iptables, with one exception: ``--state`` is specified as `connstate` instead of `state` (not to be confused with `ctstate`). Jump options that don't take arguments should be passed in with an empty string. ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'rules' in kwargs: ret['changes']['locale'] = [] comments = [] save = False for rule in kwargs['rules']: if 'rules' in rule: del rule['rules'] if '__agg__' in rule: del rule['__agg__'] if 'save' in rule and rule['save']: save = True if rule['save'] is not True: save_file = rule['save'] else: save_file = True rule['save'] = False _ret = insert(**rule) if 'locale' in _ret['changes']: ret['changes']['locale'].append(_ret['changes']['locale']) comments.append(_ret['comment']) ret['result'] = _ret['result'] if save: if save_file is True: save_file = None __salt__['iptables.save'](save_file, family=family) if not ret['changes']['locale']: del ret['changes']['locale'] ret['comment'] = '\n'.join(comments) return ret for ignore in _STATE_INTERNAL_KEYWORDS: if ignore in kwargs: del kwargs[ignore] kwargs['name'] = name kwargs['table'] = table rule = __salt__['iptables.build_rule'](family=family, **kwargs) command = __salt__['iptables.build_rule'](full=True, family=family, command='I', **kwargs) if __salt__['iptables.check'](table, kwargs['chain'], rule, family) is True: ret['result'] = True ret['comment'] = 'iptables rule for {0} already set for {1} ({2})'.format( name, family, command.strip()) if 'save' in kwargs and kwargs['save']: if kwargs['save'] is not True: filename = kwargs['save'] else: filename = None saved_rules = __salt__['iptables.get_saved_rules'](family=family) _rules = __salt__['iptables.get_rules'](family=family) __rules = [] for table in _rules: for chain in _rules[table]: __rules.append(_rules[table][chain].get('rules')) __saved_rules = [] for table in saved_rules: for chain in saved_rules[table]: __saved_rules.append(saved_rules[table][chain].get('rules')) # Only save if rules in memory are different than saved rules if __rules != __saved_rules: out = __salt__['iptables.save'](filename, family=family) ret['comment'] += ('\nSaved iptables rule {0} for {1}\n' '{2}\n{3}').format(name, family, command.strip(), out) return ret if __opts__['test']: ret['comment'] = 'iptables rule for {0} needs to be set for {1} ({2})'.format( name, family, command.strip()) return ret if not __salt__['iptables.insert'](table, kwargs['chain'], kwargs['position'], rule, family): ret['changes'] = {'locale': name} ret['result'] = True ret['comment'] = 'Set iptables rule for {0} to: {1} for {2}'.format( name, command.strip(), family) if 'save' in kwargs: if kwargs['save']: out = __salt__['iptables.save'](filename=None, family=family) ret['comment'] = ('Set and saved iptables rule {0} for {1}\n' '{2}\n{3}').format(name, family, command.strip(), out) return ret else: ret['result'] = False ret['comment'] = ('Failed to set iptables rule for {0}.\n' 'Attempted rule was {1}').format( name, command.strip()) return ret
.. versionadded:: 2014.1.0 Insert a rule into a chain name A user-defined name to call this rule by in another part of a state or formula. This should not be an actual rule. table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6 position The numerical representation of where the rule should be inserted into the chain. Note that ``-1`` is not a supported position value. All other arguments are passed in with the same name as the long option that would normally be used for iptables, with one exception: ``--state`` is specified as `connstate` instead of `state` (not to be confused with `ctstate`). Jump options that don't take arguments should be passed in with an empty string.
def calc_mean(c0, c1=None): """ Calculates the mean of the data.""" if c1 is not None and len(c1): return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2. else: return numpy.mean(c0, 0)
Calculates the mean of the data.
def add_interrupt_callback(gpio_id, callback, edge='both', \ pull_up_down=PUD_OFF, threaded_callback=False, \ debounce_timeout_ms=None): """ Add a callback to be executed when the value on 'gpio_id' changes to the edge specified via the 'edge' parameter (default='both'). `pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and `RPIO.PUD_OFF`. If `threaded_callback` is True, the callback will be started inside a Thread. If debounce_timeout_ms is set, new interrupts will not be forwarded until after the specified amount of milliseconds. """ _rpio.add_interrupt_callback(gpio_id, callback, edge, pull_up_down, \ threaded_callback, debounce_timeout_ms)
Add a callback to be executed when the value on 'gpio_id' changes to the edge specified via the 'edge' parameter (default='both'). `pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and `RPIO.PUD_OFF`. If `threaded_callback` is True, the callback will be started inside a Thread. If debounce_timeout_ms is set, new interrupts will not be forwarded until after the specified amount of milliseconds.
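Hedged usage sketch for the RPIO interrupt API; the GPIO number, edge, and timeout are illustrative.

import RPIO

def on_change(gpio_id, value):
    # value is the new pin level (0 or 1)
    print("GPIO %s -> %s" % (gpio_id, value))

# fire on rising edges only, with the internal pull-down enabled
RPIO.add_interrupt_callback(17, on_change, edge='rising',
                            pull_up_down=RPIO.PUD_DOWN,
                            debounce_timeout_ms=50)
RPIO.wait_for_interrupts()  # blocks and dispatches callbacks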
def dataframe_except(df, *cols, **filters): ''' dataframe_except(df, k1=v1, k2=v2...) yields df after selecting all the columns in which the given keys (k1, k2, etc.) have been selected such that the associated columns in the dataframe contain only the rows whose cells match the given values. dataframe_except(df, col1, col2...) selects all columns except for the given columns. dataframe_except(df, col1, col2..., k1=v1, k2=v2...) selects on both conditions. The dataframe_except() function is identical to the dataframe_select() function with the single difference being that the column names provided to dataframe_except() are dropped from the result while column names passed to dataframe_select() are kept. If a value is a tuple/list of 2 elements, then it is considered a range where cells must fall between the values. If value is a tuple/list of more than 2 elements or is a set of any length then it is a list of values, any one of which can match the cell. ''' ii = np.ones(len(df), dtype='bool') for (k,v) in six.iteritems(filters): vals = df[k].values if pimms.is_set(v): jj = np.isin(vals, list(v)) elif pimms.is_vector(v) and len(v) == 2: jj = (v[0] <= vals) & (vals < v[1]) elif pimms.is_vector(v): jj = np.isin(vals, list(v)) else: jj = (vals == v) ii = np.logical_and(ii, jj) if len(ii) != np.sum(ii): df = df.loc[ii] if len(cols) > 0: df = df.drop(list(cols), axis=1, inplace=False) return df
dataframe_except(df, k1=v1, k2=v2...) yields df after selecting all the columns in which the given keys (k1, k2, etc.) have been selected such that the associated columns in the dataframe contain only the rows whose cells match the given values. dataframe_except(df, col1, col2...) selects all columns except for the given columns. dataframe_except(df, col1, col2..., k1=v1, k2=v2...) selects on both conditions. The dataframe_except() function is identical to the dataframe_select() function with the single difference being that the column names provided to dataframe_except() are dropped from the result while column names passed to dataframe_select() are kept. If a value is a tuple/list of 2 elements, then it is considered a range where cells must fall between the values. If value is a tuple/list of more than 2 elements or is a set of any length then it is a list of values, any one of which can match the cell.
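A small sketch of the selection semantics of dataframe_except, assuming a plain pandas DataFrame; the column names and values are illustrative.

import pandas as pd

df = pd.DataFrame({'subject': [1, 2, 3, 4],
                   'score': [0.2, 0.5, 0.9, 0.4],
                   'notes': ['a', 'b', 'c', 'd']})

# keep rows with 0.3 <= score < 0.8 (a 2-tuple is a half-open range
# in the implementation) and drop the 'notes' column from the result
out = dataframe_except(df, 'notes', score=(0.3, 0.8))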
def fcoe_get_login_input_fcoe_login_vlan(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_login = ET.Element("fcoe_get_login") config = fcoe_get_login input = ET.SubElement(fcoe_get_login, "input") fcoe_login_vlan = ET.SubElement(input, "fcoe-login-vlan") fcoe_login_vlan.text = kwargs.pop('fcoe_login_vlan') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def _create_config_translation(cls, config, lang): """ Creates a translation for the given ApphookConfig Only django-parler kind of models are currently supported. ``AutoCMSAppMixin.auto_setup['config_translated_fields']`` is used to fill in the data of the instance for all the languages. :param config: ApphookConfig instance :param lang: language code for the language to create """ config.set_current_language(lang, initialize=True) for field, data in cls.auto_setup['config_translated_fields'].items(): setattr(config, field, data) config.save_translations()
Creates a translation for the given ApphookConfig Only django-parler kind of models are currently supported. ``AutoCMSAppMixin.auto_setup['config_translated_fields']`` is used to fill in the data of the instance for all the languages. :param config: ApphookConfig instance :param lang: language code for the language to create
def ticket1to2(old): """ change Ticket to refer to Products and not benefactor factories. """ if isinstance(old.benefactor, Multifactor): types = list(chain(*[b.powerupNames for b in old.benefactor.benefactors('ascending')])) elif isinstance(old.benefactor, InitializerBenefactor): #oh man what a mess types = list(chain(*[b.powerupNames for b in old.benefactor.realBenefactor.benefactors('ascending')])) newProduct = old.store.findOrCreate(Product, types=types) if old.issuer is None: issuer = old.store.findOrCreate(TicketBooth) else: issuer = old.issuer t = old.upgradeVersion(Ticket.typeName, 1, 2, product = newProduct, issuer = issuer, booth = old.booth, avatar = old.avatar, claimed = old.claimed, email = old.email, nonce = old.nonce)
change Ticket to refer to Products and not benefactor factories.
def get_boot_device(self): """Get the current boot device for the node. Provides the current boot device of the node. Be aware that not all drivers support this. :raises: InvalidParameterValue if any connection parameters are incorrect. :raises: MissingParameterValue if a required parameter is missing :returns: a dictionary containing: :boot_device: the boot device, one of :mod:`ironic.common.boot_devices` or None if it is unknown. :persistent: Whether the boot device will persist to all future boots or not, None if it is unknown. """ operation = 'get_boot_device' try: boot_device = None boot_devices = get_children_by_dn(self.__handle, self.__boot_policy_dn) if boot_devices: for boot_device_mo in boot_devices: if boot_device_mo.Order == 1: boot_device = boot_device_rn[boot_device_mo.Rn] break return {'boot_device': boot_device, 'persistent': None} except UcsException as ex: print("Cisco client exception: %s." % ex) raise exception.UcsOperationError(operation=operation, error=ex)
Get the current boot device for the node. Provides the current boot device of the node. Be aware that not all drivers support this. :raises: InvalidParameterValue if any connection parameters are incorrect. :raises: MissingParameterValue if a required parameter is missing :returns: a dictionary containing: :boot_device: the boot device, one of :mod:`ironic.common.boot_devices` or None if it is unknown. :persistent: Whether the boot device will persist to all future boots or not, None if it is unknown.
def set_project_path(self, path): """ Sets the project path and disables the project search in the combobox if the value of path is None. """ if path is None: self.project_path = None self.model().item(PROJECT, 0).setEnabled(False) if self.currentIndex() == PROJECT: self.setCurrentIndex(CWD) else: path = osp.abspath(path) self.project_path = path self.model().item(PROJECT, 0).setEnabled(True)
Sets the project path and disables the project search in the combobox if the value of path is None.
def _calculate_cloud_ice_perc(self): """ Return the percentage of pixels that are either cloud or snow with high confidence (> 67%). """ self.output('Calculating cloud and snow coverage from QA band', normal=True, arrow=True) a = rasterio.open(join(self.scene_path, self._get_full_filename('QA'))).read_band(1) cloud_high_conf = int('1100000000000000', 2) snow_high_conf = int('0000110000000000', 2) fill_pixels = int('0000000000000001', 2) cloud_mask = numpy.bitwise_and(a, cloud_high_conf) == cloud_high_conf snow_mask = numpy.bitwise_and(a, snow_high_conf) == snow_high_conf fill_mask = numpy.bitwise_and(a, fill_pixels) == fill_pixels perc = numpy.true_divide(numpy.sum(cloud_mask | snow_mask), a.size - numpy.sum(fill_mask)) * 100.0 self.output('cloud/snow coverage: %s' % round(perc, 2), indent=1, normal=True, color='green') return perc
Return the percentage of pixels that are either cloud or snow with high confidence (> 67%).
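Illustration of the bitmask test used above, assuming 16-bit Landsat QA values whose top two bits encode cloud confidence.

import numpy as np

cloud_high_conf = int('1100000000000000', 2)

qa = np.array([0b1100000000000000,   # high-confidence cloud
               0b0100000000000000,   # medium confidence: does not match
               0b0000000000000000],  # clear
              dtype=np.uint16)

# a pixel counts as cloud only when *both* confidence bits are set
cloud_mask = np.bitwise_and(qa, cloud_high_conf) == cloud_high_conf
# -> array([ True, False, False])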