def import_libs(self, module_names, impl_name):
    """
    Loop through module_names, adding a has_<module> boolean attribute to the
    class for each one, and return the first module that imports successfully.

    :param module_names: list of module names to try importing
    :param impl_name: used in error output if no modules succeed
    :return: name, module from first successful implementation
    """
    for name in module_names:
        try:
            module = __import__(name)
            has_module = True
        except ImportError:
            module = None
            has_module = False
        setattr(self, name, module)
        setattr(self, 'has_%s' % name, has_module)
    for name in module_names:
        try:
            return name, __import__(name)
        except ImportError:
            pass
    raise ImportError('No %s Implementation found, tried: %s'
                      % (impl_name, ' '.join(module_names)))
def get_default_value(self):
    """
    Return the default value for the parameter.
    If there is no default value, return None.
    """
    if ('default_value' in self.attributes and
            bool(self.attributes['default_value'].strip())):
        return self.attributes['default_value']
    else:
        return None
def refactor(self, symbol, value):
    """
    Add or remove ``symbol`` from the parameter set.

    Args:
        symbol: the symbol to add or remove
        value: if truthy, add the symbol; otherwise remove it

    Returns:
        None
    """
    if value:
        self.pset.add(symbol)
    else:
        self.pset.remove(symbol)
def start(host='localhost', port=61613, username='', password=''):
    """Start twisted event loop and the fun should begin..."""
    StompClientFactory.username = username
    StompClientFactory.password = password
    reactor.connectTCP(host, port, StompClientFactory())
    reactor.run()
def get_leafs(self, name):
    r"""
    Get the sub-tree leaf node(s).

    :param name: Sub-tree root node name
    :type name: :ref:`NodeName`

    :rtype: list of :ref:`NodeName`

    :raises:
     * RuntimeError (Argument \`name\` is not valid)
     * RuntimeError (Node *[name]* not in tree)
    """
    if self._validate_node_name(name):
        raise RuntimeError("Argument `name` is not valid")
    self._node_in_tree(name)
    return [node for node in self._get_subtree(name) if self.is_leaf(node)]
r""" Get the sub-tree leaf node(s). :param name: Sub-tree root node name :type name: :ref:`NodeName` :rtype: list of :ref:`NodeName` :raises: * RuntimeError (Argument \`name\` is not valid) * RuntimeError (Node *[name]* not in tree)
def combine_pdf_as_bytes(pdfs: List[BytesIO]) -> bytes:
    """Combine PDFs and return a byte-string with the result.

    Arguments
    ---------
    pdfs
        A list of BytesIO representations of PDFs
    """
    writer = PdfWriter()
    for pdf in pdfs:
        writer.addpages(PdfReader(pdf).pages)
    bio = BytesIO()
    writer.write(bio)
    bio.seek(0)
    output = bio.read()
    bio.close()
    return output
def ratio_value_number_to_time_series_length(self, x):
    """
    As in tsfresh `ratio_value_number_to_time_series_length <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/feature_calculators.py#L830>`_

    Returns a factor which is 1 if all values in the time series occur only once,
    and below one if this is not the case. In principle, it just returns:

        # unique values / # values

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :return: the value of this feature
    :rtype: float
    """
    ratio = feature_calculators.ratio_value_number_to_time_series_length(x)
    logging.debug("ratio value number to time series length by tsfresh calculated")
    return ratio
def requires_public_key(func):
    """
    Decorator for functions that require the public key to be defined.
    By definition, this includes the private key; as such, it is enough to use
    this decorator to ensure both the public and private key are defined.
    """
    def func_wrapper(self, *args, **kwargs):
        if hasattr(self, "public_key"):
            func(self, *args, **kwargs)
        else:
            self.generate_public_key()
            func(self, *args, **kwargs)
    return func_wrapper
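A minimal usage sketch of the decorator above, assuming a class that exposes a generate_public_key() method (the class and method bodies here are illustrative only, not part of the original library):

    class KeyHolder:
        def generate_public_key(self):
            # placeholder: real code would derive and store an actual key pair
            self.public_key = "dummy-public-key"

        @requires_public_key
        def encrypt(self, message):
            print("encrypting %r with %r" % (message, self.public_key))

    KeyHolder().encrypt("hello")  # generates the key first, then runs encrypt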
def footprints_from_point(point, distance, footprint_type='building', retain_invalid=False):
    """
    Get footprints within some distance north, south, east, and west of
    a lat-long point.

    Parameters
    ----------
    point : tuple
        a lat-long point
    distance : numeric
        distance in meters
    footprint_type : string
        type of footprint to be downloaded. OSM tag key e.g. 'building',
        'landuse', 'place', etc.
    retain_invalid : bool
        if False discard any footprints with an invalid geometry

    Returns
    -------
    GeoDataFrame
    """
    bbox = bbox_from_point(point=point, distance=distance)
    north, south, east, west = bbox
    return create_footprints_gdf(north=north, south=south, east=east, west=west,
                                 footprint_type=footprint_type,
                                 retain_invalid=retain_invalid)
def _jx_expression(expr, lang): """ WRAP A JSON EXPRESSION WITH OBJECT REPRESENTATION """ if is_expression(expr): # CONVERT TO lang new_op = lang[expr.id] if not new_op: # CAN NOT BE FOUND, TRY SOME PARTIAL EVAL return language[expr.id].partial_eval() return expr # return new_op(expr.args) # THIS CAN BE DONE, BUT IT NEEDS MORE CODING, AND I WOULD EXPECT IT TO BE SLOW if expr is None: return TRUE elif expr in (True, False, None) or expr == None or isinstance(expr, (float, int, Decimal, Date)): return Literal(expr) elif is_text(expr): return Variable(expr) elif is_sequence(expr): return lang[TupleOp([_jx_expression(e, lang) for e in expr])] # expr = wrap(expr) try: items = items_(expr) for op, term in items: # ONE OF THESE IS THE OPERATOR full_op = operators.get(op) if full_op: class_ = lang.ops[full_op.id] if class_: return class_.define(expr) # THIS LANGUAGE DOES NOT SUPPORT THIS OPERATOR, GOTO BASE LANGUAGE AND GET THE MACRO class_ = language[op.id] output = class_.define(expr).partial_eval() return _jx_expression(output, lang) else: if not items: return NULL raise Log.error("{{instruction|json}} is not known", instruction=items) except Exception as e: Log.error("programmer error expr = {{value|quote}}", value=expr, cause=e)
def vdp_vlan_change_internal(self, vsw_cb_data, vdp_vlan, fail_reason): """Callback Function from VDP when provider VLAN changes. This will be called only during error cases when switch reloads or when compute reloads. """ LOG.debug("In VDP VLAN change VLAN %s", vdp_vlan) if not vsw_cb_data: LOG.error("NULL vsw_cb_data Info received") return net_uuid = vsw_cb_data.get('net_uuid') port_uuid = vsw_cb_data.get('port_uuid') lvm = self.local_vlan_map.get(net_uuid) if not lvm: LOG.error("Network %s is not in the local vlan map", net_uuid) return lldpad_port = self.lldpad_info if not lldpad_port: LOG.error("There is no LLDPad port available.") return exist_vdp_vlan = lvm.late_binding_vlan lvid = lvm.vlan LOG.debug("lvid %(lvid)s exist %(vlan)s", {'lvid': lvid, 'vlan': exist_vdp_vlan}) lvm.decr_reset_vlan(port_uuid, vdp_vlan) lvm.set_fail_reason(port_uuid, fail_reason) self.vdp_vlan_cb(port_uuid, lvid, vdp_vlan, fail_reason) if vdp_vlan == exist_vdp_vlan: LOG.debug("No change in provider VLAN %s", vdp_vlan) return # Logic is if the VLAN changed to 0, clear the flows only if none of # the VM's in the network has a valid VLAN. if not ovs_lib.is_valid_vlan_tag(vdp_vlan): if ovs_lib.is_valid_vlan_tag(exist_vdp_vlan) and not ( lvm.any_valid_vlan()): # Clear the old flows LOG.debug("Clearing flows, no valid vlans") self.program_vm_ovs_flows(lvid, exist_vdp_vlan, 0) lvm.late_binding_vlan = 0 lvm.vdp_nego_req = False else: # If any VM gets a VLAN change, we immediately modify the flow. # This is done to not wait for all VM's VLAN getting updated from # switch. Logic is if any VM gts a new VLAN, the other VM's of the # same network will be updated eventually. if vdp_vlan != exist_vdp_vlan and ( ovs_lib.is_valid_vlan_tag(vdp_vlan)): # Add the new flows and remove the old flows LOG.warning("Non Zero VDP Vlan change %s %s" % (vdp_vlan, exist_vdp_vlan)) self.program_vm_ovs_flows(lvid, exist_vdp_vlan, vdp_vlan) lvm.late_binding_vlan = vdp_vlan lvm.vdp_nego_req = False else: LOG.error("Invalid or same VLAN Exist %(exist)s " "New %(new)s VLANs", {'exist': exist_vdp_vlan, 'new': vdp_vlan})
def _put_file(self, file):
    """Send PUT request to S3 with file contents"""
    post_params = {
        'file_size': file.size,
        'file_hash': file.md5hash(),
        'content_type': self._get_content_type(file),
    }
    headers = self._request_headers('PUT',
                                    file.prefixed_name,
                                    post_params=post_params)
    with closing(HTTPConnection(self.netloc)) as conn:
        conn.request('PUT',
                     file.prefixed_name,
                     file.read(),
                     headers=headers)
        response = conn.getresponse()
        if response.status not in (200,):
            raise S3IOError(
                'py3s3 PUT error. '
                'Response status: {}. '
                'Reason: {}. '
                'Response Text: \n'
                '{}'.format(response.status, response.reason, response.read()))
def reduce(self, colors):
    """Converts color codes into optimized text

    This optimizer works by merging adjacent colors so we don't have to
    repeat the same escape codes for each pixel. There is no loss of
    information.

    :param colors: Iterable yielding an xterm color code for each pixel,
                   None to indicate a transparent pixel, or ``'EOL'`` to
                   indicate the end of a line.
    :return: Yields lines of optimized text.
    """
    need_reset = False
    line = []
    for color, items in itertools.groupby(colors):
        if color is None:
            if need_reset:
                line.append("\x1b[49m")
                need_reset = False
            line.append(self.pad * len(list(items)))
        elif color == "EOL":
            if need_reset:
                line.append("\x1b[49m")
                need_reset = False
                yield "".join(line)
            else:
                line.pop()
                yield "".join(line)
            line = []
        else:
            need_reset = True
            line.append("\x1b[48;5;%dm%s" % (
                color, self.pad * len(list(items))))
def as_graph(self) -> Digraph: # pragma: no cover """Renders the discrimination net as graphviz digraph.""" if Digraph is None: raise ImportError('The graphviz package is required to draw the graph.') dot = Digraph() nodes = set() queue = [self._root] while queue: state = queue.pop(0) if not state.payload: dot.node('n{!s}'.format(state.id), '', {'shape': ('circle' if state else 'doublecircle')}) else: dot.node('n{!s}'.format(state.id), '\n'.join(map(str, state.payload)), {'shape': 'box'}) for next_state in state.values(): if next_state.id not in nodes: queue.append(next_state) nodes.add(state.id) nodes = set() queue = [self._root] while queue: state = queue.pop(0) if state.id in nodes: continue nodes.add(state.id) for (label, other) in state.items(): dot.edge('n{!s}'.format(state.id), 'n{!s}'.format(other.id), _term_str(label)) if other.id not in nodes: queue.append(other) return dot
def get_adjacency_matrix(self, fmt='coo'): r""" Returns an adjacency matrix in the specified sparse format, with 1's indicating the non-zero values. Parameters ---------- fmt : string, optional The sparse storage format to return. Options are: **'coo'** : (default) This is the native format of OpenPNM data **'lil'** : Enables row-wise slice of the matrix **'csr'** : Favored by most linear algebra routines **'dok'** : Enables subscript access of locations Notes ----- This method will only create the requested matrix in the specified format if one is not already saved on the object. If not present, this method will create and return the matrix, as well as store it for future use. To obtain a matrix with weights other than ones at each non-zero location use ``create_adjacency_matrix``. """ # Retrieve existing matrix if available if fmt in self._am.keys(): am = self._am[fmt] elif self._am.keys(): am = self._am[list(self._am.keys())[0]] tofmt = getattr(am, 'to'+fmt) am = tofmt() self._am[fmt] = am else: am = self.create_adjacency_matrix(weights=self.Ts, fmt=fmt) self._am[fmt] = am return am
r""" Returns an adjacency matrix in the specified sparse format, with 1's indicating the non-zero values. Parameters ---------- fmt : string, optional The sparse storage format to return. Options are: **'coo'** : (default) This is the native format of OpenPNM data **'lil'** : Enables row-wise slice of the matrix **'csr'** : Favored by most linear algebra routines **'dok'** : Enables subscript access of locations Notes ----- This method will only create the requested matrix in the specified format if one is not already saved on the object. If not present, this method will create and return the matrix, as well as store it for future use. To obtain a matrix with weights other than ones at each non-zero location use ``create_adjacency_matrix``.
def list_variables(self):
    """
    List available variables, applying any filters.
    """
    station_codes = self._get_station_codes()
    station_codes = self._apply_features_filter(station_codes)
    variables = self._list_variables(station_codes)
    if hasattr(self, "_variables") and self.variables is not None:
        variables.intersection_update(set(self.variables))
    return list(variables)
def create_extended_model(model, db_penalty=None, ex_penalty=None, tp_penalty=None, penalties=None): """Create an extended model for gap-filling. Create a :class:`psamm.metabolicmodel.MetabolicModel` with all reactions added (the reaction database in the model is taken to be the universal database) and also with artificial exchange and transport reactions added. Return the extended :class:`psamm.metabolicmodel.MetabolicModel` and a weight dictionary for added reactions in that model. Args: model: :class:`psamm.datasource.native.NativeModel`. db_penalty: penalty score for database reactions, default is `None`. ex_penalty: penalty score for exchange reactions, default is `None`. tb_penalty: penalty score for transport reactions, default is `None`. penalties: a dictionary of penalty scores for database reactions. """ # Create metabolic model model_extended = model.create_metabolic_model() extra_compartment = model.extracellular_compartment compartment_ids = set(c.id for c in model.compartments) # Add database reactions to extended model if len(compartment_ids) > 0: logger.info( 'Using all database reactions in compartments: {}...'.format( ', '.join('{}'.format(c) for c in compartment_ids))) db_added = add_all_database_reactions(model_extended, compartment_ids) else: logger.warning( 'No compartments specified in the model; database reactions will' ' not be used! Add compartment specification to model to include' ' database reactions for those compartments.') db_added = set() # Add exchange reactions to extended model logger.info( 'Using artificial exchange reactions for compartment: {}...'.format( extra_compartment)) ex_added = add_all_exchange_reactions( model_extended, extra_compartment, allow_duplicates=True) # Add transport reactions to extended model boundaries = model.compartment_boundaries if len(boundaries) > 0: logger.info( 'Using artificial transport reactions for the compartment' ' boundaries: {}...'.format( '; '.join('{}<->{}'.format(c1, c2) for c1, c2 in boundaries))) tp_added = add_all_transport_reactions( model_extended, boundaries, allow_duplicates=True) else: logger.warning( 'No compartment boundaries specified in the model;' ' artificial transport reactions will not be used!') tp_added = set() # Add penalty weights on reactions weights = {} if db_penalty is not None: weights.update((rxnid, db_penalty) for rxnid in db_added) if tp_penalty is not None: weights.update((rxnid, tp_penalty) for rxnid in tp_added) if ex_penalty is not None: weights.update((rxnid, ex_penalty) for rxnid in ex_added) if penalties is not None: for rxnid, penalty in iteritems(penalties): weights[rxnid] = penalty return model_extended, weights
def sort_by_speedup(self, reverse=True):
    """Sort the configurations in place; items with the highest speedup come first."""
    self._confs.sort(key=lambda c: c.speedup, reverse=reverse)
    return self
def _getHead(self, branch): """Return a deferred for branch head revision or None. We'll get an error if there is no head for this branch, which is probably a good thing, since it's probably a misspelling (if really buildbotting a branch that does not have any changeset yet, one shouldn't be surprised to get errors) """ d = utils.getProcessOutput(self.hgbin, ['heads', '-r', branch, '--template={rev}' + os.linesep], path=self._absWorkdir(), env=os.environ, errortoo=False) @d.addErrback def no_head_err(exc): log.err("hgpoller: could not find revision %r in repository %r" % ( branch, self.repourl)) @d.addCallback def results(heads): if not heads: return if len(heads.split()) > 1: log.err(("hgpoller: caught several heads in branch %r " "from repository %r. Staying at previous revision" "You should wait until the situation is normal again " "due to a merge or directly strip if remote repo " "gets stripped later.") % (branch, self.repourl)) return # in case of whole reconstruction, are we sure that we'll get the # same node -> rev assignations ? return heads.strip().decode(self.encoding) return d
def _run_events(self, tag, stage=None):
    """Run tests marked with a particular tag and stage"""
    self._run_event_methods(tag, stage)
    self._run_tests(tag, stage)
def search(self, query_string, **kwargs):
    """
    The main search method
    :param query_string: The string to pass to Elasticsearch. e.g. '*:*'
    :param kwargs: start_offset, end_offset, result_class
    :return: result_class instance
    """
    self.index_name = self._index_name_for_language(translation.get_language())
    # self.log.debug('search method called (%s): %s' %
    #                (translation.get_language(), query_string))
    return super(ElasticsearchMultilingualSearchBackend, self).search(query_string,
                                                                      **kwargs)
def weeks_per_year(year):
    '''Number of ISO weeks in a year'''
    # 53 weeks: any year starting on Thursday and any leap year starting on Wednesday
    jan1 = jwday(gregorian.to_jd(year, 1, 1))
    if jan1 == THU or (jan1 == WED and isleap(year)):
        return 53
    else:
        return 52
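A quick sanity check of the rule above, assuming the module's jwday/gregorian helpers and the THU/WED constants are available; 2015 began on a Thursday and has 53 ISO weeks, while 2016 has the usual 52:

    assert weeks_per_year(2015) == 53
    assert weeks_per_year(2016) == 52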
def _process_op_err(self, e):
    """
    Process errors which occurred while reading or parsing the protocol.
    If allow_reconnect is enabled it will try to switch the server to which
    it is currently connected, otherwise it will disconnect.
    """
    if self.is_connecting or self.is_closed or self.is_reconnecting:
        return

    if self.options["allow_reconnect"] and self.is_connected:
        self._status = Client.RECONNECTING
        self._ps.reset()

        if self._reconnection_task is not None and not self._reconnection_task.cancelled():
            # Cancel the previous task in case it may still be running.
            self._reconnection_task.cancel()

        self._reconnection_task = self._loop.create_task(self._attempt_reconnect())
    else:
        self._process_disconnect()
        self._err = e
        yield from self._close(Client.CLOSED, True)
def RawData(self):
    """Returns the values in each section."""
    result = collections.OrderedDict()
    i = 0
    while True:
        try:
            name, value, value_type = winreg.EnumValue(self._AccessRootKey(), i)
            # Only support strings here.
            if value_type == winreg.REG_SZ:
                precondition.AssertType(value, Text)
                result[name] = value
        except OSError:
            break
        i += 1
    return result
def top_k_logits(logits, k):
    """
    Masks everything but the k top entries as -infinity (-1e10).
    Used to mask logits such that e^-infinity -> 0 won't contribute to the
    sum of the denominator.
    """
    if k == 0:
        return logits
    else:
        values = torch.topk(logits, k)[0]
        batch_mins = values[:, -1].view(-1, 1).expand_as(logits)
        return torch.where(logits < batch_mins,
                           torch.ones_like(logits) * -1e10,
                           logits)
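A hedged sketch of how this masking is typically used for top-k sampling (assumes PyTorch; the vocabulary size and k are arbitrary choices, not values from the original code):

    import torch

    logits = torch.randn(2, 50257)           # batch of 2, vocab of 50257
    filtered = top_k_logits(logits, k=40)    # entries outside the top 40 become -1e10
    probs = torch.softmax(filtered, dim=-1)  # masked entries contribute ~0 probability
    next_token = torch.multinomial(probs, num_samples=1)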
def _get_notify_msg_and_payload(result, stream):
    """Get notify message and payload dict"""
    token = stream.advance_past_chars(["=", "*"])
    token = int(token) if token != "" else None
    logger.debug("%s", fmt_green("parsing message"))
    message = stream.advance_past_chars([","])
    logger.debug("parsed message")
    logger.debug("%s", fmt_green(message))
    payload = _parse_dict(stream)
    return token, message.strip(), payload
def ExportNEP2(self, passphrase): """ Export the encrypted private key in NEP-2 format. Args: passphrase (str): The password to encrypt the private key with, as unicode string Returns: str: The NEP-2 encrypted private key """ if len(passphrase) < 2: raise ValueError("Passphrase must have a minimum of 2 characters") # Hash address twice, then only use the first 4 bytes address_hash_tmp = hashlib.sha256(self.GetAddress().encode("utf-8")).digest() address_hash_tmp2 = hashlib.sha256(address_hash_tmp).digest() address_hash = address_hash_tmp2[:4] # Normalize password and run scrypt over it with the address_hash pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8') derived = scrypt.hash(pwd_normalized, address_hash, N=SCRYPT_ITERATIONS, r=SCRYPT_BLOCKSIZE, p=SCRYPT_PARALLEL_FACTOR, buflen=SCRYPT_KEY_LEN_BYTES) # Split the scrypt-result into two parts derived1 = derived[:32] derived2 = derived[32:] # Run XOR and encrypt the derived parts with AES xor_ed = xor_bytes(bytes(self.PrivateKey), derived1) cipher = AES.new(derived2, AES.MODE_ECB) encrypted = cipher.encrypt(xor_ed) # Assemble the final result assembled = bytearray() assembled.extend(NEP_HEADER) assembled.extend(NEP_FLAG) assembled.extend(address_hash) assembled.extend(encrypted) # Finally, encode with Base58Check encrypted_key_nep2 = base58.b58encode_check(bytes(assembled)) return encrypted_key_nep2.decode("utf-8")
def sg_float(tensor, opt):
    r"""Casts a tensor to floatx.

    See `tf.cast()` in tensorflow.

    Args:
      tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
      opt:
        name: If provided, it replaces current tensor's name.

    Returns:
      A `Tensor` or `SparseTensor` with same shape as `tensor`.
    """
    return tf.cast(tensor, tf.sg_floatx, name=opt.name)
r"""Casts a tensor to floatx. See `tf.cast()` in tensorflow. Args: tensor: A `Tensor` or `SparseTensor` (automatically given by chain). opt: name : If provided, it replaces current tensor's name Returns: A `Tensor` or `SparseTensor` with same shape as `tensor`.
def submit(args):
    """Submit function of local jobs."""
    gpus = args.gpus.strip().split(',')

    def mthread_submit(nworker, nserver, envs):
        """
        Customized submit script that submits nslave jobs, each of which must
        take args as a parameter. Note this can be a lambda function containing
        additional parameters in input.

        Parameters
        ----------
        nworker: number of slave processes to start up
        nserver: number of server nodes to start up
        envs: environment variables to be added to the starting programs
        """
        procs = {}
        for i, gpu in enumerate(gpus):
            for j in range(args.num_threads):
                procs[i] = Thread(target=exec_cmd,
                                  args=(args.command + ['--gpus=%s' % gpu],
                                        'worker', i * args.num_threads + j, envs))
                procs[i].setDaemon(True)
                procs[i].start()
        for i in range(len(gpus) * args.num_threads,
                       len(gpus) * args.num_threads + nserver):
            procs[i] = Thread(target=exec_cmd,
                              args=(args.command, 'server', i, envs))
            procs[i].setDaemon(True)
            procs[i].start()

    # call submit, with nslave, the commands to run each job and submit function
    tracker.submit(args.num_threads * len(gpus), args.num_servers,
                   fun_submit=mthread_submit,
                   pscmd=(' '.join(args.command)))
def get_playlist_songs(self, playlist_id, limit=1000):
    """Get all songs of a playlist.

    :params playlist_id: playlist id.
    :params limit: length of result returned by weapi.
    :return: a list of Song objects.
    """
    url = 'http://music.163.com/weapi/v3/playlist/detail?csrf_token='
    csrf = ''
    params = {'id': playlist_id, 'offset': 0, 'total': True,
              'limit': limit, 'n': 1000, 'csrf_token': csrf}
    result = self.post_request(url, params)
    songs = result['playlist']['tracks']
    songs = [Song(song['id'], song['name']) for song in songs]
    return songs
def Approval(self, username, approval_id):
    """Returns a reference to an approval."""
    return ClientApprovalRef(
        client_id=self.client_id,
        username=username,
        approval_id=approval_id,
        context=self._context)
def unicode_urlencode(query, doseq=True):
    """
    Custom wrapper around urlencode to support unicode

    Python urlencode doesn't handle unicode well so we need to convert to
    bytestrings before using it:
    http://stackoverflow.com/questions/6480723/urllib-urlencode-doesnt-like-unicode-values-how-about-this-workaround
    """
    pairs = []
    for key, value in query.items():
        if isinstance(value, list):
            value = list(map(to_utf8, value))
        else:
            value = to_utf8(value)
        pairs.append((to_utf8(key), value))
    encoded_query = dict(pairs)
    xx = urlencode(encoded_query, doseq)
    return xx
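Illustrative call, assuming the module's to_utf8 helper is available; the exact ordering of the encoded pairs may vary:

    params = {u'café': u'naïve', u'tags': [u'résumé', u'über']}
    print(unicode_urlencode(params))
    # e.g. caf%C3%A9=na%C3%AFve&tags=r%C3%A9sum%C3%A9&tags=%C3%BCber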
def clean_global_runtime_state(reset_subsystem=False):
    """Resets the global runtime state of a pants runtime for cleaner forking.

    :param bool reset_subsystem: Whether or not to clean Subsystem global state.
    """
    if reset_subsystem:
        # Reset subsystem state.
        Subsystem.reset()

    # Reset Goals and Tasks.
    Goal.clear()

    # Reset global plugin state.
    BuildConfigInitializer.reset()
def reasonable_desired_version(self, desired_version, allow_equal=False, allow_patch_skip=False): """ Determine whether the desired version is a reasonable next version. Parameters ---------- desired_version: str the proposed next version name """ try: desired_version = desired_version.base_version except: pass (new_major, new_minor, new_patch) = \ map(int, desired_version.split('.')) tag_versions = self._versions_from_tags() if not tag_versions: # no tags yet, and legal version is legal! return "" max_version = max(self._versions_from_tags()).base_version (old_major, old_minor, old_patch) = \ map(int, str(max_version).split('.')) update_str = str(max_version) + " -> " + str(desired_version) v_desired = vers.Version(desired_version) v_max = vers.Version(max_version) if allow_equal and v_desired == v_max: return "" if v_desired < v_max: return ("Bad update: New version doesn't increase on last tag: " + update_str + "\n") bad_update = skipped_version((old_major, old_minor, old_patch), (new_major, new_minor, new_patch), allow_patch_skip) msg = "" if bad_update: msg = ("Bad update: Did you skip a version from " + update_str + "?\n") return msg
def record(self):
    # type: () -> bytes
    '''
    A method to generate a string representing this El Torito Entry.

    Parameters:
     None.
    Returns:
     String representing this El Torito Entry.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Entry not yet initialized')

    return struct.pack(self.FMT, self.boot_indicator, self.boot_media_type,
                       self.load_segment, self.system_type, 0,
                       self.sector_count, self.load_rba,
                       self.selection_criteria_type,
                       self.selection_criteria)
def verify_constraints(constraints):
    """
    Verify values returned from :meth:`make_constraints`.

    Used internally during the :meth:`build` process.

    :param constraints: value returned from :meth:`make_constraints`
    :type constraints: :class:`list`
    :raises ValueError: if verification fails
    """
    # verify return is a list
    if not isinstance(constraints, list):
        raise ValueError(
            "invalid type returned by make_constraints: %r (must be a list)"
            % constraints
        )

    # verify each list element is a Constraint instance
    for constraint in constraints:
        if not isinstance(constraint, Constraint):
            raise ValueError(
                "invalid constraint type: %r (must be a Constraint)"
                % constraint
            )
def _fetch_and_parse_messages(self, mailing_list, from_date): """Fetch and parse the messages from a mailing list""" from_date = datetime_to_utc(from_date) nmsgs, imsgs, tmsgs = (0, 0, 0) for mbox in mailing_list.mboxes: tmp_path = None try: tmp_path = self._copy_mbox(mbox) for message in self.parse_mbox(tmp_path): tmsgs += 1 if not self._validate_message(message): imsgs += 1 continue # Ignore those messages sent before the given date dt = str_to_datetime(message[MBox.DATE_FIELD]) if dt < from_date: logger.debug("Message %s sent before %s; skipped", message['unixfrom'], str(from_date)) tmsgs -= 1 continue # Convert 'CaseInsensitiveDict' to dict message = self._casedict_to_dict(message) nmsgs += 1 logger.debug("Message %s parsed", message['unixfrom']) yield message except (OSError, EOFError) as e: logger.warning("Ignoring %s mbox due to: %s", mbox.filepath, str(e)) except Exception as e: if tmp_path and os.path.exists(tmp_path): os.remove(tmp_path) raise e finally: if tmp_path and os.path.exists(tmp_path): os.remove(tmp_path) logger.info("Done. %s/%s messages fetched; %s ignored", nmsgs, tmsgs, imsgs)
def read_interoperability_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read Interoperability tags from file and return as dict."""
    tag_names = {1: 'InteroperabilityIndex'}
    return read_tags(fh, byteorder, offsetsize, tag_names, maxifds=1)
def make_while_loop(test_and_body_instrs, else_body_instrs, context):
    """
    Make an ast.While node.

    Parameters
    ----------
    test_and_body_instrs : deque
        Queue of instructions forming the loop test expression and body.
    else_body_instrs : deque
        Queue of instructions forming the else block of the loop.
    context : DecompilationContext
    """
    top_of_loop = test_and_body_instrs[0]

    # The popped elements are the stack_builders for the loop test expression.
    # The top of the loop_body_instrs is either a POP_JUMP_IF_TRUE or a
    # POP_JUMP_IF_FALSE.
    test, body_instrs = make_while_loop_test_expr(test_and_body_instrs)
    body, orelse_body = make_loop_body_and_orelse(
        top_of_loop, body_instrs, else_body_instrs, context,
    )

    # while-else blocks are not yet supported or handled.
    return ast.While(test=test, body=body, orelse=orelse_body)
def set_aromatic(self):
    """set the cycle to be an aromatic ring"""
    # XXX FIX ME
    # this probably shouldn't be here
    for atom in self.atoms:
        atom.aromatic = 1
    for bond in self.bonds:
        bond.aromatic = 1
        bond.bondorder = 1.5
        bond.bondtype = 4
        bond.symbol = ":"
        bond.fixed = 1
    self.aromatic = 1
def MessageSetItemDecoder(extensions_by_number): """Returns a decoder for a MessageSet item. The parameter is the _extensions_by_number map for the message class. The message set message looks like this: message MessageSet { repeated group Item = 1 { required int32 type_id = 2; required string message = 3; } } """ type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT) message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED) item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP) local_ReadTag = ReadTag local_DecodeVarint = _DecodeVarint local_SkipField = SkipField def DecodeItem(buffer, pos, end, message, field_dict): message_set_item_start = pos type_id = -1 message_start = -1 message_end = -1 # Technically, type_id and message can appear in any order, so we need # a little loop here. while 1: (tag_bytes, pos) = local_ReadTag(buffer, pos) if tag_bytes == type_id_tag_bytes: (type_id, pos) = local_DecodeVarint(buffer, pos) elif tag_bytes == message_tag_bytes: (size, message_start) = local_DecodeVarint(buffer, pos) pos = message_end = message_start + size elif tag_bytes == item_end_tag_bytes: break else: pos = SkipField(buffer, pos, end, tag_bytes) if pos == -1: raise _DecodeError('Missing group end tag.') if pos > end: raise _DecodeError('Truncated message.') if type_id == -1: raise _DecodeError('MessageSet item missing type_id.') if message_start == -1: raise _DecodeError('MessageSet item missing message.') extension = extensions_by_number.get(type_id) if extension is not None: value = field_dict.get(extension) if value is None: value = field_dict.setdefault( extension, extension.message_type._concrete_class()) if value._InternalParse(buffer, message_start,message_end) != message_end: # The only reason _InternalParse would return early is if it encountered # an end-group tag. raise _DecodeError('Unexpected end-group tag.') else: if not message._unknown_fields: message._unknown_fields = [] message._unknown_fields.append((MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos])) return pos return DecodeItem
def _fetchAllChildren(self): """ Fetches all sub groups and variables that this group contains. """ assert self._h5Group is not None, "dataset undefined (file not opened?)" assert self.canFetchChildren(), "canFetchChildren must be True" childItems = [] for childName, h5Child in self._h5Group.items(): if isinstance(h5Child, h5py.Group): childItems.append(H5pyGroupRti(h5Child, nodeName=childName, fileName=self.fileName)) elif isinstance(h5Child, h5py.Dataset): if len(h5Child.shape) == 0: childItems.append(H5pyScalarRti(h5Child, nodeName=childName, fileName=self.fileName)) else: childItems.append(H5pyDatasetRti(h5Child, nodeName=childName, fileName=self.fileName)) elif isinstance(h5Child, h5py.Datatype): #logger.debug("Ignored DataType item: {}".format(childName)) pass else: logger.warn("Ignored {}. It has an unexpected HDF-5 type: {}" .format(childName, type(h5Child))) return childItems
def _schema_nodes(self): """parse self._ontology_file into a graph""" name, ext = os.path.splitext(self._ontology_file) if ext in ['.ttl']: self._ontology_parser_function = \ lambda s: rdflib.Graph().parse(s, format='n3') else: self._ontology_parser_function = \ lambda s: pyRdfa().graph_from_source(s) if not self._ontology_parser_function: raise ValueError( "No function found to parse ontology. %s" % self.errorstring_base) if not self._ontology_file: raise ValueError( "No ontology file specified. %s" % self.errorstring_base) if not self.lexicon: raise ValueError( "No lexicon object assigned. %s" % self.errorstring_base) latest_file = self._read_schema() try: self.graph = self._ontology_parser_function(latest_file) except: raise IOError("Error parsing ontology at %s" % latest_file) for subj, pred, obj in self.graph: self.ontology[subj].append((pred, obj)) yield (subj, pred, obj)
def get(self, sid):
    """
    Constructs a MessageContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.chat.v2.service.channel.message.MessageContext
    :rtype: twilio.rest.chat.v2.service.channel.message.MessageContext
    """
    return MessageContext(
        self._version,
        service_sid=self._solution['service_sid'],
        channel_sid=self._solution['channel_sid'],
        sid=sid,
    )
def liste_stations(self, station=None, detail=False):
    """
    List the stations.

    Parameters:
    station: a valid station name (if empty, list all stations)
    detail: if True, show more detail about the station(s).
    """
    condition = ""
    if station:
        station = _format(station)
        condition = "WHERE IDENTIFIANT IN ('%s')" % station

    select = ""
    if detail:
        select = """, ISIT AS DESCRIPTION,
        NO_TELEPHONE AS TELEPHONE,
        ADRESSE_IP,
        LONGI AS LONGITUDE,
        LATI AS LATITUDE,
        ALTI AS ALTITUDE,
        AXE AS ADR,
        CODE_POSTAL AS CP,
        FLAG_VALID AS VALID"""

    _sql = """SELECT NSIT AS NUMERO,
    IDENTIFIANT AS STATION %s
    FROM STATION
    %s
    ORDER BY NSIT""" % (select, condition)

    return psql.read_sql(_sql, self.conn)
def _wrap_paginated_response(cls, request, response, controls, data, head=None):
    """Builds the metadata for a paginated response and wraps everything in a
    JSON encoded web.Response
    """
    paging_response = response['paging']
    if head is None:
        head = response['head_id']
    link = cls._build_url(
        request,
        head=head,
        start=paging_response['start'],
        limit=paging_response['limit'])

    paging = {}
    limit = controls.get('limit')
    start = controls.get("start")
    paging["limit"] = limit
    paging["start"] = start

    # If there are no resources, there should be nothing else in paging
    if paging_response.get("next") == "":
        return cls._wrap_response(
            request,
            data=data,
            metadata={
                'head': head,
                'link': link,
                'paging': paging
            })

    next_id = paging_response['next']
    paging['next_position'] = next_id

    # Builds paging urls specific to this response
    def build_pg_url(start=None):
        return cls._build_url(request, head=head, limit=limit, start=start)

    paging['next'] = build_pg_url(paging_response['next'])

    return cls._wrap_response(
        request,
        data=data,
        metadata={
            'head': head,
            'link': link,
            'paging': paging
        })
def plot_polynomial( log, title, polynomialDict, orginalDataDictionary=False, pathToOutputPlotsFolder="~/Desktop", xRange=False, xlabel=False, ylabel=False, xAxisLimits=False, yAxisLimits=False, yAxisInvert=False, prependNum=False, legend=False): """ *Plot a dictionary of numpy lightcurves polynomials* **Key Arguments:** - ``log`` -- logger - ``title`` -- title for the plot - ``polynomialDict`` -- dictionary of polynomials { label01 : poly01, label02 : poly02 } - ``orginalDataDictionary`` -- the orginal data points {name: [x, y]} - ``pathToOutputPlotsFolder`` -- path the the output folder to save plot to - ``xRange`` -- the x-range for the polynomial [xmin, xmax, interval] - ``xlabel`` -- xlabel - ``ylabel`` -- ylabel - ``xAxisLimits`` -- the x-limits for the axes [xmin, xmax] - ``yAxisLimits`` -- the y-limits for the axes [ymin, ymax] - ``yAxisInvert`` -- invert the y-axis? Useful for lightcurves - ``prependNum`` -- prepend this number to the output filename - ``legend`` -- plot a legend? **Return:** - None """ ################ > IMPORTS ################ ## STANDARD LIB ## import sys ## THIRD PARTY ## import matplotlib.pyplot as plt import numpy as np ## LOCAL APPLICATION ## ################ >ACTION(S) ################ colors = { 'green': '#859900', 'blue': '#268bd2', 'red': '#dc322f', 'gray': '#D2D1D1', 'orange': '#cb4b16', 'violet': '#6c71c4', 'cyan': '#2aa198', 'magenta': '#d33682', 'yellow': '#b58900' } if not xRange: log.error('please provide an x-range') sys.exit(1) ax = plt.subplot(111) if len(xRange) == 2: x = np.arange(xRange[0] * 4, xRange[1] * 4, 1) x = x / 4. else: x = np.arange(xRange[0], xRange[1], xRange[2]) if xAxisLimits: ax.set_xlim(xAxisLimits[0], xAxisLimits[1]) else: overShoot = (xRange[1] - xRange[0]) / 10. ax.set_xlim(xRange[0] - overShoot, xRange[1] + overShoot) if yAxisLimits: ax.set_ylim(yAxisLimits[0], yAxisLimits[1]) theseColors = [colors['blue'], colors[ 'green'], colors['red'], colors['violet']] count = 0 if orginalDataDictionary: for name, data in orginalDataDictionary.iteritems(): ax.plot(data[0], data[1], '.', label=name, color=theseColors[count]) count += 1 if count == 4: count = 0 count = 0 for snType, poly in polynomialDict.iteritems(): log.debug('x: %s' % (x,)) ax.plot(x, poly(x), label='%s' % (snType,), color=theseColors[count]) count += 1 if count == 4: count = 0 # Shink current axis by 20% box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) # Put a legend to the right of the current axis if legend: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': 8}) ax.titlesize = 'medium' # fontsize of the axes title ax.labelsize = 'medium' # fontsize of the x any y labels if xlabel: plt.xlabel(xlabel, fontsize='small') if ylabel: plt.ylabel(ylabel, fontsize='small') if title: plt.title(title, fontsize='small', verticalalignment='bottom', linespacing=0.2) if yAxisInvert: ax.invert_yaxis() # fileName = pathToOutputPlotsFolder + title.replace(" ", "_") + ".ps" # plt.savefig(fileName) if prependNum: title = "%02d_%s" % (prependNum, title) thisTitle = title.replace(" ", "_") thisTitle = thisTitle.replace("-", "_") fileName = pathToOutputPlotsFolder + thisTitle + ".png" imageLink = """ ![%s_plot](%s) """ % (thisTitle, fileName) plt.savefig(fileName) plt.clf() # clear figure return imageLink
def update_progress_bar( go, optext, start, total_files, files_sofar, total_bytes, bytes_sofar, stdin_upload=False): # type: (blobxfer.models.options.General, str, datetime.datetime, int, # int, int, int, bool) -> None """Update the progress bar :param blobxfer.models.options.General go: general options :param str optext: operation prefix text :param datetime.datetime start: start time :param int total_files: total number of files :param int files_sofar: files transfered so far :param int total_bytes: total number of bytes :param int bytes_sofar: bytes transferred so far :param bool stdin_upload: stdin upload """ if (go.quiet or not go.progress_bar or blobxfer.util.is_none_or_empty(go.log_file) or start is None): return diff = (blobxfer.util.datetime_now() - start).total_seconds() if diff <= 0: # arbitrarily give a small delta diff = 1e-9 if total_bytes is None or total_bytes == 0 or bytes_sofar > total_bytes: done = 0 else: done = float(bytes_sofar) / total_bytes rate = bytes_sofar / blobxfer.util.MEGABYTE / diff if optext == 'synccopy': rtext = 'sync-copied' else: rtext = optext + 'ed' if total_files is None: fprog = 'n/a' else: fprog = '{}/{}'.format(files_sofar, total_files) if stdin_upload: sys.stdout.write( ('\r{0} progress: [{1:30s}] n/a % {2:12.3f} MiB/sec, ' '{3} {4}').format( optext, '>' * int(total_bytes % 30), rate, fprog, rtext) ) else: sys.stdout.write( ('\r{0} progress: [{1:30s}] {2:.2f}% {3:12.3f} MiB/sec, ' '{4} {5}').format( optext, '>' * int(done * 30), done * 100, rate, fprog, rtext) ) if files_sofar == total_files: sys.stdout.write('\n') sys.stdout.flush()
def import_file_object(filename):
    """
    Summary:
        Imports block filesystem object
    Args:
        :filename (str): block filesystem object
    Returns:
        dictionary obj (valid json file), file data object
    """
    try:
        handle = open(filename, 'r')
        file_obj = handle.read()
        dict_obj = json.loads(file_obj)
    except IOError as e:
        logger.critical(
            'import_file_object: %s error opening %s' % (str(e), str(filename))
        )
        raise e
    except ValueError:
        logger.info(
            '%s: import_file_object: %s not json. file object returned' %
            (inspect.stack()[0][3], str(filename))
        )
        return file_obj    # reg file, not valid json
    return dict_obj
def downloadMARCXML(doc_id, library, base="nkc"): """ Download MARC XML document with given `doc_id` from given `library`. Args: doc_id (DocumentID): You will get this from :func:`getDocumentIDs`. library (str): "``NKC01``" in our case, but don't worry, :func:`getDocumentIDs` adds library specification into :class:`DocumentID` named tuple. Returns: str: MARC XML unicode string. Raises: LibraryNotFoundException DocumentNotFoundException """ downer = Downloader() data = downer.download( ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute( DOC_ID=doc_id, LIBRARY=library ) ) dom = dhtmlparser.parseString(data) # check if there are any errors # bad library error error = dom.find("login") if error: error_msg = error[0].find("error") if error_msg: raise LibraryNotFoundException( "Can't download document doc_id: '" + str(doc_id) + "' " + "(probably bad library: '" + library + "')!\nMessage: " + "\n".join(map(lambda x: x.getContent(), error_msg)) ) # another error - document not found error = dom.find("ill-get-doc") if error: error_msg = error[0].find("error") if error_msg: raise DocumentNotFoundException( "\n".join(map(lambda x: x.getContent(), error_msg)) ) return data
def get_caller_module():
    """
    Returns the name of the caller's module as a string.

    >>> get_caller_module()
    '__main__'
    """
    stack = inspect.stack()
    assert len(stack) > 1
    caller = stack[2][0]
    return caller.f_globals['__name__']
def paginate(self, request, offset=0, limit=None):
    """Paginate queryset."""
    return self.collection.offset(offset).limit(limit), self.collection.count()
def rename(self, **mapping):
    """
    The rename method allows stream parameters to be allocated to new
    names to avoid clashes with other stream parameters of the same name.

    Returns a new clone of the stream instance with the specified name
    mapping.
    """
    params = {k: v for k, v in self.get_param_values() if k != 'name'}
    return self.__class__(rename=mapping,
                          source=(self._source() if self._source else None),
                          linked=self.linked, **params)
def identify_missing(self, df, check_start=True):
    """
    Identify missing data.

    Parameters
    ----------
    df : pd.DataFrame()
        Dataframe to check for missing data.
    check_start : bool
        turns 0 to 1 for the first observation, to display the start of the
        data as the beginning of the missing data event

    Returns
    -------
    pd.DataFrame(), str
        dataframe where 1 indicates missing data and 0 indicates reported
        data, returns the column name generated from the MDAL Query
    """
    # Check start changes the first value of df to 1, when the data stream is
    # initially missing. This allows the diff function to acknowledge the
    # missing data.
    data_missing = df.isnull() * 1
    col_name = str(data_missing.columns[0])

    # When there is no data stream at the beginning we change it to 1
    if check_start & data_missing[col_name][0] == 1:
        data_missing[col_name][0] = 0

    return data_missing, col_name
def clear_text(self):
    """stub"""
    if (self.get_text_metadata().is_read_only() or
            self.get_text_metadata().is_required()):
        raise NoAccess()
    self.my_osid_object_form._my_map['text'] = \
        dict(self.get_text_metadata().get_default_string_values()[0])
def add_px_err(isoel, col1, col2, px_um, inplace=False): """Undo pixelation correction Isoelasticity lines are already corrected for pixelation effects as described in Mapping of Deformation to Apparent Young's Modulus in Real-Time Deformability Cytometry Christoph Herold, arXiv:1704.00572 [cond-mat.soft] (2017) https://arxiv.org/abs/1704.00572. If the isoealsticity lines are displayed with deformation data that are not corrected, then the lines must be "un"-corrected, i.e. the pixelation error must be added to the lines to match the experimental data. Parameters ---------- isoel: list of 2d ndarrays of shape (N, 3) Each item in the list corresponds to one isoelasticity line. The first column is defined by `col1`, the second by `col2`, and the third column is the emodulus. col1, col2: str Define the fist to columns of each isoelasticity line. One of ["area_um", "circ", "deform"] px_um: float Pixel size [µm] """ Isoelastics.check_col12(col1, col2) if "deform" in [col1, col2]: # add error for deformation sign = +1 else: # subtract error for circularity sign = -1 if col1 == "area_um": area_ax = 0 deci_ax = 1 else: area_ax = 1 deci_ax = 0 new_isoel = [] for iso in isoel: iso = np.array(iso, copy=not inplace) ddeci = feat_emod.corrpix_deform_delta(area_um=iso[:, area_ax], px_um=px_um) iso[:, deci_ax] += sign * ddeci new_isoel.append(iso) return new_isoel
def preorder_iter(self, filter_fn=None):
    """
    From DendroPy

    Preorder traversal of self and its child_nodes. Returns self and all
    descendants such that a node is returned before its child_nodes (and
    their child_nodes). Filtered by filter_fn: node is only returned if no
    filter_fn is given or if filter_fn returns True.
    """
    stack = [self]
    while stack:
        node = stack.pop()
        if filter_fn is None or filter_fn(node):
            yield node
        stack.extend([i for i in reversed(node._children)])
def get_el_sp(obj): """ Utility method to get an Element or Specie from an input obj. If obj is in itself an element or a specie, it is returned automatically. If obj is an int or a string representing an integer, the Element with the atomic number obj is returned. If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing which Element parsing will be attempted (e.g., Mn), failing which DummyElement parsing will be attempted. Args: obj (Element/Specie/str/int): An arbitrary object. Supported objects are actual Element/Specie objects, integers (representing atomic numbers) or strings (element symbols or species strings). Returns: Specie or Element, with a bias for the maximum number of properties that can be determined. Raises: ValueError if obj cannot be converted into an Element or Specie. """ if isinstance(obj, (Element, Specie, DummySpecie)): return obj if isinstance(obj, (list, tuple)): return [get_el_sp(o) for o in obj] try: c = float(obj) i = int(c) i = i if i == c else None except (ValueError, TypeError): i = None if i is not None: return Element.from_Z(i) try: return Specie.from_string(obj) except (ValueError, KeyError): try: return Element(obj) except (ValueError, KeyError): try: return DummySpecie.from_string(obj) except: raise ValueError("Can't parse Element or String from type" " %s: %s." % (type(obj), obj))
def queues(self, page=None, per_page=None, previous=None, prefix=None):
    """Execute an HTTP request to get a list of queues and return it.

    Keyword arguments:
    page -- Deprecated. Must be left as None; passing any other value
            raises an Exception. Use `previous` and `per_page` for
            pagination instead.
    """
    options = {}
    if page is not None:
        raise Exception('page param is deprecated!')
    if per_page is not None:
        options['per_page'] = per_page
    if previous is not None:
        options['previous'] = previous
    if prefix is not None:
        options['prefix'] = prefix

    query = urlencode(options)
    url = 'queues'
    if query != '':
        url = "%s?%s" % (url, query)
    result = self.client.get(url)

    return [queue['name'] for queue in result['body']['queues']]
Execute an HTTP request to get a list of queues and return it.

Keyword arguments:
page -- Deprecated. Must be left as None; passing any other value
        raises an Exception. Use `previous` and `per_page` for
        pagination instead.
def foreach_(ctx, seq, expr): ''' Yields the result of applying an expression to each item in the input sequence. * seq: input sequence * expr: expression to be converted to string, then dynamically evaluated for each item on the sequence to produce the result ''' from . import context, parse as uxpathparse if hasattr(seq, 'compute'): seq = seq.compute(ctx) expr = next(string_arg(ctx, expr), '') pexpr = uxpathparse(expr) for item in seq: innerctx = ctx.copy(item=item) yield from pexpr.compute(innerctx)
Yields the result of applying an expression to each item in the input sequence. * seq: input sequence * expr: expression to be converted to string, then dynamically evaluated for each item on the sequence to produce the result
def decode_varint_1(buffer, pos=0):
    """ Decode an integer from a varint presentation. See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

        Arguments:
            buffer (bytes-like): any object acceptable by ``memoryview``
            pos (int): optional position to read from

        Returns:
            (int, int): Decoded int value and next read position
    """
    value = 0
    shift = 0
    memview = memoryview(buffer)
    for i in range(pos, pos + 10):
        try:
            byte = _read_byte(memview, i)
        except IndexError:
            raise ValueError("End of byte stream")
        if byte & 0x80 != 0:
            value |= (byte & 0x7f) << shift
            shift += 7
        else:
            value |= byte << shift
            break
    else:
        # Max size of an encoded varint is 10 bytes for unsigned 64-bit values
        raise ValueError("Out of int64 range")
    # Undo zigzag encoding to restore the sign
    return (value >> 1) ^ -(value & 1), i + 1
Decode an integer from a varint presentation. See https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints on how those can be produced. Arguments: buffer (bytes-like): any object acceptable by ``memoryview`` pos (int): optional position to read from Returns: (int, int): Decoded int value and next read position
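A small, self-contained round trip makes the bit manipulation above concrete. The encoder below is only an illustrative sketch of the matching zigzag/varint scheme (it is not part of the original module), and the stand-in `_read_byte` mimics the helper the decoder is assumed to rely on.

def _read_byte(memview, pos):
    # Stand-in for the module's helper; assumed to simply index the view.
    return memview[pos]

def encode_varint_demo(value):
    # Zigzag-encode the sign, then emit 7-bit groups, least significant first.
    value = (value << 1) ^ (value >> 63)
    out = bytearray()
    while True:
        byte = value & 0x7f
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

buf = encode_varint_demo(-300)   # b'\xd7\x04'
print(decode_varint_1(buf))      # (-300, 2)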
def filter_by(zips=_zips, **kwargs): """ Use `kwargs` to select for desired attributes from list of zipcode dicts """ return [z for z in zips if all([k in z and z[k] == v for k, v in kwargs.items()])]
Use `kwargs` to select for desired attributes from list of zipcode dicts
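A quick usage sketch, passing an explicit list instead of the module's bundled `_zips` data; the field names below are made up for the example, since `filter_by` only requires that each keyword key exist in a dict and match its value.

sample_zips = [
    {'zip_code': '02108', 'city': 'BOSTON', 'state': 'MA'},
    {'zip_code': '02139', 'city': 'CAMBRIDGE', 'state': 'MA'},
    {'zip_code': '10001', 'city': 'NEW YORK', 'state': 'NY'},
]

ma_only = filter_by(zips=sample_zips, state='MA')                 # 2 matches
boston = filter_by(zips=sample_zips, state='MA', city='BOSTON')   # 1 match
nothing = filter_by(zips=sample_zips, county='SUFFOLK')           # key absent -> []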
def fillDataProducts(self, dps): """Fills listview with existing data products""" item = None for dp in dps: if not dp.ignored: item = self._makeDPItem(self, dp, item) # ensure combobox widgets are made self._itemComboBox(item, self.ColAction) self._itemComboBox(item, self.ColRender)
Fills listview with existing data products
def layout(request, ident, stateless=False, cache_id=None, **kwargs): 'Return the layout of the dash application' _, app = DashApp.locate_item(ident, stateless) view_func = app.locate_endpoint_function('dash-layout') resp = view_func() initial_arguments = get_initial_arguments(request, cache_id) response_data, mimetype = app.augment_initial_layout(resp, initial_arguments) return HttpResponse(response_data, content_type=mimetype)
Return the layout of the dash application
def _get_wv(sentence, ignore=False):
    '''
    get word2vec data by sentence
    sentence is segmented string.
    '''
    global _vectors
    vectors = []
    for y in sentence:
        y_ = any2unicode(y).strip()
        if y_ not in _stopwords:
            syns = nearby(y_)[0]
            # print("sentence %s word: %s" %(sentence, y_))
            # print("sentence %s word nearby: %s" %(sentence, " ".join(syns)))
            c = []
            try:
                c.append(_vectors.word_vec(y_))
            except KeyError as error:
                if ignore:
                    continue
                else:
                    logging.warning("not exist in w2v model: %s" % y_)
                    # c.append(np.zeros((100,), dtype=float))
                    random_state = np.random.RandomState(seed=(hash(y_) % (2 ** 32 - 1)))
                    c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
            for n in syns:
                if n is None:
                    continue
                try:
                    v = _vectors.word_vec(any2unicode(n))
                except KeyError as error:
                    # v = np.zeros((100,), dtype=float)
                    random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
                    # use the same [-10, 10) range as above; low=10.0 here was a bug
                    v = random_state.uniform(low=-10.0, high=10.0, size=(100,))
                c.append(v)
            r = np.average(c, axis=0)
            vectors.append(r)
    return vectors
get word2vec data by sentence sentence is segmented string.
def parse(self, limit=None): """ Override Source.parse() Args: :param limit (int, optional) limit the number of rows processed Returns: :return None """ if limit is not None: LOG.info("Only parsing first %d rows", limit) rgd_file = '/'.join( (self.rawdir, self.files['rat_gene2mammalian_phenotype']['file'])) # ontobio gafparser implemented here p = GafParser() assocs = p.parse(open(rgd_file, "r")) for i, assoc in enumerate(assocs): if 'relation' in assoc.keys(): self.make_association(assoc) if limit is not None and i > limit: break return
Override Source.parse() Args: :param limit (int, optional) limit the number of rows processed Returns: :return None
def createEditor(self, delegate, parent, option): """ Creates a ColorCtiEditor. For the parameters see the AbstractCti constructor documentation. """ return ColorCtiEditor(self, delegate, parent=parent)
Creates a ColorCtiEditor. For the parameters see the AbstractCti constructor documentation.
def list_images(self): """Gets Docker image list. :returns: list of dicts :rtype: list """ images = [] for image in (yield from self.query("GET", "images/json", params={"all": 0})): if image['RepoTags']: for tag in image['RepoTags']: if tag != "<none>:<none>": images.append({'image': tag}) return sorted(images, key=lambda i: i['image'])
Gets Docker image list. :returns: list of dicts :rtype: list
def _evolve_reader(in_file): """Generate a list of region IDs and trees from a top_k_trees evolve.py file. """ cur_id_list = None cur_tree = None with open(in_file) as in_handle: for line in in_handle: if line.startswith("id,"): if cur_id_list: yield cur_id_list, cur_tree cur_id_list = [] cur_tree = None elif cur_tree is not None: if line.strip() and not line.startswith("Number of non-empty"): cur_tree.append(line.rstrip()) elif not line.strip() and cur_id_list and len(cur_id_list) > 0: cur_tree = [] elif line.strip(): parts = [] for part in line.strip().split("\t"): if part.endswith(","): part = part[:-1] parts.append(part) if len(parts) > 4: nid, freq, _, _, support = parts cur_id_list.append((nid, freq, support.split("; "))) if cur_id_list: yield cur_id_list, cur_tree
Generate a list of region IDs and trees from a top_k_trees evolve.py file.
async def send_data(self, data, addr): """ Send data to a remote host via the TURN server. """ channel = self.peer_to_channel.get(addr) if channel is None: channel = self.channel_number self.channel_number += 1 self.channel_to_peer[channel] = addr self.peer_to_channel[addr] = channel # bind channel await self.channel_bind(channel, addr) header = struct.pack('!HH', channel, len(data)) self._send(header + data)
Send data to a remote host via the TURN server.
def match_bitap(self, text, pattern, loc): """Locate the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. Args: text: The text to search. pattern: The pattern to search for. loc: The location to search around. Returns: Best match index or -1. """ # Python doesn't have a maxint limit, so ignore this check. #if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits: # raise ValueError("Pattern too long for this application.") # Initialise the alphabet. s = self.match_alphabet(pattern) def match_bitapScore(e, x): """Compute and return the score for a match with e errors and x location. Accesses loc and pattern through being a closure. Args: e: Number of errors in match. x: Location of match. Returns: Overall score for match (0.0 = good, 1.0 = bad). """ accuracy = float(e) / len(pattern) proximity = abs(loc - x) if not self.Match_Distance: # Dodge divide by zero error. return proximity and 1.0 or accuracy return accuracy + (proximity / float(self.Match_Distance)) # Highest score beyond which we give up. score_threshold = self.Match_Threshold # Is there a nearby exact match? (speedup) best_loc = text.find(pattern, loc) if best_loc != -1: score_threshold = min(match_bitapScore(0, best_loc), score_threshold) # What about in the other direction? (speedup) best_loc = text.rfind(pattern, loc + len(pattern)) if best_loc != -1: score_threshold = min(match_bitapScore(0, best_loc), score_threshold) # Initialise the bit arrays. matchmask = 1 << (len(pattern) - 1) best_loc = -1 bin_max = len(pattern) + len(text) # Empty initialization added to appease pychecker. last_rd = None for d in range(len(pattern)): # Scan for the best match each iteration allows for one more error. # Run a binary search to determine how far from 'loc' we can stray at # this error level. bin_min = 0 bin_mid = bin_max while bin_min < bin_mid: if match_bitapScore(d, loc + bin_mid) <= score_threshold: bin_min = bin_mid else: bin_max = bin_mid bin_mid = (bin_max - bin_min) // 2 + bin_min # Use the result from this iteration as the maximum for the next. bin_max = bin_mid start = max(1, loc - bin_mid + 1) finish = min(loc + bin_mid, len(text)) + len(pattern) rd = [0] * (finish + 2) rd[finish + 1] = (1 << d) - 1 for j in range(finish, start - 1, -1): if len(text) <= j - 1: # Out of range. charMatch = 0 else: charMatch = s.get(text[j - 1], 0) if d == 0: # First pass: exact match. rd[j] = ((rd[j + 1] << 1) | 1) & charMatch else: # Subsequent passes: fuzzy match. rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | ( ((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1] if rd[j] & matchmask: score = match_bitapScore(d, j - 1) # This match will almost certainly be better than any existing match. # But check anyway. if score <= score_threshold: # Told you so. score_threshold = score best_loc = j - 1 if best_loc > loc: # When passing loc, don't exceed our current distance from loc. start = max(1, 2 * loc - best_loc) else: # Already passed loc, downhill from here on in. break # No hope for a (better) match at greater error levels. if match_bitapScore(d + 1, loc) > score_threshold: break last_rd = rd return best_loc
Locate the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. Args: text: The text to search. pattern: The pattern to search for. loc: The location to search around. Returns: Best match index or -1.
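In normal use this routine is reached through the library's public fuzzy-match entry point rather than called directly. A sketch of that path, assuming the standard diff-match-patch Python package and its `match_main` / `Match_*` attributes:

from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
dmp.Match_Threshold = 0.5    # 0.0 = exact matches only, 1.0 = match anything
dmp.Match_Distance = 1000    # how far from `loc` a match is allowed to drift

text = "The quick brown fox jumps over the lazy dog"
loc = dmp.match_main(text, "jmps over", 30)   # pattern contains a typo
print(loc)   # index of the best fuzzy match, or -1 if nothing clears the threshold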
def format_results(self, results):
    """
    Format the ldap results object into something reasonable
    """
    if not results:
        return None
    userdn = results[0][0]
    userobj = results[0][1]
    userobj['dn'] = userdn

    keymap = self.config.get('KEY_MAP')
    if keymap:
        return {key: scalar(userobj.get(value)) for key, value in keymap.items()
                if _is_utf8(scalar(userobj.get(value)))}
    else:
        return {key: scalar(value) for key, value in userobj.items()
                if _is_utf8(scalar(value))}
Format the ldap results object into something reasonable
def get_hstwcs(filename,hdulist,extnum): """ Return the HSTWCS object for a given chip. """ hdrwcs = wcsutil.HSTWCS(hdulist,ext=extnum) hdrwcs.filename = filename hdrwcs.expname = hdulist[extnum].header['expname'] hdrwcs.extver = hdulist[extnum].header['extver'] return hdrwcs
Return the HSTWCS object for a given chip.
def _get_nets_krnic(self, *args, **kwargs): """ Deprecated. This will be removed in a future release. """ from warnings import warn warn('NIRWhois._get_nets_krnic() has been deprecated and will be ' 'removed. You should now use NIRWhois.get_nets_krnic().') return self.get_nets_krnic(*args, **kwargs)
Deprecated. This will be removed in a future release.
def filetree(self): """ :attr:`files` as a dictionary tree Each node is a ``dict`` that maps directory/file names to child nodes. Each child node is a ``dict`` for directories and ``None`` for files. If :attr:`path` is ``None``, this is an empty ``dict``. """ tree = {} # Complete directory tree prefix = [] paths = (f.split(os.sep) for f in self.files) for path in paths: dirpath = path[:-1] # Path without filename filename = path[-1] subtree = tree for item in dirpath: if item not in subtree: subtree[item] = {} subtree = subtree[item] subtree[filename] = None return tree
:attr:`files` as a dictionary tree Each node is a ``dict`` that maps directory/file names to child nodes. Each child node is a ``dict`` for directories and ``None`` for files. If :attr:`path` is ``None``, this is an empty ``dict``.
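The shape of the resulting tree is easiest to see with a concrete file list. The sketch below rebuilds the same structure from a plain list of relative paths, which is essentially all the property consumes from `self.files`:

import os

files = [
    os.path.join('docs', 'index.md'),
    os.path.join('docs', 'api', 'reference.md'),
    'README.txt',
]

tree = {}
for parts in (f.split(os.sep) for f in files):
    subtree = tree
    for item in parts[:-1]:           # create/descend directory nodes
        subtree = subtree.setdefault(item, {})
    subtree[parts[-1]] = None         # files are leaf entries

# tree == {'docs': {'index.md': None,
#                   'api': {'reference.md': None}},
#          'README.txt': None}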
def jsonnummultby(self, name, path, number): """ Multiplies the numeric (integer or floating point) JSON value under ``path`` at key ``name`` with the provided ``number`` """ return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number))
Multiplies the numeric (integer or floating point) JSON value under ``path`` at key ``name`` with the provided ``number``
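A possible way to drive this through the rejson client, assuming a Redis server with the ReJSON module loaded is reachable on localhost (the connection details and the Path helper usage are assumptions based on the client's usual setup):

from rejson import Client, Path

rj = Client(host='localhost', port=6379, decode_responses=True)
rj.jsonset('counter', Path.rootPath(), 2)
rj.jsonnummultby('counter', Path.rootPath(), 3)
print(rj.jsonget('counter'))   # 6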
def do_read(self, args):
    """read <addr> ( <objid> ( <prop> [ <indx> ] )... )..."""
    args = args.split()
    if _debug: ReadPropertyMultipleConsoleCmd._debug("do_read %r", args)

    try:
        i = 0
        addr = args[i]
        i += 1

        read_access_spec_list = []
        while i < len(args):
            obj_id = ObjectIdentifier(args[i]).value
            i += 1

            prop_reference_list = []
            while i < len(args):
                prop_id = args[i]
                if prop_id not in PropertyIdentifier.enumerations:
                    break

                i += 1
                if prop_id in ('all', 'required', 'optional'):
                    pass
                else:
                    datatype = get_datatype(obj_id[0], prop_id)
                    if not datatype:
                        raise ValueError("invalid property for object type")

                # build a property reference
                prop_reference = PropertyReference(
                    propertyIdentifier=prop_id,
                    )

                # check for an array index
                if (i < len(args)) and args[i].isdigit():
                    prop_reference.propertyArrayIndex = int(args[i])
                    i += 1

                # add it to the list
                prop_reference_list.append(prop_reference)

            # check for at least one property
            if not prop_reference_list:
                raise ValueError("provide at least one property")

            # build a read access specification
            read_access_spec = ReadAccessSpecification(
                objectIdentifier=obj_id,
                listOfPropertyReferences=prop_reference_list,
                )

            # add it to the list
            read_access_spec_list.append(read_access_spec)

        # check for at least one
        if not read_access_spec_list:
            raise RuntimeError("at least one read access specification required")

        # build the request
        request = ReadPropertyMultipleRequest(
            listOfReadAccessSpecs=read_access_spec_list,
            )
        request.pduDestination = Address(addr)
        if _debug: ReadPropertyMultipleConsoleCmd._debug("    - request: %r", request)

        # make an IOCB
        iocb = IOCB(request)
        if _debug: ReadPropertyMultipleConsoleCmd._debug("    - iocb: %r", iocb)

        # give it to the application
        deferred(this_application.request_io, iocb)

        # wait for it to complete
        iocb.wait()

        # do something for success
        if iocb.ioResponse:
            apdu = iocb.ioResponse

            # should be an ack
            if not isinstance(apdu, ReadPropertyMultipleACK):
                if _debug: ReadPropertyMultipleConsoleCmd._debug("    - not an ack")
                return

            # loop through the results
            for result in apdu.listOfReadAccessResults:
                # here is the object identifier
                objectIdentifier = result.objectIdentifier
                if _debug: ReadPropertyMultipleConsoleCmd._debug("    - objectIdentifier: %r", objectIdentifier)

                # now come the property values per object
                for element in result.listOfResults:
                    # get the property and array index
                    propertyIdentifier = element.propertyIdentifier
                    if _debug: ReadPropertyMultipleConsoleCmd._debug("    - propertyIdentifier: %r", propertyIdentifier)
                    propertyArrayIndex = element.propertyArrayIndex
                    if _debug: ReadPropertyMultipleConsoleCmd._debug("    - propertyArrayIndex: %r", propertyArrayIndex)

                    # here is the read result
                    readResult = element.readResult

                    sys.stdout.write(propertyIdentifier)
                    if propertyArrayIndex is not None:
                        sys.stdout.write("[" + str(propertyArrayIndex) + "]")

                    # check for an error
                    if readResult.propertyAccessError is not None:
                        sys.stdout.write(" ! " + str(readResult.propertyAccessError) + '\n')
                    else:
                        # here is the value
                        propertyValue = readResult.propertyValue

                        # find the datatype
                        datatype = get_datatype(objectIdentifier[0], propertyIdentifier)
                        if _debug: ReadPropertyMultipleConsoleCmd._debug("    - datatype: %r", datatype)
                        if not datatype:
                            raise TypeError("unknown datatype")

                        # special case for array parts, others are managed by cast_out
                        if issubclass(datatype, Array) and (propertyArrayIndex is not None):
                            if propertyArrayIndex == 0:
                                value = propertyValue.cast_out(Unsigned)
                            else:
                                value = propertyValue.cast_out(datatype.subtype)
                        else:
                            value = propertyValue.cast_out(datatype)
                        if _debug: ReadPropertyMultipleConsoleCmd._debug("    - value: %r", value)

                        sys.stdout.write(" = " + str(value) + '\n')
                    sys.stdout.flush()

        # do something for error/reject/abort
        if iocb.ioError:
            sys.stdout.write(str(iocb.ioError) + '\n')

    except Exception as error:
        ReadPropertyMultipleConsoleCmd._exception("exception: %r", error)
read <addr> ( <objid> ( <prop> [ <indx> ] )... )...
def array2tree(arr, name='tree', tree=None): """Convert a numpy structured array into a ROOT TTree. Fields of basic types, strings, and fixed-size subarrays of basic types are supported. ``np.object`` and ``np.float16`` are currently not supported. Parameters ---------- arr : array A numpy structured array name : str (optional, default='tree') Name of the created ROOT TTree if ``tree`` is None. tree : ROOT TTree (optional, default=None) An existing ROOT TTree to be extended by the numpy array. Any branch with the same name as a field in the numpy array will be extended as long as the types are compatible, otherwise a TypeError is raised. New branches will be created and filled for all new fields. Returns ------- root_tree : a ROOT TTree Notes ----- When using the ``tree`` argument to extend and/or add new branches to an existing tree, note that it is possible to create branches of different lengths. This will result in a warning from ROOT when root_numpy calls the tree's ``SetEntries()`` method. Beyond that, the tree should still be usable. While it might not be generally recommended to create branches with differing lengths, this behaviour could be required in certain situations. root_numpy makes no attempt to prevent such behaviour as this would be more strict than ROOT itself. Also see the note about converting trees that have branches of different lengths into numpy arrays in the documentation of :func:`tree2array`. See Also -------- array2root root2array tree2array Examples -------- Convert a numpy array into a tree: >>> from root_numpy import array2tree >>> import numpy as np >>> >>> a = np.array([(1, 2.5, 3.4), ... (4, 5, 6.8)], ... dtype=[('a', np.int32), ... ('b', np.float32), ... ('c', np.float64)]) >>> tree = array2tree(a) >>> tree.Scan() ************************************************ * Row * a * b * c * ************************************************ * 0 * 1 * 2.5 * 3.4 * * 1 * 4 * 5 * 6.8 * ************************************************ Add new branches to an existing tree (continuing from the example above): >>> b = np.array([(4, 10), ... (3, 5)], ... dtype=[('d', np.int32), ... ('e', np.int32)]) >>> array2tree(b, tree=tree) <ROOT.TTree object ("tree") at 0x1449970> >>> tree.Scan() ************************************************************************ * Row * a * b * c * d * e * ************************************************************************ * 0 * 1 * 2.5 * 3.4 * 4 * 10 * * 1 * 4 * 5 * 6.8 * 3 * 5 * ************************************************************************ """ import ROOT if tree is not None: if not isinstance(tree, ROOT.TTree): raise TypeError("tree must be a ROOT.TTree") incobj = ROOT.AsCObject(tree) else: incobj = None cobj = _librootnumpy.array2tree_toCObj(arr, name=name, tree=incobj) return ROOT.BindObject(cobj, 'TTree')
Convert a numpy structured array into a ROOT TTree. Fields of basic types, strings, and fixed-size subarrays of basic types are supported. ``np.object`` and ``np.float16`` are currently not supported. Parameters ---------- arr : array A numpy structured array name : str (optional, default='tree') Name of the created ROOT TTree if ``tree`` is None. tree : ROOT TTree (optional, default=None) An existing ROOT TTree to be extended by the numpy array. Any branch with the same name as a field in the numpy array will be extended as long as the types are compatible, otherwise a TypeError is raised. New branches will be created and filled for all new fields. Returns ------- root_tree : a ROOT TTree Notes ----- When using the ``tree`` argument to extend and/or add new branches to an existing tree, note that it is possible to create branches of different lengths. This will result in a warning from ROOT when root_numpy calls the tree's ``SetEntries()`` method. Beyond that, the tree should still be usable. While it might not be generally recommended to create branches with differing lengths, this behaviour could be required in certain situations. root_numpy makes no attempt to prevent such behaviour as this would be more strict than ROOT itself. Also see the note about converting trees that have branches of different lengths into numpy arrays in the documentation of :func:`tree2array`. See Also -------- array2root root2array tree2array Examples -------- Convert a numpy array into a tree: >>> from root_numpy import array2tree >>> import numpy as np >>> >>> a = np.array([(1, 2.5, 3.4), ... (4, 5, 6.8)], ... dtype=[('a', np.int32), ... ('b', np.float32), ... ('c', np.float64)]) >>> tree = array2tree(a) >>> tree.Scan() ************************************************ * Row * a * b * c * ************************************************ * 0 * 1 * 2.5 * 3.4 * * 1 * 4 * 5 * 6.8 * ************************************************ Add new branches to an existing tree (continuing from the example above): >>> b = np.array([(4, 10), ... (3, 5)], ... dtype=[('d', np.int32), ... ('e', np.int32)]) >>> array2tree(b, tree=tree) <ROOT.TTree object ("tree") at 0x1449970> >>> tree.Scan() ************************************************************************ * Row * a * b * c * d * e * ************************************************************************ * 0 * 1 * 2.5 * 3.4 * 4 * 10 * * 1 * 4 * 5 * 6.8 * 3 * 5 * ************************************************************************
def prov(self): """ :return: This bundle's provenance :rtype: :py:class:`prov.model.ProvDocument` """ if not self._prov: self._prov = self._api.get_bundle(self._document.id, self._id) return self._prov
:return: This bundle's provenance :rtype: :py:class:`prov.model.ProvDocument`
def update_rtfilters(self): """Updates RT filters for each peer. Should be called if a new RT Nlri's have changed based on the setting. Currently only used by `Processor` to update the RT filters after it has processed a RT destination. If RT filter has changed for a peer we call RT filter change handler. """ # Update RT filter for all peers # TODO(PH): Check if getting this map can be optimized (if expensive) new_peer_to_rtfilter_map = self._compute_rtfilter_map() # If we have new best path for RT NLRI, we have to update peer RT # filters and take appropriate action of sending them NLRIs for other # address-families as per new RT filter if necessary. for peer in self._peer_manager.iterpeers: pre_rt_filter = self._rt_mgr.peer_to_rtfilter_map.get(peer, set()) curr_rt_filter = new_peer_to_rtfilter_map.get(peer, set()) old_rts = pre_rt_filter - curr_rt_filter new_rts = curr_rt_filter - pre_rt_filter # If interested RTs for a peer changes if new_rts or old_rts: LOG.debug('RT Filter for peer %s updated: ' 'Added RTs %s, Removed Rts %s', peer.ip_address, new_rts, old_rts) self._on_update_rt_filter(peer, new_rts, old_rts) # Update to new RT filters self._peer_manager.set_peer_to_rtfilter_map(new_peer_to_rtfilter_map) self._rt_mgr.peer_to_rtfilter_map = new_peer_to_rtfilter_map LOG.debug('Updated RT filters: %s', self._rt_mgr.peer_to_rtfilter_map) # Update interested RTs i.e. RTs on the path that will be installed # into global tables self._rt_mgr.update_interested_rts()
Updates RT filters for each peer. Should be called if a new RT Nlri's have changed based on the setting. Currently only used by `Processor` to update the RT filters after it has processed a RT destination. If RT filter has changed for a peer we call RT filter change handler.
def get_token_by_code(self, code):
    '''Exchange an authorization code for an access token and
    return the original JSON response.'''
    url = 'https://openapi.youku.com/v2/oauth2/token'
    data = {'client_id': self.client_id,
            'client_secret': self.client_secret,
            'grant_type': 'authorization_code',
            'code': code,
            'redirect_uri': self.redirect_uri}
    r = requests.post(url, data=data)
    check_error(r)
    return r.json()
Exchange an authorization code for an access token and return the original JSON response.
def fetch_weeks(self, weeks, overwrite=False): """Fetch and cache the requested weeks.""" esf = ElasticsearchFetcher(self.store, self.config) for year, week in weeks: print("Fetch {}-{}".format(year, week)) esf.fetch(year, week, overwrite)
Fetch and cache the requested weeks.
def delete_router(self, router): ''' Delete the specified router ''' router_id = self._find_router_id(router) ret = self.network_conn.delete_router(router=router_id) return ret if ret else True
Delete the specified router
def case_insensitive(self): """Matching packages distinguish between uppercase and lowercase """ if "--case-ins" in self.flag: data_dict = Utils().case_sensitive(self.data) for key, value in data_dict.iteritems(): if key == self.name.lower(): self.name = value
Matching packages distinguish between uppercase and lowercase
def execute_system_command(arg, **_): """Execute a system shell command.""" usage = "Syntax: system [command].\n" if not arg: return [(None, None, None, usage)] try: command = arg.strip() if command.startswith('cd'): ok, error_message = handle_cd_command(arg) if not ok: return [(None, None, None, error_message)] return [(None, None, None, '')] args = arg.split(' ') process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, error = process.communicate() response = output if not error else error # Python 3 returns bytes. This needs to be decoded to a string. if isinstance(response, bytes): encoding = locale.getpreferredencoding(False) response = response.decode(encoding) return [(None, None, None, response)] except OSError as e: return [(None, None, None, 'OSError: %s' % e.strerror)]
Execute a system shell command.
def print_pack(document_loader,  # type: Loader
               processobj,       # type: CommentedMap
               uri,              # type: Text
               metadata          # type: Dict[Text, Any]
              ):  # type: (...) -> Text
    """Return a CWL serialization of the CWL document in JSON."""
    packed = pack(document_loader, processobj, uri, metadata)
    if len(packed["$graph"]) > 1:
        return json_dumps(packed, indent=4)
    return json_dumps(packed["$graph"][0], indent=4)
Return a CWL serialization of the CWL document in JSON.
def get_limit_action(self, criticity, stat_name=""):
    """Return the tuple (action, repeat) for the alert.

    - action is a command line
    - repeat is a bool
    """
    # Get the action for stat + header
    # Example: network_wlan0_rx_careful_action
    # Is an action key available?
    ret = [(stat_name + '_' + criticity + '_action', False),
           (stat_name + '_' + criticity + '_action_repeat', True),
           (self.plugin_name + '_' + criticity + '_action', False),
           (self.plugin_name + '_' + criticity + '_action_repeat', True)]
    for r in ret:
        if r[0] in self._limits:
            return self._limits[r[0]], r[1]

    # No key found, raise an error
    raise KeyError
Return the tuple (action, repeat) for the alert. - action is a command line - repeat is a bool
def tryLoadingFrom(tryPath,moduleName='swhlab'): """if the module is in this path, load it from the local folder.""" if not 'site-packages' in swhlab.__file__: print("loaded custom swhlab module from", os.path.dirname(swhlab.__file__)) return # no need to warn if it's already outside. while len(tryPath)>5: sp=tryPath+"/swhlab/" # imaginary swhlab module path if os.path.isdir(sp) and os.path.exists(sp+"/__init__.py"): if not os.path.dirname(tryPath) in sys.path: sys.path.insert(0,os.path.dirname(tryPath)) print("#"*80) print("# WARNING: using site-packages swhlab module") print("#"*80) tryPath=os.path.dirname(tryPath) return
if the module is in this path, load it from the local folder.
def linearRegression(requestContext, seriesList, startSourceAt=None,
                     endSourceAt=None):
    """
    Graphs the linear regression function by least squares method.

    Takes one metric or a wildcard seriesList, followed by a quoted
    string with the time to start the line and another quoted string with
    the time to end the line. The start and end times are inclusive
    (default range is from to until). See ``from / until`` in the
    render\_api_ for examples of time formats. Datapoints in the range
    are used for the regression.

    Example::

        &target=linearRegression(Server.instance01.threads.busy,'-1d')
        &target=linearRegression(Server.instance*.threads.busy,
                                 "00:00 20140101","11:59 20140630")
    """
    from .app import evaluateTarget
    results = []
    sourceContext = requestContext.copy()
    if startSourceAt is not None:
        sourceContext['startTime'] = parseATTime(startSourceAt)
    if endSourceAt is not None:
        sourceContext['endTime'] = parseATTime(endSourceAt)

    sourceList = []
    for series in seriesList:
        source = evaluateTarget(sourceContext, series.pathExpression)
        sourceList.extend(source)

    for source, series in zip(sourceList, seriesList):
        newName = 'linearRegression(%s, %s, %s)' % (
            series.name,
            int(epoch(sourceContext['startTime'])),
            int(epoch(sourceContext['endTime'])))
        forecast = linearRegressionAnalysis(source)
        if forecast is None:
            continue
        factor, offset = forecast
        values = [offset + (series.start + i * series.step) * factor
                  for i in range(len(series))]
        newSeries = TimeSeries(newName, series.start, series.end,
                               series.step, values)
        newSeries.pathExpression = newSeries.name
        results.append(newSeries)
    return results
Graphs the linear regression function by least squares method.

Takes one metric or a wildcard seriesList, followed by a quoted
string with the time to start the line and another quoted string with
the time to end the line. The start and end times are inclusive
(default range is from to until). See ``from / until`` in the
render\_api_ for examples of time formats. Datapoints in the range
are used for the regression.

Example::

    &target=linearRegression(Server.instance01.threads.busy,'-1d')
    &target=linearRegression(Server.instance*.threads.busy,
                             "00:00 20140101","11:59 20140630")
def save_image(self, img, filename=None, **kwargs): # floating_point=False, """Save the image to the given *filename* in ninjotiff_ format. .. _ninjotiff: http://www.ssec.wisc.edu/~davidh/polar2grid/misc/NinJo_Satellite_Import_Formats.html """ filename = filename or self.get_filename(**img.data.attrs) nt.save(img, filename, **kwargs)
Save the image to the given *filename* in ninjotiff_ format. .. _ninjotiff: http://www.ssec.wisc.edu/~davidh/polar2grid/misc/NinJo_Satellite_Import_Formats.html
def create_token_for_user(user: get_user_model()) -> bytes: """ Create a new random auth token for user. """ token = urandom(48) AuthToken.objects.create( hashed_token=AuthToken._hash_token(token), user=user) return token
Create a new random auth token for user.
def _find_by_sha1(self, sha1): """ Return an |ImagePart| object belonging to this package or |None| if no matching image part is found. The image part is identified by the SHA1 hash digest of the image binary it contains. """ for image_part in self: # ---skip unknown/unsupported image types, like SVG--- if not hasattr(image_part, 'sha1'): continue if image_part.sha1 == sha1: return image_part return None
Return an |ImagePart| object belonging to this package or |None| if no matching image part is found. The image part is identified by the SHA1 hash digest of the image binary it contains.
def dijkstra(G, start, weight='weight'):
    """
    Compute shortest path length between start and all other
    reachable nodes for a weighted graph.
    return -> ({vertex: weight from start, }, {vertex: predecessor, })
    """
    if start not in G.vertices:
        raise GraphInsertError("Vertex %s doesn't exist." % (start,))
    visited = {start: 0}
    path = {}
    vertices = set(G.vertices.keys())
    while vertices:
        min_vertex = None
        for vertex in vertices:
            if vertex in visited:
                if min_vertex is None or visited[vertex] < visited[min_vertex]:
                    min_vertex = vertex
        if min_vertex is None:
            break
        vertices.remove(min_vertex)
        current_weight = visited[min_vertex]
        for edge in G.vertices[min_vertex]:
            edge_weight = current_weight + G.edges[(min_vertex, edge)][weight]
            if edge not in visited or edge_weight < visited[edge]:
                visited[edge] = edge_weight
                path[edge] = min_vertex
    return visited, path
Compute shortest path length between start and all other
reachable nodes for a weighted graph.
return -> ({vertex: weight from start, }, {vertex: predecessor, })
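To exercise the routine without the project's real graph class, a tiny stand-in exposing the same `vertices`/`edges` layout is enough; the attribute shape below is inferred from how the function indexes `G`, so treat it as an assumption.

class TinyGraph:
    def __init__(self):
        # vertices: vertex -> iterable of neighbours; edges: (u, v) -> attribute dict
        self.vertices = {'a': ['b', 'c'], 'b': ['c'], 'c': []}
        self.edges = {('a', 'b'): {'weight': 1},
                      ('a', 'c'): {'weight': 5},
                      ('b', 'c'): {'weight': 2}}

visited, path = dijkstra(TinyGraph(), 'a')
# visited == {'a': 0, 'b': 1, 'c': 3}
# path    == {'b': 'a', 'c': 'b'}   (the shortest route to 'c' goes through 'b')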
def plot_report(report, success_name, fail_names, label=None, is_max_confidence=True, linewidth=LINEWIDTH, plot_upper_bound=True): """ Plot a success fail curve from a confidence report :param report: A confidence report (the type of object saved by make_confidence_report.py) :param success_name: see plot_report_from_path :param fail_names: see plot_report_from_path :param label: see plot_report_from_path :param is_max_confidence: see plot_report_from_path :param linewidth: see plot_report_from_path """ (fail_optimal, success_optimal, fail_lower_bound, fail_upper_bound, success_bounded) = make_curve(report, success_name, fail_names) assert len(fail_lower_bound) == len(fail_upper_bound) fail_optimal = np.array(fail_optimal) fail_lower_bound = np.array(fail_lower_bound) fail_upper_bound = np.array(fail_upper_bound) if is_max_confidence: p, = pyplot.plot(fail_optimal, success_optimal, label=label, linewidth=linewidth) color = p.get_color() pyplot.plot(fail_lower_bound, success_bounded, '--', color=color) if plot_upper_bound: pyplot.plot(fail_upper_bound, success_bounded, '--', color=color) else: # If the attack was not MaxConfidence, then this whole curve is just # a loose lower bound all_fail = np.concatenate((fail_optimal, fail_lower_bound), axis=0) pyplot.plot(all_fail, success_optimal + success_bounded, '--', label=label, linewidth=linewidth) pyplot.xlabel("Failure rate on adversarial examples") pyplot.ylabel("Success rate on clean examples") gap = fail_upper_bound - fail_lower_bound if gap.size > 0: assert gap.min() >= 0. print("Max gap: ", gap.max())
Plot a success fail curve from a confidence report :param report: A confidence report (the type of object saved by make_confidence_report.py) :param success_name: see plot_report_from_path :param fail_names: see plot_report_from_path :param label: see plot_report_from_path :param is_max_confidence: see plot_report_from_path :param linewidth: see plot_report_from_path
def longestorf(args): """ %prog longestorf fastafile Find longest ORF for each sequence in fastafile. """ p = OptionParser(longestorf.__doc__) p.add_option("--ids", action="store_true", help="Generate table with ORF info [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args pf = fastafile.rsplit(".", 1)[0] orffile = pf + ".orf.fasta" idsfile = None if opts.ids: idsfile = pf + ".orf.ids" fwids = open(idsfile, "w") f = Fasta(fastafile, lazy=True) fw = must_open(orffile, "w") before, after = 0, 0 for name, rec in f.iteritems_ordered(): cds = rec.seq before += len(cds) # Try all six frames orf = ORFFinder(cds) lorf = orf.get_longest_orf() newcds = Seq(lorf) after += len(newcds) newrec = SeqRecord(newcds, id=name, description=rec.description) SeqIO.write([newrec], fw, "fasta") if idsfile: print("\t".join((name, orf.info)), file=fwids) fw.close() if idsfile: fwids.close() logging.debug("Longest ORFs written to `{0}` ({1}).".\ format(orffile, percentage(after, before))) return orffile
%prog longestorf fastafile Find longest ORF for each sequence in fastafile.
def get_bins(self): """Gets the bin list resulting from the search. return: (osid.resource.BinList) - the bin list raise: IllegalState - list already retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.BinList(self._results, runtime=self._runtime)
Gets the bin list resulting from the search. return: (osid.resource.BinList) - the bin list raise: IllegalState - list already retrieved *compliance: mandatory -- This method must be implemented.*
def resolve(input, representation, resolvers=None, get3d=False, **kwargs): """Resolve input to the specified output representation. :param string input: Chemical identifier to resolve :param string representation: Desired output representation :param list(string) resolvers: (Optional) Ordered list of resolvers to use :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable) :returns: Output representation or None :rtype: string or None :raises HTTPError: if CIR returns an error code :raises ParseError: if CIR response is uninterpretable """ # Take first result from XML query results = query(input, representation, resolvers, False, get3d, **kwargs) result = results[0].value if results else None return result
Resolve input to the specified output representation. :param string input: Chemical identifier to resolve :param string representation: Desired output representation :param list(string) resolvers: (Optional) Ordered list of resolvers to use :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable) :returns: Output representation or None :rtype: string or None :raises HTTPError: if CIR returns an error code :raises ParseError: if CIR response is uninterpretable
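Typical calls look like the following; note that every call performs an HTTP request to the NCI/CADD Chemical Identifier Resolver, and the resolver name in the second example is only illustrative:

# Chemical name -> SMILES, letting CIR pick a resolver.
smiles = resolve('aspirin', 'smiles')

# Same input, restricted to a specific resolver (name assumed from the CIR docs).
inchi = resolve('aspirin', 'stdinchi', resolvers=['name_by_cir'])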
def _cast(cls, base_info, take_ownership=True): """Casts a GIBaseInfo instance to the right sub type. The original GIBaseInfo can't have ownership. Will take ownership. """ type_value = base_info.type.value try: new_obj = cast(base_info, cls.__types[type_value]) except KeyError: new_obj = base_info if take_ownership: assert not base_info.__owns new_obj._take_ownership() return new_obj
Casts a GIBaseInfo instance to the right sub type. The original GIBaseInfo can't have ownership. Will take ownership.
def request_handler(self, can_handle_func): # type: (Callable[[Input], bool]) -> Callable """Decorator that can be used to add request handlers easily to the builder. The can_handle_func has to be a Callable instance, which takes a single parameter and no varargs or kwargs. This is because of the RequestHandler class signature restrictions. The returned wrapper function can be applied as a decorator on any function that returns a response object by the skill. The function should follow the signature of the handle function in :py:class:`ask_sdk_runtime.dispatch_components.request_components.AbstractRequestHandler` class. :param can_handle_func: The function that validates if the request can be handled. :type can_handle_func: Callable[[Input], bool] :return: Wrapper function that can be decorated on a handle function. """ def wrapper(handle_func): if not callable(can_handle_func) or not callable(handle_func): raise SkillBuilderException( "Request Handler can_handle_func and handle_func " "input parameters should be callable") class_attributes = { "can_handle": lambda self, handler_input: can_handle_func( handler_input), "handle": lambda self, handler_input: handle_func( handler_input) } request_handler_class = type( "RequestHandler{}".format( handle_func.__name__.title().replace("_", "")), (AbstractRequestHandler,), class_attributes) self.add_request_handler(request_handler=request_handler_class()) return wrapper
Decorator that can be used to add request handlers easily to the builder. The can_handle_func has to be a Callable instance, which takes a single parameter and no varargs or kwargs. This is because of the RequestHandler class signature restrictions. The returned wrapper function can be applied as a decorator on any function that returns a response object by the skill. The function should follow the signature of the handle function in :py:class:`ask_sdk_runtime.dispatch_components.request_components.AbstractRequestHandler` class. :param can_handle_func: The function that validates if the request can be handled. :type can_handle_func: Callable[[Input], bool] :return: Wrapper function that can be decorated on a handle function.
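The decorator is typically used through a concrete skill builder. A minimal sketch, assuming the ask-sdk-core package (whose `SkillBuilder` subclasses this builder and whose `is_request_type` helper lives in `ask_sdk_core.utils`):

from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.utils import is_request_type

sb = SkillBuilder()

@sb.request_handler(can_handle_func=is_request_type("LaunchRequest"))
def launch_request_handler(handler_input):
    # Registered to run whenever the skill receives a LaunchRequest.
    return handler_input.response_builder.speak("Welcome!").response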
def add_lifecycle_set_storage_class_rule(self, storage_class, **kw):
    """Add a "set storage class" rule to lifecycle rules configured for this bucket.

    See https://cloud.google.com/storage/docs/lifecycle and
        https://cloud.google.com/storage/docs/json_api/v1/buckets

    .. literalinclude:: snippets.py
      :start-after: [START add_lifecycle_set_storage_class_rule]
      :end-before: [END add_lifecycle_set_storage_class_rule]

    :type storage_class: str, one of :attr:`_STORAGE_CLASSES`.
    :param storage_class: new storage class to assign to matching items.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
    """
    rules = list(self.lifecycle_rules)
    rules.append(LifecycleRuleSetStorageClass(storage_class, **kw))
    self.lifecycle_rules = rules
Add a "delete" rule to lifestyle rules configured for this bucket. See https://cloud.google.com/storage/docs/lifecycle and https://cloud.google.com/storage/docs/json_api/v1/buckets .. literalinclude:: snippets.py :start-after: [START add_lifecycle_set_storage_class_rule] :end-before: [END add_lifecycle_set_storage_class_rule] :type storage_class: str, one of :attr:`_STORAGE_CLASSES`. :param storage_class: new storage class to assign to matching items. :type kw: dict :params kw: arguments passed to :class:`LifecycleRuleConditions`.