def complete(self): """ Determine if the analyses of the strains are complete, e.g. there are no missing GDCS genes, and the sample.general.bestassemblyfile != 'NA' """ # Boolean to store the completeness of the analyses allcomplete = True # Clear the list of samples that still require more sequence data self.incomplete = list() for sample in self.runmetadata.samples: if sample.general.bestassemblyfile != 'NA': try: # If the sample has been tagged as incomplete, only add it to the complete metadata list if the # pipeline is on its final iteration if sample.general.incomplete: if self.final: self.completemetadata.append(sample) else: sample.general.complete = False allcomplete = False self.incomplete.append(sample.name) except AttributeError: sample.general.complete = True self.completemetadata.append(sample) else: if self.final: self.completemetadata.append(sample) else: sample.general.complete = False allcomplete = False self.incomplete.append(sample.name) # If all the samples are complete, set the global variable for run completeness to True if allcomplete: self.analysescomplete = True
Determine if the analyses of the strains are complete, e.g. there are no missing GDCS genes, and the sample.general.bestassemblyfile != 'NA'
def browse(self, folder, levels=None, prefix=None): """ Returns the directory tree of the global model. Directories are always JSON objects (map/dictionary), and files are always arrays of modification time and size. The first integer is the file's modification time, and the second integer is the file size. Args: folder (str): The root folder to traverse. levels (int): How deep within the tree we want to drill down. (0 based, defaults to unlimited depth) prefix (str): Defines a prefix within the tree where to start building the structure. Returns: dict """ assert isinstance(levels, int) or levels is None assert isinstance(prefix, string_types) or prefix is None return self.get('browse', params={'folder': folder, 'levels': levels, 'prefix': prefix})
Returns the directory tree of the global model. Directories are always JSON objects (map/dictionary), and files are always arrays of modification time and size. The first integer is the file's modification time, and the second integer is the file size. Args: folder (str): The root folder to traverse. levels (int): How deep within the tree we want to drill down. (0 based, defaults to unlimited depth) prefix (str): Defines a prefix within the tree where to start building the structure. Returns: dict
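The documented shape (directories as dicts, files as [modification_time, size] arrays) walks naturally with recursion. A minimal sketch under that assumption; the sample tree is illustrative, not real Syncthing output:

def walk_tree(tree, path=""):
    for name, node in tree.items():
        if isinstance(node, dict):          # directory: recurse
            walk_tree(node, path + name + "/")
        else:                               # file: [mtime, size]
            mtime, size = node
            print("{}{} ({} bytes, mtime {})".format(path, name, size, mtime))

walk_tree({"docs": {"readme.txt": [1500000000, 1024]}, "notes.bin": [1500000001, 2048]})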
def set_level(self, val): """Set the device ON LEVEL.""" if val == 0: self.off() elif val == 255: self.on() else: setlevel = 255 if val < 1: setlevel = val * 255 elif val <= 0xff: setlevel = val change = setlevel - self._value increment = 255 / self._steps steps = round(abs(change) / increment) if change > 0: method = self.brighten self._value += round(steps * increment) self._value = min(255, self._value) else: method = self.dim self._value -= round(steps * increment) self._value = max(0, self._value) # pylint: disable=unused-variable for step in range(0, steps): method(True) self._update_subscribers(self._value)
Set the device ON LEVEL.
def bindToProperty(self,instance,propertyName,useGetter=False): """ 2-way binds to an instance property. Parameters: - instance -- the object instance - propertyName -- the name of the property to bind to - useGetter: when True, calls the getter method to obtain the value. When False, the signal argument is used as input for the target setter. (default False) Notes: 2-way binds to an instance property according to one of the following naming conventions: @property, propertyName.setter and pyqtSignal - getter: propertyName - setter: propertyName - changedSignal: propertyNameChanged getter, setter and pyqtSignal (this is used when binding to standard QWidgets like QSpinBox) - getter: propertyName() - setter: setPropertyName() - changedSignal: propertyNameChanged """ endpoint = BindingEndpoint.forProperty(instance,propertyName,useGetter = useGetter) self.bindToEndPoint(endpoint)
2-way binds to an instance property. Parameters: - instance -- the object instance - propertyName -- the name of the property to bind to - useGetter: when True, calls the getter method to obtain the value. When False, the signal argument is used as input for the target setter. (default False) Notes: 2-way binds to an instance property according to one of the following naming conventions: @property, propertyName.setter and pyqtSignal - getter: propertyName - setter: propertyName - changedSignal: propertyNameChanged getter, setter and pyqtSignal (this is used when binding to standard QWidgets like QSpinBox) - getter: propertyName() - setter: setPropertyName() - changedSignal: propertyNameChanged
def get_default_property_values(self, classname): """Return a dict with default values for all properties declared on this class.""" schema_element = self.get_element_by_class_name(classname) result = { property_name: property_descriptor.default for property_name, property_descriptor in six.iteritems(schema_element.properties) } if schema_element.is_edge: # Remove the source/destination properties for edges, if they exist. result.pop(EDGE_SOURCE_PROPERTY_NAME, None) result.pop(EDGE_DESTINATION_PROPERTY_NAME, None) return result
Return a dict with default values for all properties declared on this class.
def LayerNorm( x, epsilon=1e-5, use_bias=True, use_scale=True, gamma_init=None, data_format='channels_last'): """ Layer Normalization layer, as described in the paper: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_. Args: x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format. epsilon (float): epsilon to avoid divide-by-zero. use_scale, use_bias (bool): whether to use the extra affine transformation or not. """ data_format = get_data_format(data_format, keras_mode=False) shape = x.get_shape().as_list() ndims = len(shape) assert ndims in [2, 4] mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True) if data_format == 'NCHW': chan = shape[1] new_shape = [1, chan, 1, 1] else: chan = shape[-1] new_shape = [1, 1, 1, chan] if ndims == 2: new_shape = [1, chan] if use_bias: beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer()) beta = tf.reshape(beta, new_shape) else: beta = tf.zeros([1] * ndims, name='beta') if use_scale: if gamma_init is None: gamma_init = tf.constant_initializer(1.0) gamma = tf.get_variable('gamma', [chan], initializer=gamma_init) gamma = tf.reshape(gamma, new_shape) else: gamma = tf.ones([1] * ndims, name='gamma') ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output') vh = ret.variables = VariableHolder() if use_scale: vh.gamma = gamma if use_bias: vh.beta = beta return ret
Layer Normalization layer, as described in the paper: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_. Args: x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format. epsilon (float): epsilon to avoid divide-by-zero. use_scale, use_bias (bool): whether to use the extra affine transformation or not.
def get_iso_time(): '''returns the current time as a datetime, round-tripped through its ISO string representation in an admittedly ugly way; convert to a string with str() ''' t1 = time.time() t2 = datetime.datetime.fromtimestamp(t1) t4 = t2.__str__() try: t4a, t4b = t4.split(".", 1) except ValueError: t4a = t4 t4b = '000000' t5 = datetime.datetime.strptime(t4a, "%Y-%m-%d %H:%M:%S") ms = int(t4b.ljust(6, '0')[:6]) return t5.replace(microsecond=ms)
returns the current time as a datetime, round-tripped through its ISO string representation in an admittedly ugly way; convert to a string with str()
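For contrast, a minimal sketch of the same round trip done directly; assumes Python 3.7+ for datetime.fromisoformat:

import datetime

# str(now) produces "YYYY-MM-DD HH:MM:SS[.ffffff]", which fromisoformat parses.
now = datetime.datetime.now()
round_tripped = datetime.datetime.fromisoformat(str(now))
assert round_tripped == now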
def main(): """ NAME s_hext.py DESCRIPTION calculates Hext statistics for tensor data SYNTAX s_hext.py [-h][-i][-f file] [<filename] OPTIONS -h prints help message and quits -f file specifies filename on command line -l NMEAS do line by line instead of whole file, use number of measurements NMEAS for degrees of freedom < filename, reads from standard input (Unix like operating systems only) INPUT x11,x22,x33,x12,x23,x13,sigma [sigma only if line by line] OUTPUT F F12 F23 sigma and three sets of: tau dec inc Eij dec inc Eik dec inc DEFAULT average whole file """ ave=1 if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-l' in sys.argv: ind=sys.argv.index('-l') npts=int(sys.argv[ind+1]) ave=0 if '-f' in sys.argv: ind=sys.argv.index('-f') file=sys.argv[ind+1] f=open(file,'r') data=f.readlines() f.close() else: data=sys.stdin.readlines() Ss=[] for line in data: s=[] rec=line.split() for i in range(6): s.append(float(rec[i])) if ave==0: sig=float(rec[6]) hpars=pmag.dohext(npts-6,sig,s) print('%s %4.2f %s %4.2f %s %4.2f'%('F = ',hpars['F'],'F12 = ',hpars['F12'],'F23 = ',hpars['F23'])) print('%s %i %s %14.12f'%('Nmeas = ',npts,' sigma = ',sig)) print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t1"],hpars["v1_dec"],hpars["v1_inc"],hpars["e12"],hpars["v2_dec"],hpars["v2_inc"],hpars["e13"],hpars["v3_dec"],hpars["v3_inc"] )) print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t2"],hpars["v2_dec"],hpars["v2_inc"],hpars["e23"],hpars["v3_dec"],hpars["v3_inc"],hpars["e12"],hpars["v1_dec"],hpars["v1_inc"] )) print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t3"],hpars["v3_dec"],hpars["v3_inc"],hpars["e13"],hpars["v1_dec"],hpars["v1_inc"],hpars["e23"],hpars["v2_dec"],hpars["v2_inc"] )) else: Ss.append(s) if ave==1: npts=len(Ss) nf,sigma,avs=pmag.sbar(Ss) hpars=pmag.dohext(nf,sigma,avs) print('%s %4.2f %s %4.2f %s %4.2f'%('F = ',hpars['F'],'F12 = ',hpars['F12'],'F23 = ',hpars['F23'])) print('%s %i %s %14.12f'%('N = ',npts,' sigma = ',sigma)) print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t1"],hpars["v1_dec"],hpars["v1_inc"],hpars["e12"],hpars["v2_dec"],hpars["v2_inc"],hpars["e13"],hpars["v3_dec"],hpars["v3_inc"] )) print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t2"],hpars["v2_dec"],hpars["v2_inc"],hpars["e23"],hpars["v3_dec"],hpars["v3_inc"],hpars["e12"],hpars["v1_dec"],hpars["v1_inc"] )) print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t3"],hpars["v3_dec"],hpars["v3_inc"],hpars["e13"],hpars["v1_dec"],hpars["v1_inc"],hpars["e23"],hpars["v2_dec"],hpars["v2_inc"] ))
NAME s_hext.py DESCRIPTION calculates Hext statistics for tensor data SYNTAX s_hext.py [-h][-i][-f file] [<filename] OPTIONS -h prints help message and quits -f file specifies filename on command line -l NMEAS do line by line instead of whole file, use number of measurements NMEAS for degrees of freedom < filename, reads from standard input (Unix like operating systems only) INPUT x11,x22,x33,x12,x23,x13,sigma [sigma only if line by line] OUTPUT F F12 F23 sigma and three sets of: tau dec inc Eij dec inc Eik dec inc DEFAULT average whole file
def summary(*samples): """Run SignatureCompareRelatedSimple module from qsignature tool. Creates a matrix of pairwise comparison among samples. The function will not run if the output exists :param samples: list with only one element containing all samples information :returns: (dict) with the path of the output to be joined to summary """ warnings, similar = [], [] qsig = config_utils.get_program("qsignature", samples[0][0]["config"]) if not qsig: return [[]] res_qsig = config_utils.get_resources("qsignature", samples[0][0]["config"]) jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"])) work_dir = samples[0][0]["dirs"]["work"] count = 0 for data in samples: data = data[0] vcf = tz.get_in(["summary", "qc", "qsignature", "base"], data) if vcf: count += 1 vcf_name = dd.get_sample_name(data) + ".qsig.vcf" out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature")) if not os.path.lexists(os.path.join(out_dir, vcf_name)): os.symlink(vcf, os.path.join(out_dir, vcf_name)) if count > 0: qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature")) out_file = os.path.join(qc_out_dir, "qsignature.xml") out_ma_file = os.path.join(qc_out_dir, "qsignature.ma") out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings") log = os.path.join(work_dir, "qsignature", "qsig-summary.log") if not os.path.exists(out_file): with file_transaction(samples[0][0], out_file) as file_txt_out: base_cmd = ("{qsig} {jvm_opts} " "org.qcmg.sig.SignatureCompareRelatedSimple " "-log {log} -dir {out_dir} " "-o {file_txt_out} ") do.run(base_cmd.format(**locals()), "qsignature score calculation") error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file, out_warn_file, samples[0][0]) return [{'total samples': count, 'similar samples pairs': len(similar), 'warnings samples pairs': len(warnings), 'error samples': list(error), 'out_dir': qc_out_dir}] else: return []
Run SignatureCompareRelatedSimple module from qsignature tool. Creates a matrix of pairwise comparison among samples. The function will not run if the output exists :param samples: list with only one element containing all samples information :returns: (dict) with the path of the output to be joined to summary
def _read(self, size): """Return size bytes from the stream. """ if self.comptype == "tar": return self.__read(size) c = len(self.dbuf) while c < size: buf = self.__read(self.bufsize) if not buf: break try: buf = self.cmp.decompress(buf) except IOError: raise ReadError("invalid compressed data") self.dbuf += buf c += len(buf) buf = self.dbuf[:size] self.dbuf = self.dbuf[size:] return buf
Return size bytes from the stream.
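The same refill-then-slice buffering pattern, isolated as a runnable sketch with zlib standing in for the stream's compressor; names and buffer sizes are illustrative:

import io
import zlib

raw = zlib.compress(b"x" * 1000)
src = io.BytesIO(raw)          # stands in for the raw compressed stream
decomp = zlib.decompressobj()
dbuf = b""                     # decompressed-but-unread buffer

def read(size, bufsize=64):
    global dbuf
    while len(dbuf) < size:    # refill until enough bytes are buffered
        chunk = src.read(bufsize)
        if not chunk:
            break
        dbuf += decomp.decompress(chunk)
    out, dbuf = dbuf[:size], dbuf[size:]
    return out

assert read(10) == b"x" * 10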
def get_composition(self, composition_id): """Gets the ``Composition`` specified by its ``Id``. arg: composition_id (osid.id.Id): ``Id`` of the ``Composition`` return: (osid.repository.Composition) - the composition raise: NotFound - ``composition_id`` not found raise: NullArgument - ``composition_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resource # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('repository', collection='Composition', runtime=self._runtime) result = collection.find_one( dict({'_id': ObjectId(self._get_id(composition_id, 'repository').get_identifier())}, **self._view_filter())) return objects.Composition(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
Gets the ``Composition`` specified by its ``Id``. arg: composition_id (osid.id.Id): ``Id`` of the ``Composition`` return: (osid.repository.Composition) - the composition raise: NotFound - ``composition_id`` not found raise: NullArgument - ``composition_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def _on_dirty_changed(self, dirty): """ Adds a star in front of a dirty tab and emits dirty_changed. """ try: title = self._current._tab_name index = self.indexOf(self._current) if dirty: self.setTabText(index, "* " + title) else: self.setTabText(index, title) except AttributeError: pass self.dirty_changed.emit(dirty)
Adds a star in front of a dirty tab and emits dirty_changed.
def yaml_force_unicode(): """ Force pyyaml to return unicode values. """ #/ ## modified from |http://stackoverflow.com/a/2967461| if sys.version_info[0] == 2: def construct_func(self, node): return self.construct_scalar(node) yaml.Loader.add_constructor(U('tag:yaml.org,2002:str'), construct_func) yaml.SafeLoader.add_constructor(U('tag:yaml.org,2002:str'), construct_func)
Force pyyaml to return unicode values.
def columns(self, model=None): """ Returns any columns used within this query. :return [<orb.Column>, ..] """ for query in self.__queries: for column in query.columns(model=model): yield column
Returns any columns used within this query. :return [<orb.Column>, ..]
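A small design note: on Python 3 the nested loop above is exactly what `yield from` delegation expresses. A self-contained sketch of the idiom:

def flatten(generators):
    for gen in generators:
        yield from gen  # delegate to each sub-generator in turn

assert list(flatten([iter([1, 2]), iter([3])])) == [1, 2, 3]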
def feedforward(self): """ Soon to be deprecated. Needed to make the SP implementation compatible with some older code. """ m = self._numInputs n = self._numColumns W = np.zeros((n, m)) for i in range(self._numColumns): self.getPermanence(i, W[i, :]) return W
Soon to be deprecated. Needed to make the SP implementation compatible with some older code.
def mem_extend(self, start: int, size: int) -> None: """Extends the memory of this machine state. :param start: Start of memory extension :param size: Size of memory extension """ m_extend = self.calculate_extension_size(start, size) if m_extend: extend_gas = self.calculate_memory_gas(start, size) self.min_gas_used += extend_gas self.max_gas_used += extend_gas self.check_gas() self.memory.extend(m_extend)
Extends the memory of this machine state. :param start: Start of memory extension :param size: Size of memory extension
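For intuition, a hedged sketch of the usual EVM memory-cost rule such an extension typically charges (Yellow Paper: 3*words + words**2 // 512); this is an illustration, not this library's exact accounting:

def memory_gas(words: int) -> int:
    return 3 * words + words ** 2 // 512

def extension_gas(old_bytes: int, new_bytes: int) -> int:
    to_words = lambda b: (b + 31) // 32  # round up to 32-byte words
    return memory_gas(to_words(new_bytes)) - memory_gas(to_words(old_bytes))

assert extension_gas(0, 32) == 3    # first word costs 3 gas
assert extension_gas(32, 64) == 3   # second word another 3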
def validate(self): """ validates the feature configuration, and returns a list of errors (empty list if no error) validate should: * check required variables * warn on unused variables errors should either be reported via self._log_error(), or raise an exception """ if self.target: for k in self.target.keys(): if k in self.deprecated_options: self.logger.warn( self.deprecated_options[k].format(option=k, feature=self.feature_name)) elif (k not in self.valid_options and k not in self.required_options and '*' not in self.valid_options): self.logger.warn("Unused option %s in %s!" % (k, self.feature_name)) for k in self.required_options: if not self.target.has(k): self._log_error( "Required option %s not present in feature %s!" % (k, self.feature_name))
validates the feature configuration, and returns a list of errors (empty list if no error) validate should: * check required variables * warn on unused variables errors should either be reported via self._log_error(), or raise an exception
def model(dropout, vocab, model_mode, output_size): """Construct the model.""" textCNN = SentimentNet(dropout=dropout, vocab_size=len(vocab), model_mode=model_mode, output_size=output_size) textCNN.hybridize() return textCNN
Construct the model.
def salt_api_acl_tool(username, request): ''' .. versionadded:: 2016.3.0 Verifies user requests against the API whitelist (user/IP pairs) in order to provide whitelisting for the API similar to the master, but over the API. .. code-block:: yaml rest_cherrypy: api_acl: users: '*': - 1.1.1.1 - 1.1.1.2 foo: - 8.8.4.4 bar: - '*' :param username: Username to check against the API. :type username: str :param request: Cherrypy request to check against the API. :type request: cherrypy.request ''' failure_str = ("[api_acl] Authentication failed for " "user %s from IP %s") success_str = ("[api_acl] Authentication successful for " "user %s from IP %s") pass_str = ("[api_acl] Authentication not checked for " "user %s from IP %s") acl = None # Salt Configuration salt_config = cherrypy.config.get('saltopts', None) if salt_config: # Cherrypy Config. cherrypy_conf = salt_config.get('rest_cherrypy', None) if cherrypy_conf: # ACL Config. acl = cherrypy_conf.get('api_acl', None) ip = request.remote.ip if acl: users = acl.get('users', {}) if users: if username in users: if ip in users[username] or '*' in users[username]: logger.info(success_str, username, ip) return True else: logger.info(failure_str, username, ip) return False elif username not in users and '*' in users: if ip in users['*'] or '*' in users['*']: logger.info(success_str, username, ip) return True else: logger.info(failure_str, username, ip) return False else: logger.info(failure_str, username, ip) return False else: logger.info(pass_str, username, ip) return True
.. versionadded:: 2016.3.0 Verifies user requests against the API whitelist (user/IP pairs) in order to provide whitelisting for the API similar to the master, but over the API. .. code-block:: yaml rest_cherrypy: api_acl: users: '*': - 1.1.1.1 - 1.1.1.2 foo: - 8.8.4.4 bar: - '*' :param username: Username to check against the API. :type username: str :param request: Cherrypy request to check against the API. :type request: cherrypy.request
def _wait_for_token(self, ctx, wait_token_url): ''' Returns a token from the wait token URL @param wait_token_url URL to wait for (string) :return DischargeToken ''' resp = requests.get(wait_token_url) if resp.status_code != 200: raise InteractionError('cannot get {}'.format(wait_token_url)) json_resp = resp.json() kind = json_resp.get('kind') if kind is None: raise InteractionError( 'cannot get kind token from {}'.format(wait_token_url)) token_val = json_resp.get('token') if token_val is None: token_val = json_resp.get('token64') if token_val is None: raise InteractionError( 'cannot get token from {}'.format(wait_token_url)) token_val = base64.b64decode(token_val) return DischargeToken(kind=kind, value=token_val)
Returns a token from the wait token URL @param wait_token_url URL to wait for (string) :return DischargeToken
def _build_kernel(self, kernel_source, compile_flags=()): """Convenience function for building the kernel for this worker. Args: kernel_source (str): the kernel source to use for building the kernel compile_flags (tuple): compile flags to pass to the OpenCL compiler Returns: cl.Program: a compiled CL kernel """ return cl.Program(self._cl_context, kernel_source).build(' '.join(compile_flags))
Convenience function for building the kernel for this worker. Args: kernel_source (str): the kernel source to use for building the kernel compile_flags (tuple): compile flags to pass to the OpenCL compiler Returns: cl.Program: a compiled CL kernel
def _cfg_exists(self, cfg_str): """Check whether a partial config string exists in the running config. :param cfg_str: config string to check :return: True or False """ ios_cfg = self._get_running_config() parse = HTParser(ios_cfg) cfg_raw = parse.find_lines("^" + cfg_str) LOG.debug("_cfg_exists(): Found lines %s", cfg_raw) return len(cfg_raw) > 0
Check whether a partial config string exists in the running config. :param cfg_str: config string to check :return: True or False
def draw_polygon( self, *pts, close_path=True, stroke=None, stroke_width=1, stroke_dash=None, fill=None ) -> None: """Draws the given polygon.""" c = self.c c.saveState() if stroke is not None: c.setStrokeColorRGB(*stroke) c.setLineWidth(stroke_width) c.setDash(stroke_dash) if fill is not None: c.setFillColorRGB(*fill) p = c.beginPath() fn = p.moveTo for x,y in zip(*[iter(pts)]*2): fn(x, y) fn = p.lineTo if close_path: p.close() c.drawPath(p, stroke=(stroke is not None), fill=(fill is not None)) c.restoreState()
Draws the given polygon.
def _view_interval(self, queue_type, queue_id): """Updates the queue interval in SharQ.""" response = { 'status': 'failure' } try: request_data = json.loads(request.data) interval = request_data['interval'] except Exception as e: response['message'] = str(e) return jsonify(**response), 400 request_data = { 'queue_type': queue_type, 'queue_id': queue_id, 'interval': interval } try: response = self.sq.interval(**request_data) if response['status'] == 'failure': return jsonify(**response), 404 except Exception as e: response['message'] = str(e) return jsonify(**response), 400 return jsonify(**response)
Updates the queue interval in SharQ.
def clean_and_split_sql(sql: str) -> List[str]: """ Cleans up and unifies a SQL query. This involves unifying quoted strings and splitting brackets which aren't formatted consistently in the data. """ sql_tokens: List[str] = [] for token in sql.strip().split(): token = token.replace('"', "'").replace("%", "") if token.endswith("(") and len(token) > 1: sql_tokens.extend(split_table_and_column_names(token[:-1])) sql_tokens.extend(split_table_and_column_names(token[-1])) else: sql_tokens.extend(split_table_and_column_names(token)) return sql_tokens
Cleans up and unifies a SQL query. This involves unifying quoted strings and splitting brackets which aren't formatted consistently in the data.
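A runnable mimic for experimentation. `split_table_and_column_names` is not shown in the source, so the version here, which splits dotted names and keeps the dot as its own token, is an assumption for illustration only:

from typing import List

def split_table_and_column_names(token: str) -> List[str]:
    # Assumed helper: "city.name" -> ["city", ".", "name"]
    parts = token.split(".")
    if len(parts) == 1:
        return [token]
    out: List[str] = []
    for i, part in enumerate(parts):
        if i:
            out.append(".")
        out.append(part)
    return out

def clean_and_split_sql(sql: str) -> List[str]:
    sql_tokens: List[str] = []
    for token in sql.strip().split():
        token = token.replace('"', "'").replace("%", "")
        if token.endswith("(") and len(token) > 1:
            sql_tokens.extend(split_table_and_column_names(token[:-1]))
            sql_tokens.extend(split_table_and_column_names(token[-1]))
        else:
            sql_tokens.extend(split_table_and_column_names(token))
    return sql_tokens

print(clean_and_split_sql('SELECT COUNT( city.name ) FROM city'))
# ['SELECT', 'COUNT', '(', 'city', '.', 'name', ')', 'FROM', 'city']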
def lastmod(self, tag): """Return the modification date of the most recently modified entry with the given tag.""" lastitems = EntryModel.objects.published().order_by('-modification_date').filter(tags=tag).only('modification_date') return lastitems[0].modification_date
Return the modification date of the most recently modified entry with the given tag.
def _update_spec_config(self, document_name, spec): ''' Dynamo implementation of project specific metadata spec ''' # add the updated archive_metadata object to Dynamo self._spec_table.update_item( Key={'_id': '{}'.format(document_name)}, UpdateExpression="SET config = :v", ExpressionAttributeValues={':v': spec}, ReturnValues='ALL_NEW')
Dynamo implementation of project specific metadata spec
def construct_vdp_dict(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan, oui_id, oui_data): """Constructs the VDP Message. Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section for more detailed information :param mode: Associate or De-associate :param mgrid: MGR ID :param typeid: Type ID :param typeid_ver: Version of the Type ID :param vsiid_frmt: Format of the following VSI argument :param vsiid: VSI value :param filter_frmt: Filter Format :param gid: Group ID the vNIC belongs to :param mac: MAC Address of the vNIC :param vlan: VLAN of the vNIC :param oui_id: OUI Type :param oui_data: OUI Data :return vdp_keyword_str: Dictionary of VDP arguments and values """ vdp_keyword_str = {} if mgrid is None: mgrid = self.vdp_opts.get('mgrid') mgrid_str = "mgrid2=%s" % mgrid if typeid is None: typeid = self.vdp_opts.get('typeid') typeid_str = "typeid=%s" % typeid if typeid_ver is None: typeid_ver = self.vdp_opts.get('typeidver') typeid_ver_str = "typeidver=%s" % typeid_ver if int(vsiid_frmt) == int(self.vdp_opts.get('vsiidfrmt')): vsiid_str = "uuid=%s" % vsiid else: # Only format supported for now LOG.error("Unsupported VSIID Format1") return vdp_keyword_str if vlan == constants.INVALID_VLAN: vlan = 0 if int(filter_frmt) == vdp_const.VDP_FILTER_GIDMACVID: if not mac or gid == 0: LOG.error("Incorrect Filter Format Specified") return vdp_keyword_str else: f = "filter=%s-%s-%s" filter_str = f % (vlan, mac, gid) elif int(filter_frmt) == vdp_const.VDP_FILTER_GIDVID: if gid == 0: LOG.error("NULL GID Specified") return vdp_keyword_str else: filter_str = "filter=" + '%d' % vlan + "--" + '%ld' % gid elif int(filter_frmt) == vdp_const.VDP_FILTER_MACVID: if not mac: LOG.error("NULL MAC Specified") return vdp_keyword_str else: filter_str = "filter=" + '%d' % vlan + "-" + mac elif int(filter_frmt) == vdp_const.VDP_FILTER_VID: filter_str = "filter=" + '%d' % vlan else: LOG.error("Incorrect Filter Format Specified") return vdp_keyword_str oui_list = [] if oui_id is not None and oui_data is not None: if oui_id == 'cisco': oui_list = self.gen_cisco_vdp_oui(oui_id, oui_data) mode_str = "mode=" + mode vdp_keyword_str = dict(mode=mode_str, mgrid=mgrid_str, typeid=typeid_str, typeid_ver=typeid_ver_str, vsiid=vsiid_str, filter=filter_str, oui_list=oui_list) return vdp_keyword_str
Constructs the VDP Message. Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section for more detailed information :param mode: Associate or De-associate :param mgrid: MGR ID :param typeid: Type ID :param typeid_ver: Version of the Type ID :param vsiid_frmt: Format of the following VSI argument :param vsiid: VSI value :param filter_frmt: Filter Format :param gid: Group ID the vNIC belongs to :param mac: MAC Address of the vNIC :param vlan: VLAN of the vNIC :param oui_id: OUI Type :param oui_data: OUI Data :return vdp_keyword_str: Dictionary of VDP arguments and values
def to_dataframe(self): """ Returns the entire dataset as a single pandas DataFrame. Returns ------- df : DataFrame with shape (n_instances, n_columns) A pandas DataFrame containing the complete original data table including all targets (specified by the meta data) and all features (including those that might have been filtered out). """ if pd is None: raise DatasetsError( "pandas is required to load DataFrame, it can be installed with pip" ) path = find_dataset_path(self.name, ext=".csv.gz", data_home=self.data_home) return pd.read_csv(path, compression="gzip")
Returns the entire dataset as a single pandas DataFrame. Returns ------- df : DataFrame with shape (n_instances, n_columns) A pandas DataFrame containing the complete original data table including all targets (specified by the meta data) and all features (including those that might have been filtered out).
def static_uint8_variable_for_data(variable_name, data, max_line_length=120, comment="", indent=2): r""" >>> static_uint8_variable_for_data("v", "abc") 'static uint8_t v[3] = {\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abc", comment="hi") 'static uint8_t v[3] = { // hi\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abc", indent=4) 'static uint8_t v[3] = {\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abcabcabcabc", max_line_length=20) 'static uint8_t v[12] = {\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n}; // v' """ hex_components = [] for byte in data: byte_as_hex = "0x{u:02X}".format(u=ord(byte)) hex_components.append(byte_as_hex) chunk_size = (max_line_length - indent + 2 - 1) // 6 # 6 is len("0xAA, "); +2 for the last element's ", "; -1 for the trailing comma array_lines = [] for chunk_offset in xrange(0, len(hex_components), chunk_size): chunk = hex_components[chunk_offset:chunk_offset + chunk_size] array_lines.append(" " * indent + ", ".join(chunk) + ",") array_data = "\n".join(array_lines) if comment != "": comment = " // " + comment substitutions = {"v": variable_name, "l": len(hex_components), "d": array_data, "c": comment} declaration = "static uint8_t {v}[{l}] = {{{c}\n{d}\n}}; // {v}".format(**substitutions) return declaration
r""" >>> static_uint8_variable_for_data("v", "abc") 'static uint8_t v[3] = {\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abc", comment="hi") 'static uint8_t v[3] = { // hi\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abc", indent=4) 'static uint8_t v[3] = {\n 0x61, 0x62, 0x63,\n}; // v' >>> static_uint8_variable_for_data("v", "abcabcabcabc", max_line_length=20) 'static uint8_t v[12] = {\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n 0x61, 0x62, 0x63,\n}; // v'
def set_config(self, **config): """Shadow all the current config.""" reinit = False if 'stdopt' in config: stdopt = config.pop('stdopt') reinit = (stdopt != self.stdopt) self.stdopt = stdopt if 'attachopt' in config: attachopt = config.pop('attachopt') reinit = reinit or (attachopt != self.attachopt) self.attachopt = attachopt if 'attachvalue' in config: attachvalue = config.pop('attachvalue') reinit = reinit or (attachvalue != self.attachvalue) self.attachvalue = attachvalue if 'auto2dashes' in config: self.auto2dashes = config.pop('auto2dashes') if 'name' in config: name = config.pop('name') reinit = reinit or (name != self.name) self.name = name if 'help' in config: self.help = config.pop('help') self._set_or_remove_extra_handler( self.help, ('--help', '-h'), self.help_handler) if 'version' in config: self.version = config.pop('version') self._set_or_remove_extra_handler( self.version is not None, ('--version', '-v'), self.version_handler) if 'case_sensitive' in config: case_sensitive = config.pop('case_sensitive') reinit = reinit or (case_sensitive != self.case_sensitive) self.case_sensitive = case_sensitive if 'optionsfirst' in config: self.options_first = config.pop('optionsfirst') if 'appearedonly' in config: self.appeared_only = config.pop('appearedonly') if 'namedoptions' in config: namedoptions = config.pop('namedoptions') reinit = reinit or (namedoptions != self.namedoptions) self.namedoptions = namedoptions if 'extra' in config: self.extra.update(self._formal_extra(config.pop('extra'))) if config: # should be empty raise ValueError( '`%s` %s not accepted key argument%s' % ( '`, `'.join(config), 'is' if len(config) == 1 else 'are', '' if len(config) == 1 else 's' )) if self.doc is not None and reinit: logger.warning( 'You changed the config that requires re-initialized' ' `Docpie` object. Create a new one instead' ) self._init()
Shadow all the current config.
def ckgpav(inst, sclkdp, tol, ref): """ Get pointing (attitude) and angular velocity for a specified spacecraft clock time. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckgpav_c.html :param inst: NAIF ID of instrument, spacecraft, or structure. :type inst: int :param sclkdp: Encoded spacecraft clock time. :type sclkdp: float :param tol: Time tolerance. :type tol: float :param ref: Reference frame. :type ref: str :return: C-matrix pointing data, Angular velocity vector, Output encoded spacecraft clock time. :rtype: tuple """ inst = ctypes.c_int(inst) sclkdp = ctypes.c_double(sclkdp) tol = ctypes.c_double(tol) ref = stypes.stringToCharP(ref) cmat = stypes.emptyDoubleMatrix() av = stypes.emptyDoubleVector(3) clkout = ctypes.c_double() found = ctypes.c_int() libspice.ckgpav_c(inst, sclkdp, tol, ref, cmat, av, ctypes.byref(clkout), ctypes.byref(found)) return stypes.cMatrixToNumpy(cmat), stypes.cVectorToPython( av), clkout.value, bool(found.value)
Get pointing (attitude) and angular velocity for a specified spacecraft clock time. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckgpav_c.html :param inst: NAIF ID of instrument, spacecraft, or structure. :type inst: int :param sclkdp: Encoded spacecraft clock time. :type sclkdp: float :param tol: Time tolerance. :type tol: float :param ref: Reference frame. :type ref: str :return: C-matrix pointing data, Angular velocity vector, Output encoded spacecraft clock time. :rtype: tuple
def credentials_required(method_func): """ Decorator for methods that checks that the client has credentials. Throws a CredentialsMissingError when they are absent. """ def _checkcredentials(self, *args, **kwargs): if self.username and self.password: return method_func(self, *args, **kwargs) else: raise CredentialsMissingError("This is a private method. \ You must provide a username and password when you initialize the \ DocumentCloud client to attempt this type of request.") return wraps(method_func)(_checkcredentials)
Decorator for methods that checks that the client has credentials. Throws a CredentialsMissingError when they are absent.
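The same guard-decorator pattern in self-contained form, with a dummy client and exception standing in for the DocumentCloud ones:

from functools import wraps

class CredentialsMissingError(Exception):
    pass

def credentials_required(method_func):
    @wraps(method_func)  # preserve the wrapped method's name and docstring
    def _checkcredentials(self, *args, **kwargs):
        if self.username and self.password:
            return method_func(self, *args, **kwargs)
        raise CredentialsMissingError("username and password required")
    return _checkcredentials

class Client:
    def __init__(self, username=None, password=None):
        self.username, self.password = username, password

    @credentials_required
    def whoami(self):
        return self.username

assert Client("alice", "s3cret").whoami() == "alice"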
def dimensions(self): """Returns terminal dimensions Don't save this information for long periods of time because the user might resize their terminal. :return: Returns ``(width, height)``. If there's no terminal to be found, we'll just return ``(79, 40)``. """ try: call = fcntl.ioctl(self.termfd, termios.TIOCGWINSZ, "\000" * 8) except IOError: return (79, 40) else: height, width = struct.unpack("hhhh", call)[:2] return (width, height)
Returns terminal dimensions Don't save this information for long periods of time because the user might resize their terminal. :return: Returns ``(width, height)``. If there's no terminal to be found, we'll just return ``(79, 40)``.
def glue(self, pos): """Calculates the distance between the given position and the port :param (float, float) pos: Distance to this position is calculated :return: Distance to port :rtype: float """ # Distance between border of rectangle and point # Equation from http://stackoverflow.com/a/18157551/3568069 dx = max(self.point.x - self.width / 2. - pos[0], 0, pos[0] - (self.point.x + self.width / 2.)) dy = max(self.point.y - self.height / 2. - pos[1], 0, pos[1] - (self.point.y + self.height / 2.)) dist = sqrt(dx*dx + dy*dy) return self.point, dist
Calculates the distance between the given position and the port :param (float, float) pos: Distance to this position is calculated :return: Distance to port :rtype: float
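The clamp-based border-distance formula, isolated and sanity-checked (cx, cy are the rectangle centre; points inside the rectangle get distance 0):

from math import sqrt

def rect_distance(cx, cy, width, height, px, py):
    # Clamp: each max() is 0 when the point is within that axis's extent.
    dx = max(cx - width / 2. - px, 0, px - (cx + width / 2.))
    dy = max(cy - height / 2. - py, 0, py - (cy + height / 2.))
    return sqrt(dx * dx + dy * dy)

assert rect_distance(0, 0, 2, 2, 0, 0) == 0   # inside
assert rect_distance(0, 0, 2, 2, 3, 0) == 2   # 2 right of the edge
assert rect_distance(0, 0, 2, 2, 4, 5) == 5   # corner: 3-4-5 triangle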
def infer_batch(self, dataloader): """ Description: inference for LipNet """ sum_losses = 0 len_losses = 0 for input_data, input_label in dataloader: data = gluon.utils.split_and_load(input_data, self.ctx, even_split=False) label = gluon.utils.split_and_load(input_label, self.ctx, even_split=False) batch_sum, batch_len = self.infer(data, label) sum_losses += batch_sum len_losses += batch_len return sum_losses, len_losses
Description: inference for LipNet
def get_upcoming_event_lists_for_the_remainder_of_the_month(self, year = None, month = None): '''Return the set of events as triple of (today's events, events for the remainder of the week, events for the remainder of the month).''' events = [] if year is None and month is None: now = datetime.now(tz=self.timezone) else: now = datetime(year=year, month=month, day=1, hour=0, minute=0, second=0, tzinfo=self.timezone) # Get today's events, including past events start_time = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0, tzinfo=self.timezone) end_time = datetime(year = start_time.year, month = start_time.month, day = start_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone) events.append(self.get_events(start_time.isoformat(), end_time.isoformat())) # Get this week's events if now.weekday() < 6: start_time = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0, tzinfo=self.timezone) + timedelta(days = 1) end_time = start_time + timedelta(days = 6 - now.weekday()) # We still want to return events in the next month if they fall within this week. end_time = end_time + timedelta(seconds = -1) events.append(self.get_events(start_time.isoformat(), end_time.isoformat())) else: events.append([]) # Get the remaining events in the month start_time = end_time + timedelta(seconds = 1) if start_time.month == now.month: if now.month == 12: end_time = datetime(year = start_time.year, month = 12, day = 31, hour=23, minute=59, second=59, tzinfo=self.timezone) else: end_time = datetime(year = start_time.year, month = start_time.month + 1, day = 1, hour=0, minute=0, second=0, tzinfo=self.timezone) end_time = end_time - timedelta(seconds = 1) events.append(self.get_events(start_time.isoformat(), end_time.isoformat())) else: events.append([]) return events
Return the set of events as triple of (today's events, events for the remainder of the week, events for the remainder of the month).
def distance_to_contact(D, alpha=1): """Compute contact matrix from input distance matrix. Zero distance values are given the largest contact count inferred from the non-zero distance values. """ if callable(alpha): distance_function = alpha else: try: a = np.float64(alpha) def distance_function(x): return 1 / (x ** (1 / a)) except TypeError: print("Alpha parameter must be callable or an array-like") raise except ZeroDivisionError: raise ValueError("Alpha parameter must be non-zero") m = np.max(distance_function(D[D != 0])) M = np.zeros(D.shape) M[D != 0] = distance_function(D[D != 0]) M[D == 0] = m return M
Compute contact matrix from input distance matrix. Zero distance values are given the largest contact count inferred from the non-zero distance values.
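A quick check on a toy matrix, assuming the function above is in scope: with the default alpha=1 the mapping is 1/x, and zeros receive the largest inferred contact value:

import numpy as np

D = np.array([[0., 1., 2.],
              [1., 0., 4.],
              [2., 4., 0.]])
M = distance_to_contact(D)
assert M[0, 1] == 1.0   # 1 / 1
assert M[0, 2] == 0.5   # 1 / 2
assert M[0, 0] == 1.0   # zero distance -> largest inferred contact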
def delete(self, lookup): """ If exactly one quote matches, delete it. Otherwise, raise a ValueError. """ lookup, num = self.split_num(lookup) if num: result = self.find_matches(lookup)[num - 1] else: result, = self.find_matches(lookup) self.db.delete_one(result)
If exactly one quote matches, delete it. Otherwise, raise a ValueError.
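The `result, = self.find_matches(lookup)` line is doing real work: unpacking into a one-element target raises ValueError on zero or multiple matches. A tiny sketch of the idiom:

def exactly_one(matches):
    result, = matches  # ValueError unless matches has exactly one item
    return result

assert exactly_one(["only"]) == "only"
for bad in ([], ["a", "b"]):
    try:
        exactly_one(bad)
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError")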
def get_all(self): """ Gets all component references registered in this reference map. :return: a list with component references. """ components = [] self._lock.acquire() try: for reference in self._references: components.append(reference.get_component()) finally: self._lock.release() return components
Gets all component references registered in this reference map. :return: a list with component references.
def _set_usb(self, v, load=False): """ Setter method for usb, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/usb (container) If this variable is read-only (config: false) in the source YANG file, then _set_usb is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_usb() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=usb.usb, is_container='container', presence=False, yang_name="usb", rest_name="usb", parent=self, choice=(u'protocol-type', u'usb-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """usb must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=usb.usb, is_container='container', presence=False, yang_name="usb", rest_name="usb", parent=self, choice=(u'protocol-type', u'usb-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""", }) self.__usb = t if hasattr(self, '_set'): self._set()
Setter method for usb, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/usb (container) If this variable is read-only (config: false) in the source YANG file, then _set_usb is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_usb() directly.
def assert_dict_equal( first, second, key_msg_fmt="{msg}", value_msg_fmt="{msg}" ): """Fail unless first dictionary equals second. The dictionaries are considered equal, if they both contain the same keys, and their respective values are also equal. >>> assert_dict_equal({"foo": 5}, {"foo": 5}) >>> assert_dict_equal({"foo": 5}, {}) Traceback (most recent call last): ... AssertionError: key 'foo' missing from right dict The following key_msg_fmt arguments are supported, if the keys do not match: * msg - the default error message * first - the first dict * second - the second dict * missing_keys - list of keys missing from right * extra_keys - list of keys missing from left The following value_msg_fmt arguments are supported, if a value does not match: * msg - the default error message * first - the first dict * second - the second dict * key - the key where the value does not match * first_value - the value in the first dict * second_value - the value in the second dict """ first_keys = set(first.keys()) second_keys = set(second.keys()) missing_keys = list(first_keys - second_keys) extra_keys = list(second_keys - first_keys) if missing_keys or extra_keys: if missing_keys: if len(missing_keys) == 1: msg = "key {!r} missing from right dict".format( missing_keys[0] ) else: keys = ", ".join(sorted(repr(k) for k in missing_keys)) msg = "keys {} missing from right dict".format(keys) else: if len(extra_keys) == 1: msg = "extra key {!r} in right dict".format(extra_keys[0]) else: keys = ", ".join(sorted(repr(k) for k in extra_keys)) msg = "extra keys {} in right dict".format(keys) if key_msg_fmt: msg = key_msg_fmt.format( msg=msg, first=first, second=second, missing_keys=missing_keys, extra_keys=extra_keys, ) raise AssertionError(msg) for key in first: first_value = first[key] second_value = second[key] msg = "key '{}' differs: {!r} != {!r}".format( key, first_value, second_value ) if value_msg_fmt: msg = value_msg_fmt.format( msg=msg, first=first, second=second, key=key, first_value=first_value, second_value=second_value, ) msg = msg.replace("{", "{{").replace("}", "}}") assert_equal(first_value, second_value, msg_fmt=msg)
Fail unless first dictionary equals second. The dictionaries are considered equal, if they both contain the same keys, and their respective values are also equal. >>> assert_dict_equal({"foo": 5}, {"foo": 5}) >>> assert_dict_equal({"foo": 5}, {}) Traceback (most recent call last): ... AssertionError: key 'foo' missing from right dict The following key_msg_fmt arguments are supported, if the keys do not match: * msg - the default error message * first - the first dict * second - the second dict * missing_keys - list of keys missing from right * extra_keys - list of keys missing from left The following value_msg_fmt arguments are supported, if a value does not match: * msg - the default error message * first - the first dict * second - the second dict * key - the key where the value does not match * first_value - the value in the first dict * second_value - the value in the second dict
def read_samples(self, parameters, array_class=None, **kwargs): """Reads samples for the given parameter(s). The ``parameters`` can be the name of any dataset in ``samples_group``, a virtual field or method of ``FieldArray`` (as long as the file contains the necessary fields to derive the virtual field or method), and/or any numpy function of these. The ``parameters`` are parsed to figure out what datasets are needed. Only those datasets will be loaded, and will be the base-level fields of the returned ``FieldArray``. The ``static_params`` are also added as attributes of the returned ``FieldArray``. Parameters ----------- parameters : (list of) strings The parameter(s) to retrieve. array_class : FieldArray-like class, optional The type of array to return. The class must have ``from_kwargs`` and ``parse_parameters`` methods. If None, will return a ``FieldArray``. \**kwargs : All other keyword arguments are passed to ``read_raw_samples``. Returns ------- FieldArray : The samples as a ``FieldArray``. """ # get the type of array class to use if array_class is None: array_class = FieldArray # get the names of fields needed for the given parameters possible_fields = self[self.samples_group].keys() loadfields = array_class.parse_parameters(parameters, possible_fields) samples = self.read_raw_samples(loadfields, **kwargs) # convert to FieldArray samples = array_class.from_kwargs(**samples) # add the static params and attributes addatrs = (list(self.static_params.items()) + list(self[self.samples_group].attrs.items())) for (p, val) in addatrs: setattr(samples, p, val) return samples
Reads samples for the given parameter(s). The ``parameters`` can be the name of any dataset in ``samples_group``, a virtual field or method of ``FieldArray`` (as long as the file contains the necessary fields to derive the virtual field or method), and/or any numpy function of these. The ``parameters`` are parsed to figure out what datasets are needed. Only those datasets will be loaded, and will be the base-level fields of the returned ``FieldArray``. The ``static_params`` are also added as attributes of the returned ``FieldArray``. Parameters ----------- parameters : (list of) strings The parameter(s) to retrieve. array_class : FieldArray-like class, optional The type of array to return. The class must have ``from_kwargs`` and ``parse_parameters`` methods. If None, will return a ``FieldArray``. \**kwargs : All other keyword arguments are passed to ``read_raw_samples``. Returns ------- FieldArray : The samples as a ``FieldArray``.
def get_cluster(self, word): """ Returns the cluster number for a word in the vocabulary """ idx = self.ix(word) return self.clusters[idx]
Returns the cluster number for a word in the vocabulary
def angular_momentum(self): r""" Compute the angular momentum for the phase-space positions contained in this object: .. math:: \boldsymbol{L} = \boldsymbol{q} \times \boldsymbol{p} See :ref:`shape-conventions` for more information about the shapes of input and output objects. Returns ------- L : :class:`~astropy.units.Quantity` Array of angular momentum vectors. Examples -------- >>> import numpy as np >>> import astropy.units as u >>> pos = np.array([1., 0, 0]) * u.au >>> vel = np.array([0, 2*np.pi, 0]) * u.au/u.yr >>> w = PhaseSpacePosition(pos, vel) >>> w.angular_momentum() # doctest: +FLOAT_CMP <Quantity [0. ,0. ,6.28318531] AU2 / yr> """ cart = self.represent_as(coord.CartesianRepresentation) return cart.pos.cross(cart.vel).xyz
r""" Compute the angular momentum for the phase-space positions contained in this object: .. math:: \boldsymbol{L} = \boldsymbol{q} \times \boldsymbol{p} See :ref:`shape-conventions` for more information about the shapes of input and output objects. Returns ------- L : :class:`~astropy.units.Quantity` Array of angular momentum vectors. Examples -------- >>> import numpy as np >>> import astropy.units as u >>> pos = np.array([1., 0, 0]) * u.au >>> vel = np.array([0, 2*np.pi, 0]) * u.au/u.yr >>> w = PhaseSpacePosition(pos, vel) >>> w.angular_momentum() # doctest: +FLOAT_CMP <Quantity [0. ,0. ,6.28318531] AU2 / yr>
def create_subcommand_synopsis(self, parser): """ show usage with description for commands """ self.add_usage(parser.usage, parser._get_positional_actions(), None, prefix='') usage = self._format_usage(parser.usage, parser._get_positional_actions(), None, '') return self._bold(usage)
show usage with description for commands
def blast(args): """ %prog blast <deltafile|coordsfile> Convert delta or coordsfile to BLAST tabular output. """ p = OptionParser(blast.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) deltafile, = args blastfile = deltafile.rsplit(".", 1)[0] + ".blast" if need_update(deltafile, blastfile): coords = Coords(deltafile) fw = open(blastfile, "w") for c in coords: print(c.blastline, file=fw) fw.close()
%prog blast <deltafile|coordsfile> Convert delta or coordsfile to BLAST tabular output.
def interpreter_versions(self): """Python and IPython versions used by clients""" if CONF.get('main_interpreter', 'default'): from IPython.core import release versions = dict( python_version = sys.version.split("\n")[0].strip(), ipython_version = release.version ) else: import subprocess versions = {} pyexec = CONF.get('main_interpreter', 'executable') py_cmd = "%s -c 'import sys; print(sys.version.split(\"\\n\")[0])'" % \ pyexec ipy_cmd = "%s -c 'import IPython.core.release as r; print(r.version)'" \ % pyexec for cmd in [py_cmd, ipy_cmd]: try: proc = programs.run_shell_command(cmd) output, _err = proc.communicate() except subprocess.CalledProcessError: output = '' output = output.decode().split('\n')[0].strip() if 'IPython' in cmd: versions['ipython_version'] = output else: versions['python_version'] = output return versions
Python and IPython versions used by clients
def d3flare_json(metadata, file=None, **options): """ Converts the *metadata* dictionary of a container or field into a ``flare.json`` formatted string or formatted stream written to the *file* The ``flare.json`` format is defined by the `d3.js <https://d3js.org/>`_ graphic library. The ``flare.json`` format looks like this: .. code-block:: JSON { "class": "class of the field or container", "name": "name of the field or container", "size": "bit size of the field", "value": "value of the field", "children": [] } :param dict metadata: metadata generated from a :class:`Structure`, :class:`Sequence`, :class:`Array` or any :class:`Field` instance. :param file file: file-like object. """ def convert(root): dct = OrderedDict() item_type = root.get('type') dct['class'] = root.get('class') dct['name'] = root.get('name') if item_type is ItemClass.Field.name: dct['size'] = root.get('size') dct['value'] = root.get('value') children = root.get('member') if children: # Any containable class with children dct['children'] = list() if item_type is ItemClass.Pointer.name: # Create pointer address field as child field = OrderedDict() field['class'] = dct['class'] field['name'] = '*' + dct['name'] field['size'] = root.get('size') field['value'] = root.get('value') dct['children'].append(field) for child in map(convert, children): # Recursive function call map(fnc, args). dct['children'].append(child) elif item_type is ItemClass.Pointer.name: # Null pointer (None pointer) dct['size'] = root.get('size') dct['value'] = root.get('value') return dct options['indent'] = options.get('indent', 2) if file: return json.dump(convert(metadata), file, **options) else: return json.dumps(convert(metadata), **options)
Converts the *metadata* dictionary of a container or field into a ``flare.json`` formatted string or formatted stream written to the *file* The ``flare.json`` format is defined by the `d3.js <https://d3js.org/>`_ graphic library. The ``flare.json`` format looks like this: .. code-block:: JSON { "class": "class of the field or container", "name": "name of the field or container", "size": "bit size of the field", "value": "value of the field", "children": [] } :param dict metadata: metadata generated from a :class:`Structure`, :class:`Sequence`, :class:`Array` or any :class:`Field` instance. :param file file: file-like object.
def height(poly): """Return the mean z-coordinate (height) of the polygon's vertices.""" num = len(poly) hgt = 0.0 for i in range(num): hgt += (poly[i][2]) return hgt/num
Return the mean z-coordinate (height) of the polygon's vertices.
def recover_info_from_exception(err: Exception) -> Dict: """ Retrieves the information added to an exception by :func:`add_info_to_exception`. """ if len(err.args) < 1: return {} info = err.args[-1] if not isinstance(info, dict): return {} return info
Retrieves the information added to an exception by :func:`add_info_to_exception`.
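A round-trip sketch. `add_info_to_exception` is referenced but not shown above, so the writer here, which appends a dict to err.args, is an assumed counterpart for illustration:

from typing import Dict

def add_info_to_exception(err: Exception, info: Dict) -> None:
    # Assumed counterpart: append info as the last args entry, which is
    # exactly where recover_info_from_exception looks.
    err.args = err.args + (info,)

try:
    try:
        raise RuntimeError("boom")
    except RuntimeError as err:
        add_info_to_exception(err, {"stage": "parse"})
        raise
except RuntimeError as err:
    assert recover_info_from_exception(err) == {"stage": "parse"}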
def url_paths(self): """ A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place """ unformatted_paths = self._url_module.url_paths paths = {} for unformatted_path, handler in unformatted_paths.items(): path = unformatted_path.format("") paths[path] = handler return paths
A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place
def send_request_message(self, request_id, meta, body, _=None): """ Receives a request from the client and handles and dispatches it in-thread. `message_expiry_in_seconds` is not supported. Messages do not expire, as the server handles the request immediately in the same thread before this method returns. This method blocks until the server has completed handling the request. """ self._current_request = (request_id, meta, body) try: self.server.handle_next_request() finally: self._current_request = None
Receives a request from the client and handles and dispatches it in-thread. `message_expiry_in_seconds` is not supported. Messages do not expire, as the server handles the request immediately in the same thread before this method returns. This method blocks until the server has completed handling the request.
def addPartsToVSLC( self, vslc_id, allele1_id, allele2_id, zygosity_id=None, allele1_rel=None, allele2_rel=None): """ Here we add the parts to the VSLC. While alleles (reference or variant loci) are traditionally added, you can add any node (such as sequence_alterations for unlocated variations) to a vslc if they are known to be paired. However, if a sequence_alteration's locus is unknown, it probably should be added directly to the GVC. :param vslc_id: :param allele1_id: :param allele2_id: :param zygosity_id: :param allele1_rel: :param allele2_rel: :return: """ # vslc has parts allele1/allele2 if allele1_id is not None: self.addParts(allele1_id, vslc_id, allele1_rel) if allele2_id is not None and allele2_id.strip() != '': self.addParts(allele2_id, vslc_id, allele2_rel) # figure out zygosity if it's not supplied if zygosity_id is None: if allele1_id == allele2_id: zygosity_id = self.globaltt['homozygous'] else: zygosity_id = self.globaltt['heterozygous'] if zygosity_id is not None: self.graph.addTriple(vslc_id, self.globaltt['has_zygosity'], zygosity_id) return
Here we add the parts to the VSLC. While alleles (reference or variant loci) are traditionally added, you can add any node (such as sequence_alterations for unlocated variations) to a vslc if they are known to be paired. However, if a sequence_alteration's locus is unknown, it probably should be added directly to the GVC. :param vslc_id: :param allele1_id: :param allele2_id: :param zygosity_id: :param allele1_rel: :param allele2_rel: :return:
def _to_corrected_pandas_type(dt): """ When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. This method gets the corrected data type for Pandas if that type may be inferred incorrectly. """ import numpy as np if type(dt) == ByteType: return np.int8 elif type(dt) == ShortType: return np.int16 elif type(dt) == IntegerType: return np.int32 elif type(dt) == FloatType: return np.float32 else: return None
When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. This method gets the corrected data type for Pandas if that type may be inferred incorrectly.
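A design-note sketch: the if/elif type chain is a natural dict dispatch. Keying on the type's name keeps this runnable without pyspark installed; the real module would import the types directly:

import numpy as np

_PANDAS_TYPE_FOR = {
    "ByteType": np.int8,
    "ShortType": np.int16,
    "IntegerType": np.int32,
    "FloatType": np.float32,
}

def to_corrected_pandas_type(dt):
    # Same result as the chain above; .get returns None for any type
    # that needs no correction.
    return _PANDAS_TYPE_FOR.get(type(dt).__name__)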
def on_return(self, node):  # ('value',)
    """Return statement: look for None, return special sentinel."""
    self.retval = self.run(node.value)
    if self.retval is None:
        self.retval = ReturnedNone
    return
Return statement: look for None, return special sentinel.
def parse_coverage(self, f):
    """ Parse the contents of the Qualimap BamQC Coverage Histogram file """
    # Get the sample name from the grandparent directory
    # Typical path: <sample name>/raw_data_qualimapReport/coverage_histogram.txt
    s_name = self.get_s_name(f)

    d = dict()
    for l in f['f']:
        if l.startswith('#'):
            continue
        coverage, count = l.split(None, 1)
        coverage = int(round(float(coverage)))
        count = float(count)
        d[coverage] = count

    if len(d) == 0:
        log.debug("Couldn't parse contents of coverage histogram file {}".format(f['fn']))
        return None

    # Find the median without importing anything to do it for us
    num_counts = sum(d.values())
    cum_counts = 0
    median_coverage = None
    for thiscov, thiscount in d.items():
        cum_counts += thiscount
        if cum_counts >= num_counts / 2:
            median_coverage = thiscov
            break
    self.general_stats_data[s_name]['median_coverage'] = median_coverage

    # Save results
    if s_name in self.qualimap_bamqc_coverage_hist:
        log.debug("Duplicate coverage histogram sample name found! Overwriting: {}".format(s_name))
    self.qualimap_bamqc_coverage_hist[s_name] = d
    self.add_data_source(f, s_name=s_name, section='coverage_histogram')
Parse the contents of the Qualimap BamQC Coverage Histogram file
def mapPartitions(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each partition of this RDD. >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> def f(iterator): yield sum(iterator) >>> rdd.mapPartitions(f).collect() [3, 7] """ def func(s, iterator): return f(iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning)
Return a new RDD by applying a function to each partition of this RDD. >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> def f(iterator): yield sum(iterator) >>> rdd.mapPartitions(f).collect() [3, 7]
def send(self, jsonstr): """ Send jsonstr to the UDP collector >>> logger = UDPLogger() >>> logger.send('{"key": "value"}') """ udp_sock = socket(AF_INET, SOCK_DGRAM) udp_sock.sendto(jsonstr.encode('utf-8'), self.addr)
Send jsonstr to the UDP collector >>> logger = UDPLogger() >>> logger.send('{"key": "value"}')
async def get_data(self):
    """Retrieve the data."""
    url = '{}/{}'.format(self.url, 'all')

    try:
        with async_timeout.timeout(5, loop=self._loop):
            if self.password is None:
                response = await self._session.get(url)
            else:
                auth = aiohttp.BasicAuth(self.username, self.password)
                response = await self._session.get(url, auth=auth)

        _LOGGER.debug("Response from Glances API: %s", response.status)
        self.data = await response.json()
        _LOGGER.debug(self.data)
    except (asyncio.TimeoutError, aiohttp.ClientError):
        _LOGGER.error("Cannot load data from Glances API")
        raise exceptions.GlancesApiConnectionError()
Retrieve the data.
def get_route_name(resource_uri):
    """ Get route name from RAML resource URI.

    :param resource_uri: String representing RAML resource URI.
    :returns string: String with route name, which is :resource_uri:
        stripped of non-word characters.
    """
    resource_uri = resource_uri.strip('/')
    resource_uri = re.sub(r'\W', '', resource_uri)
    return resource_uri
Get route name from RAML resource URI. :param resource_uri: String representing RAML resource URI. :returns string: String with route name, which is :resource_uri: stripped of non-word characters.
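For example:

get_route_name('/users/{id}/settings')  # -> 'usersidsettings'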
def _set_default_configuration_options(app):
    """
    Sets the default configuration options used by this extension
    """
    # Options for JWTs when the TOKEN_LOCATION is headers
    app.config.setdefault('JWT_HEADER_NAME', 'Authorization')
    app.config.setdefault('JWT_HEADER_TYPE', 'Bearer')

    # How long a token created with 'create_jwt' will last before
    # it expires (when using the default jwt_data_callback function).
    app.config.setdefault('JWT_EXPIRES', datetime.timedelta(hours=1))

    # What algorithm to use to sign the token. See here for a list of options:
    # https://github.com/jpadilla/pyjwt/blob/master/jwt/api_jwt.py
    app.config.setdefault('JWT_ALGORITHM', 'HS256')

    # Key that acts as the identity for the JWT
    app.config.setdefault('JWT_IDENTITY_CLAIM', 'sub')

    # Expected value of the audience claim
    app.config.setdefault('JWT_DECODE_AUDIENCE', None)

    # Secret key to sign JWTs with. Only used if a symmetric algorithm is
    # used (such as the HS* algorithms).
    app.config.setdefault('JWT_SECRET_KEY', None)

    # Keys to sign JWTs with when using an asymmetric
    # (public/private key) algorithm, such as RS* or EC*
    app.config.setdefault('JWT_PRIVATE_KEY', None)
    app.config.setdefault('JWT_PUBLIC_KEY', None)
Sets the default configuration options used by this extension
def parse_html(fileobj, encoding):
    """
    Given a file object *fileobj*, return an ElementTree instance,
    parsed with the given *encoding* (typically utf8).
    """
    parser = HTMLParser(encoding=encoding, remove_blank_text=True)
    return parse(fileobj, parser)
Given a file object *fileobj*, return an ElementTree instance, parsed with the given *encoding* (typically utf8).
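A usage sketch, assuming `HTMLParser` and `parse` are imported from lxml.etree (as the `remove_blank_text` option suggests) and using a hypothetical file name:

with open('page.html', 'rb') as fh:
    tree = parse_html(fh, 'utf8')
    title = tree.find('.//title')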
def solve(self):
    '''
    Solves the single period consumption-saving problem using the method of
    endogenous gridpoints.  Solution includes a consumption function cFunc
    (using cubic or linear splines), a marginal value function vPfunc, a
    minimum acceptable level of normalized market resources mNrmMin, normalized
    human wealth hNrm, and bounding MPCs MPCmin and MPCmax.  It might also
    have a value function vFunc and marginal marginal value function vPPfunc.

    Parameters
    ----------
    none

    Returns
    -------
    solution : ConsumerSolution
        The solution to the single period consumption-saving problem.
    '''
    # Make arrays of end-of-period assets and end-of-period marginal value
    aNrm = self.prepareToCalcEndOfPrdvP()
    EndOfPrdvP = self.calcEndOfPrdvP()

    # Construct a basic solution for this period
    if self.CubicBool:
        solution = self.makeBasicSolution(EndOfPrdvP, aNrm, interpolator=self.makeCubiccFunc)
    else:
        solution = self.makeBasicSolution(EndOfPrdvP, aNrm, interpolator=self.makeLinearcFunc)
    solution = self.addMPCandHumanWealth(solution)  # add a few things
    solution = self.addSSmNrm(solution)  # find steady state m

    # Add the value function if requested, as well as the marginal marginal
    # value function if cubic splines were used (to prepare for next period)
    if self.vFuncBool:
        solution = self.addvFunc(solution, EndOfPrdvP)
    if self.CubicBool:
        solution = self.addvPPfunc(solution)
    return solution
Solves the single period consumption-saving problem using the method of
endogenous gridpoints.  Solution includes a consumption function cFunc
(using cubic or linear splines), a marginal value function vPfunc, a
minimum acceptable level of normalized market resources mNrmMin, normalized
human wealth hNrm, and bounding MPCs MPCmin and MPCmax.  It might also
have a value function vFunc and marginal marginal value function vPPfunc.

Parameters
----------
none

Returns
-------
solution : ConsumerSolution
    The solution to the single period consumption-saving problem.
def get_descriptions(self, description_type):
    """Gets the descriptions for the specified type.

    When complete the callback is called with a list of descriptions
    """
    (desc_type, max_units) = description_type
    results = [None] * max_units
    self.elk._descriptions_in_progress[desc_type] = (max_units, results, self._got_desc)
    self.elk.send(sd_encode(desc_type=desc_type, unit=0))
Gets the descriptions for the specified type. When complete the callback is called with a list of descriptions
def analysis(self):
    """Return an AnalysisPartition proxy, which wraps this partition to provide
    access to dataframes, shapely shapes and other analysis services"""
    if isinstance(self, PartitionProxy):
        return AnalysisPartition(self._obj)
    else:
        return AnalysisPartition(self)
Return an AnalysisPartition proxy, which wraps this partition to provide access to dataframes, shapely shapes and other analysis services
def django_url(step, url=None): """ The URL for a page from the test server. :param step: A Gherkin step :param url: If specified, the relative URL to append. """ base_url = step.test.live_server_url if url: return urljoin(base_url, url) else: return base_url
The URL for a page from the test server. :param step: A Gherkin step :param url: If specified, the relative URL to append.
def combineIndepDstns(*distributions):
    '''
    Given n lists (or tuples) whose elements represent n independent, discrete
    probability spaces (probabilities and values), construct a joint pmf over
    all combinations of these independent points.  Can take multivariate discrete
    distributions as inputs.

    Parameters
    ----------
    distributions : [np.array]
        Arbitrary number of distributions (pmfs).  Each pmf is a list or tuple.
        For each pmf, the first vector is probabilities and all subsequent vectors
        are values.  For each pmf, this should be true:
        len(X_pmf[0]) == len(X_pmf[j]) for j in range(1, len(X_pmf))

    Returns
    -------
    List of arrays, consisting of:

    P_out: np.array
        Probability associated with each point in X_out.

    X_out: np.array (as many as in *distributions)
        Discrete points for the joint discrete probability mass function.

    Written by Nathan Palmer
    Latest update: 5 July August 2017 by Matthew N White
    '''
    # Very quick and incomplete parameter check:
    for dist in distributions:
        assert len(dist[0]) == len(dist[-1]), "len(dist[0]) != len(dist[-1])"

    # Get information on the distributions
    dist_lengths = ()
    dist_dims = ()
    for dist in distributions:
        dist_lengths += (len(dist[0]),)
        dist_dims += (len(dist) - 1,)
    number_of_distributions = len(distributions)

    # Initialize lists we will use
    X_out = []
    P_temp = []

    # Now loop through the distributions, tiling and flattening as necessary.
    for dd, dist in enumerate(distributions):
        # The shape we want before we tile
        dist_newshape = (1,) * dd + (len(dist[0]),) + \
                        (1,) * (number_of_distributions - dd)
        # The tiling we want to do
        dist_tiles = dist_lengths[:dd] + (1,) + dist_lengths[dd + 1:]

        # Now we are ready to tile.
        # We don't use the np.meshgrid commands, because they do not
        # easily support non-symmetric grids.

        # First deal with probabilities
        Pmesh = np.tile(dist[0].reshape(dist_newshape), dist_tiles)  # Tiling
        flatP = Pmesh.ravel()  # Flatten the tiled arrays
        P_temp += [flatP,]  # Add the flattened arrays to the output lists

        # Then loop through each value variable
        for n in range(1, dist_dims[dd] + 1):
            Xmesh = np.tile(dist[n].reshape(dist_newshape), dist_tiles)
            flatX = Xmesh.ravel()
            X_out += [flatX,]

    # We're done getting the flattened X_out arrays we wanted.
    # However, we have a bunch of flattened P_temp arrays, and just want one
    # probability array.  So get the probability array, P_out, here.
    P_out = np.prod(np.array(P_temp), axis=0)

    assert np.isclose(np.sum(P_out), 1), 'Probabilities do not sum to 1!'
    return [P_out,] + X_out
Given n lists (or tuples) whose elements represent n independent, discrete
probability spaces (probabilities and values), construct a joint pmf over
all combinations of these independent points.  Can take multivariate discrete
distributions as inputs.

Parameters
----------
distributions : [np.array]
    Arbitrary number of distributions (pmfs).  Each pmf is a list or tuple.
    For each pmf, the first vector is probabilities and all subsequent vectors
    are values.  For each pmf, this should be true:
    len(X_pmf[0]) == len(X_pmf[j]) for j in range(1, len(X_pmf))

Returns
-------
List of arrays, consisting of:

P_out: np.array
    Probability associated with each point in X_out.

X_out: np.array (as many as in *distributions)
    Discrete points for the joint discrete probability mass function.

Written by Nathan Palmer
Latest update: 5 July August 2017 by Matthew N White
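A small worked example (assumes numpy is imported as np): combining a two-point and a three-point pmf yields a six-point joint pmf whose probabilities multiply.

coin = [np.array([0.5, 0.5]), np.array([0.0, 1.0])]            # P, X
die = [np.array([1/3., 1/3., 1/3.]), np.array([1., 2., 3.])]   # P, X
P_out, X_coin, X_die = combineIndepDstns(coin, die)
# P_out holds six entries of 0.5 * 1/3; (X_coin, X_die) pairs enumerate all combinations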
def SendUcsFirmware(self, path=None, dumpXml=False):
    """ Uploads a specific CCO Image on UCS. - path specifies the path of the image to be uploaded. """
    from UcsBase import WriteUcsWarning, UcsUtils, ManagedObject, WriteObject, UcsValidationException, \
        UcsException
    from Ucs import ConfigConfig
    from Mos import FirmwareDownloader

    if (self._transactionInProgress):
        raise UcsValidationException(
            "UCS transaction in progress. Cannot execute SendUcsFirmware. Complete or Undo UCS transaction.")
    # raise Exception("UCS transaction in progress. Cannot execute SendUcsFirmware. Complete or Undo UCS transaction.")

    if not path:
        raise UcsValidationException("path parameter is not provided.")
    # raise Exception("Please provide path")

    if not os.path.exists(path):
        raise UcsValidationException("Image not found <%s>" % (path))
    # raise Exception("Image not found <%s>" %(path))

    dn = None
    filePath = path
    localFile = os.path.basename(filePath)

    # Exit if the image already exists on UCSM
    topSystem = ManagedObject(NamingId.TOP_SYSTEM)
    firmwareCatalogue = ManagedObject(NamingId.FIRMWARE_CATALOGUE)
    firmwareDistributable = ManagedObject(NamingId.FIRMWARE_DISTRIBUTABLE)
    firmwareDistributable.Name = localFile

    dn = UcsUtils.MakeDn([topSystem.MakeRn(), firmwareCatalogue.MakeRn(), firmwareDistributable.MakeRn()])

    crDn = self.ConfigResolveDn(dn, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml)
    if (crDn.OutConfig.GetChildCount() > 0):
        raise UcsValidationException("Image file <%s> already exists on FI." % (filePath))
    # raise Exception("Image file <%s> already exist on FI." %(filePath))

    # Create object of type <firmwareDownloader>
    firmwareDownloader = ManagedObject(NamingId.FIRMWARE_DOWNLOADER)
    firmwareDownloader.FileName = localFile
    dn = UcsUtils.MakeDn([topSystem.MakeRn(), firmwareCatalogue.MakeRn(), firmwareDownloader.MakeRn()])
    firmwareDownloader.Dn = dn
    firmwareDownloader.Status = Status.CREATED
    firmwareDownloader.FileName = localFile
    firmwareDownloader.Server = FirmwareDownloader.CONST_PROTOCOL_LOCAL
    firmwareDownloader.Protocol = FirmwareDownloader.CONST_PROTOCOL_LOCAL

    inConfig = ConfigConfig()
    inConfig.AddChild(firmwareDownloader)

    uri = "%s/operations/file-%s/image.txt" % (self.Uri(), localFile)

    progress = Progress()
    stream = file_with_callback(filePath, 'rb', progress.update, filePath)
    request = urllib2.Request(uri)
    request.add_header('Cookie', 'ucsm-cookie=%s' % (self._cookie))
    request.add_data(stream)
    response = urllib2.urlopen(request).read()
    if not response:
        raise UcsValidationException("Unable to upload properly.")
    # WriteUcsWarning("Unable to upload properly.")

    ccm = self.ConfigConfMo(dn=dn, inConfig=inConfig, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml)
    if (ccm.errorCode != 0):
        raise UcsException(ccm.errorCode, ccm.errorDescr)
    return ccm.OutConfig.GetChild()
Uploads a specific CCO Image on UCS. - path specifies the path of the image to be uploaded.
def page(self, value): """ Set the page which will be returned. :param value: 'page' parameter value for the rest api call :type value: str Take a look at https://apihelp.surveygizmo.com/help/surveyresponse-sub-object """ instance = copy(self) instance._filters.append({ 'page': value }) return instance
Set the page which will be returned. :param value: 'page' parameter value for the rest api call :type value: str Take a look at https://apihelp.surveygizmo.com/help/surveyresponse-sub-object
def reg_add(self, reg, value): """ Add a value to a register. The value can be another :class:`Register`, an :class:`Offset`, a :class:`Buffer`, an integer or ``None``. Arguments: reg(pwnypack.shellcode.types.Register): The register to add the value to. value: The value to add to the register. Returns: list: A list of mnemonics that will add ``value`` to ``reg``. """ if value is None: return [] elif isinstance(value, Register): return self.reg_add_reg(reg, value) elif isinstance(value, (Buffer, six.integer_types)): if isinstance(reg, Buffer): value = sum(len(v) for v in six.iterkeys(self.data)) + value.offset if not value: return [] reg_width = self.REGISTER_WIDTH[reg] if value < -2 ** (reg_width-1): raise ValueError('%d does not fit %s' % (value, reg)) elif value >= 2 ** reg_width: raise ValueError('%d does not fit %s' % (value, reg)) if value > 0: return self.reg_add_imm(reg, value) else: return self.reg_sub_imm(reg, -value) else: raise ValueError('Invalid argument type "%s"' % repr(value))
Add a value to a register. The value can be another :class:`Register`, an :class:`Offset`, a :class:`Buffer`, an integer or ``None``. Arguments: reg(pwnypack.shellcode.types.Register): The register to add the value to. value: The value to add to the register. Returns: list: A list of mnemonics that will add ``value`` to ``reg``.
def edit(self, name, color, description=github.GithubObject.NotSet): """ :calls: `PATCH /repos/:owner/:repo/labels/:name <http://developer.github.com/v3/issues/labels>`_ :param name: string :param color: string :param description: string :rtype: None """ assert isinstance(name, (str, unicode)), name assert isinstance(color, (str, unicode)), color assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description post_parameters = { "name": name, "color": color, } if description is not github.GithubObject.NotSet: post_parameters["description"] = description headers, data = self._requester.requestJsonAndCheck( "PATCH", self.url, input=post_parameters, headers={'Accept': Consts.mediaTypeLabelDescriptionSearchPreview} ) self._useAttributes(data)
:calls: `PATCH /repos/:owner/:repo/labels/:name <http://developer.github.com/v3/issues/labels>`_ :param name: string :param color: string :param description: string :rtype: None
def minimum_multivariate_ess(nmr_params, alpha=0.05, epsilon=0.05): r"""Calculate the minimum multivariate Effective Sample Size you will need to obtain the desired precision. This implements the inequality from Vats et al. (2016): .. math:: \widehat{ESS} \geq \frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}} \frac{\chi^{2}_{1-\alpha,p}}{\epsilon^{2}} Where :math:`p` is the number of free parameters. Args: nmr_params (int): the number of free parameters in the model alpha (float): the level of confidence of the confidence region. For example, an alpha of 0.05 means that we want to be in a 95% confidence region. epsilon (float): the level of precision in our multivariate ESS estimate. An epsilon of 0.05 means that we expect that the Monte Carlo error is 5% of the uncertainty in the target distribution. Returns: float: the minimum multivariate Effective Sample Size that one should aim for in MCMC sample to obtain the desired confidence region with the desired precision. References: Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo. arXiv:1512.07713v2 [math.ST] """ tmp = 2.0 / nmr_params log_min_ess = tmp * np.log(2) + np.log(np.pi) - tmp * (np.log(nmr_params) + gammaln(nmr_params / 2)) \ + np.log(chi2.ppf(1 - alpha, nmr_params)) - 2 * np.log(epsilon) return int(round(np.exp(log_min_ess)))
r"""Calculate the minimum multivariate Effective Sample Size you will need to obtain the desired precision. This implements the inequality from Vats et al. (2016): .. math:: \widehat{ESS} \geq \frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}} \frac{\chi^{2}_{1-\alpha,p}}{\epsilon^{2}} Where :math:`p` is the number of free parameters. Args: nmr_params (int): the number of free parameters in the model alpha (float): the level of confidence of the confidence region. For example, an alpha of 0.05 means that we want to be in a 95% confidence region. epsilon (float): the level of precision in our multivariate ESS estimate. An epsilon of 0.05 means that we expect that the Monte Carlo error is 5% of the uncertainty in the target distribution. Returns: float: the minimum multivariate Effective Sample Size that one should aim for in MCMC sample to obtain the desired confidence region with the desired precision. References: Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo. arXiv:1512.07713v2 [math.ST]
def getLogger(name): """Create logger with custom exception() method """ def exception(self, msg, *args, **kwargs): extra = kwargs.setdefault('extra', {}) extra['exc_fullstack'] = self.isEnabledFor(logging.DEBUG) kwargs['exc_info'] = True self.log(logging.ERROR, msg, *args, **kwargs) logger = logging.getLogger(name) logger.exception = six.create_bound_method(exception, logger) return logger
Create logger with custom exception() method
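Usage sketch: the patched method logs at ERROR level with the traceback attached, plus the exc_fullstack flag in the record's extra dict:

log = getLogger(__name__)
try:
    1 / 0
except ZeroDivisionError:
    log.exception('division failed')  # ERROR record with exc_info=True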
def semimajor(P,mstar=1): """Returns semimajor axis in AU given P in days, mstar in solar masses. """ return ((P*DAY/2/np.pi)**2*G*mstar*MSUN)**(1./3)/AU
Returns semimajor axis in AU given P in days, mstar in solar masses.
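Sanity check, assuming the module-level constants DAY, G, MSUN and AU are in SI units: Earth's parameters recover ~1 AU.

semimajor(365.25)  # ~1.0, i.e. 1 AU for a one-year period around a 1 Msun star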
def save_voxel_grid(voxel_grid, file_name): """ Saves binary voxel grid as a binary file. The binary file is structured in little-endian unsigned int format. :param voxel_grid: binary voxel grid :type voxel_grid: list, tuple :param file_name: file name to save :type file_name: str """ try: with open(file_name, 'wb') as fp: for voxel in voxel_grid: fp.write(struct.pack("<I", voxel)) except IOError as e: print("An error occurred: {}".format(e.args[-1])) raise e except Exception: raise
Saves binary voxel grid as a binary file. The binary file is structured in little-endian unsigned int format. :param voxel_grid: binary voxel grid :type voxel_grid: list, tuple :param file_name: file name to save :type file_name: str
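Usage sketch with a hypothetical 2x2x2 occupancy grid flattened to 0/1 ints; the output file holds one little-endian uint32 per voxel:

grid = [1, 0, 0, 1, 1, 1, 0, 0]
save_voxel_grid(grid, 'voxels.bin')  # writes 8 * 4 = 32 bytes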
def find_annotations(data, retriever=None): """Find annotation configuration files for vcfanno, using pre-installed inputs. Creates absolute paths for user specified inputs and finds locally installed defaults. Default annotations: - gemini for variant pipelines - somatic for variant tumor pipelines - rnaedit for RNA-seq variant calling """ conf_files = dd.get_vcfanno(data) if not isinstance(conf_files, (list, tuple)): conf_files = [conf_files] for c in _default_conf_files(data, retriever): if c not in conf_files: conf_files.append(c) conf_checkers = {"gemini": annotate_gemini, "somatic": _annotate_somatic} out = [] annodir = os.path.normpath(os.path.join(os.path.dirname(dd.get_ref_file(data)), os.pardir, "config", "vcfanno")) if not retriever: annodir = os.path.abspath(annodir) for conf_file in conf_files: if objectstore.is_remote(conf_file) or (os.path.exists(conf_file) and os.path.isfile(conf_file)): conffn = conf_file elif not retriever: conffn = os.path.join(annodir, conf_file + ".conf") else: conffn = conf_file + ".conf" luafn = "%s.lua" % utils.splitext_plus(conffn)[0] if retriever: conffn, luafn = [(x if objectstore.is_remote(x) else None) for x in retriever.add_remotes([conffn, luafn], data["config"])] if not conffn: pass elif conf_file in conf_checkers and not conf_checkers[conf_file](data, retriever): logger.warn("Skipping vcfanno configuration: %s. Not all input files found." % conf_file) elif not objectstore.file_exists_or_remote(conffn): build = dd.get_genome_build(data) CONF_NOT_FOUND = ( "The vcfanno configuration {conffn} was not found for {build}, skipping.") logger.warn(CONF_NOT_FOUND.format(**locals())) else: out.append(conffn) if luafn and objectstore.file_exists_or_remote(luafn): out.append(luafn) return out
Find annotation configuration files for vcfanno, using pre-installed inputs. Creates absolute paths for user specified inputs and finds locally installed defaults. Default annotations: - gemini for variant pipelines - somatic for variant tumor pipelines - rnaedit for RNA-seq variant calling
def get_ascii(pid=None, name=None, pokemons=None, return_pokemons=False, message=None):
    '''get_ascii will return ascii art for a pokemon based on a name or pid.
    :param pid: the pokemon ID to return
    :param name: the pokemon name to return
    :param pokemons: the pokemon lookup data to search (optional)
    :param return_pokemons: return catches (default False)
    :param message: add a message to the ascii
    '''
    pokemon = get_pokemon(name=name, pid=pid, pokemons=pokemons)
    printme = message
    if len(pokemon) > 0:
        for pid, data in pokemon.items():
            if message is None:
                printme = data["name"].capitalize()
            print("%s\n\n%s" % (data['ascii'], printme))
    if return_pokemons:
        return pokemon
get_ascii will return ascii art for a pokemon based on a name or pid.
:param pid: the pokemon ID to return
:param name: the pokemon name to return
:param pokemons: the pokemon lookup data to search (optional)
:param return_pokemons: return catches (default False)
:param message: add a message to the ascii
def decorate_function(self, name, decorator): """ Decorate function with given name with given decorator. :param str name: Name of the function. :param callable decorator: Decorator callback. """ self.functions[name] = decorator(self.functions[name])
Decorate function with given name with given decorator. :param str name: Name of the function. :param callable decorator: Decorator callback.
def get_occurrence_times_for_event(event): """ Return a tuple with two sets containing the (start, end) *naive* datetimes of an Event's Occurrences, or the original start datetime if an Occurrence's start was modified by a user. """ occurrences_starts = set() occurrences_ends = set() for o in event.occurrence_list: occurrences_starts.add( coerce_naive(o.original_start or o.start) ) occurrences_ends.add( coerce_naive(o.original_end or o.end) ) return occurrences_starts, occurrences_ends
Return a tuple with two sets containing the (start, end) *naive* datetimes of an Event's Occurrences, or the original start datetime if an Occurrence's start was modified by a user.
def _get_nest_stats(self): """Helper method to deal with nestedStats as json format changed in v12.x """ for x in self.rdict: check = urlparse(x) if check.scheme: nested_dict = self.rdict[x]['nestedStats'] tmp_dict = nested_dict['entries'] return self._key_dot_replace(tmp_dict) return self._key_dot_replace(self.rdict)
Helper method to deal with nestedStats as json format changed in v12.x
def generate_rb_sequence(self, depth, gateset, seed=None, interleaver=None): """ Construct a randomized benchmarking experiment on the given qubits, decomposing into gateset. If interleaver is not provided, the returned sequence will have the form C_1 C_2 ... C_(depth-1) C_inv , where each C is a Clifford element drawn from gateset, C_{< depth} are randomly selected, and C_inv is selected so that the entire sequence composes to the identity. If an interleaver G (which must be a Clifford, and which will be decomposed into the native gateset) is provided, then the sequence instead takes the form C_1 G C_2 G ... C_(depth-1) G C_inv . The JSON response is a list of lists of indices, or Nones. In the former case, they are the index of the gate in the gateset. :param int depth: The number of Clifford gates to include in the randomized benchmarking experiment. This is different than the number of gates in the resulting experiment. :param list gateset: A list of pyquil gates to decompose the Clifford elements into. These must generate the clifford group on the qubits of interest. e.g. for one qubit [RZ(np.pi/2), RX(np.pi/2)]. :param seed: A positive integer used to seed the PRNG. :param interleaver: A Program object that encodes a Clifford element. :return: A list of pyquil programs. Each pyquil program is a circuit that represents an element of the Clifford group. When these programs are composed, the resulting Program will be the randomized benchmarking experiment of the desired depth. e.g. if the return programs are called cliffords then `sum(cliffords, Program())` will give the randomized benchmarking experiment, which will compose to the identity program. """ # Support QubitPlaceholders: we temporarily index to arbitrary integers. # `generate_rb_sequence` handles mapping back to the original gateset gates. gateset_as_program = address_qubits(sum(gateset, Program())) qubits = len(gateset_as_program.get_qubits()) gateset_for_api = gateset_as_program.out().splitlines() if interleaver: assert(isinstance(interleaver, Program)) interleaver = interleaver.out() depth = int(depth) # needs to be jsonable, no np.int64 please! payload = RandomizedBenchmarkingRequest(depth=depth, qubits=qubits, gateset=gateset_for_api, seed=seed, interleaver=interleaver) response = self.client.call('generate_rb_sequence', payload) # type: RandomizedBenchmarkingResponse programs = [] for clifford in response.sequence: clifford_program = Program() # Like below, we reversed the order because the API currently hands back the Clifford # decomposition right-to-left. for index in reversed(clifford): clifford_program.inst(gateset[index]) programs.append(clifford_program) # The programs are returned in "textbook style" right-to-left order. To compose them into # the correct pyquil program, we reverse the order. return list(reversed(programs))
Construct a randomized benchmarking experiment on the given qubits, decomposing into gateset. If interleaver is not provided, the returned sequence will have the form C_1 C_2 ... C_(depth-1) C_inv , where each C is a Clifford element drawn from gateset, C_{< depth} are randomly selected, and C_inv is selected so that the entire sequence composes to the identity. If an interleaver G (which must be a Clifford, and which will be decomposed into the native gateset) is provided, then the sequence instead takes the form C_1 G C_2 G ... C_(depth-1) G C_inv . The JSON response is a list of lists of indices, or Nones. In the former case, they are the index of the gate in the gateset. :param int depth: The number of Clifford gates to include in the randomized benchmarking experiment. This is different than the number of gates in the resulting experiment. :param list gateset: A list of pyquil gates to decompose the Clifford elements into. These must generate the clifford group on the qubits of interest. e.g. for one qubit [RZ(np.pi/2), RX(np.pi/2)]. :param seed: A positive integer used to seed the PRNG. :param interleaver: A Program object that encodes a Clifford element. :return: A list of pyquil programs. Each pyquil program is a circuit that represents an element of the Clifford group. When these programs are composed, the resulting Program will be the randomized benchmarking experiment of the desired depth. e.g. if the return programs are called cliffords then `sum(cliffords, Program())` will give the randomized benchmarking experiment, which will compose to the identity program.
def put_value(self, value, timeout=None): """Put a value to the Attribute and wait for completion""" self._context.put(self._data.path + ["value"], value, timeout=timeout)
Put a value to the Attribute and wait for completion
def duplicate(self): """Duplicates the matcher to others.""" other = Matcher(self.loc, self.check_var, self.checkdefs, self.names, self.var_index) other.insert_check(0, "not " + self.check_var) self.others.append(other) return other
Duplicates the matcher to others.
def main():
    """
    Computational Genomics Lab, Genomics Institute, UC Santa Cruz
    Toil BWA pipeline

    Alignment of fastq reads via BWA-kit

    General usage:
    1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
    2. Parameterize the pipeline by editing the config.
    3. Fill in the manifest with information pertaining to your samples.
    4. Type "toil-bwa run [jobStore]" to execute the pipeline.

    Please read the README.md located in the source directory or at:
    https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment

    Structure of the BWA pipeline (per sample)

        0 --> 1

    0 = Download sample
    1 = Run BWA-kit
    ===================================================================
    :Dependencies:
    cURL:       apt-get install curl
    Toil:       pip install toil
    Docker:     wget -qO- https://get.docker.com/ | sh

    Optional:
    S3AM:       pip install --s3am (requires ~/.boto config file)
    Boto:       pip install boto
    """
    # Define Parser object and add to Toil
    parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
    subparsers = parser.add_subparsers(dest='command')
    # Generate subparsers
    subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
    subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
    subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
    # Run subparser
    parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline')
    group = parser_run.add_mutually_exclusive_group()
    parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str,
                            help='Path to the (filled in) config file, generated with "generate-config".')
    group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str,
                       help='Path to the (filled in) manifest file, generated with "generate-manifest". '
                            '\nDefault value: "%(default)s".')
    group.add_argument('--sample', nargs='+', action=required_length(2, 3),
                       help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].')
    # Print docstring help if no arguments provided
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    Job.Runner.addToilOptions(parser_run)
    args = parser.parse_args()
    # Parse subparsers related to generation of config and manifest
    cwd = os.getcwd()
    if args.command == 'generate-config' or args.command == 'generate':
        generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config)
    if args.command == 'generate-manifest' or args.command == 'generate':
        generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest)
    # Pipeline execution
    elif args.command == 'run':
        require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config))
        if not args.sample:
            args.sample = None
            require(os.path.exists(args.manifest), '{} not found and no sample provided. \n'
                                                   'Please run "generate-manifest"'.format(args.manifest))
        # Parse config
        parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
        config = argparse.Namespace(**parsed_config)
        config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint
        samples = [args.sample[0], args.sample[1:]] if args.sample else parse_manifest(args.manifest)
        # Sanity checks
        require(config.ref, 'Missing URL for reference file: {}'.format(config.ref))
        require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))
        # Launch Pipeline
        Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args)
Computational Genomics Lab, Genomics Institute, UC Santa Cruz Toil BWA pipeline Alignment of fastq reads via BWA-kit General usage: 1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory. 2. Parameterize the pipeline by editing the config. 3. Fill in the manifest with information pertaining to your samples. 4. Type "toil-bwa run [jobStore]" to execute the pipeline. Please read the README.md located in the source directory or at: https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment Structure of the BWA pipeline (per sample) 0 --> 1 0 = Download sample 1 = Run BWA-kit =================================================================== :Dependencies: cURL: apt-get install curl Toil: pip install toil Docker: wget -qO- https://get.docker.com/ | sh Optional: S3AM: pip install --s3am (requires ~/.boto config file) Boto: pip install boto
def launch_host_event_handler(self, host):
    """Launch event handler for a host
    Format of the line that triggers function call::

    LAUNCH_HOST_EVENT_HANDLER;<host_name>

    :param host: host to execute the event handler
    :type host: alignak.objects.host.Host
    :return: None
    """
    host.get_event_handlers(self.hosts, self.daemon.macromodulations, self.daemon.timeperiods,
                            ext_cmd=True)
Launch event handler for a host
Format of the line that triggers function call::

LAUNCH_HOST_EVENT_HANDLER;<host_name>

:param host: host to execute the event handler
:type host: alignak.objects.host.Host
:return: None
def get_relationship_admin_session_for_family(self, family_id): """Gets the ``OsidSession`` associated with the relationship administration service for the given family. arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` return: (osid.relationship.RelationshipAdminSession) - a ``RelationshipAdminSession`` raise: NotFound - no family found by the given ``Id`` raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_relationship_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_relationship_admin()`` and ``supports_visible_federation()`` are ``true``* """ if not self.supports_relationship_admin(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.RelationshipAdminSession(family_id, runtime=self._runtime)
Gets the ``OsidSession`` associated with the relationship administration service for the given family. arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` return: (osid.relationship.RelationshipAdminSession) - a ``RelationshipAdminSession`` raise: NotFound - no family found by the given ``Id`` raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_relationship_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_relationship_admin()`` and ``supports_visible_federation()`` are ``true``*
def get_single_group_membership_users(self, user_id, group_id): """ Get a single group membership. Returns the group membership with the given membership id or user id. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id self.logger.debug("GET /api/v1/groups/{group_id}/users/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/users/{user_id}".format(**path), data=data, params=params, single_item=True)
Get a single group membership. Returns the group membership with the given membership id or user id.
def translate_to_international_phonetic_alphabet(self, hide_stress_mark=False):
    '''
    Convert to the International Phonetic Alphabet. The stress mark needs
    to be hidden when there is only one vowel.
    :param hide_stress_mark:
    :return:
    '''
    translations = self.stress.mark_ipa() if (not hide_stress_mark) and self.have_vowel else ""
    for phoneme in self._phoneme_list:
        translations += phoneme.ipa
    return translations
θ½¬ζ’ζˆε›½ι™…ιŸ³ζ ‡γ€‚εͺ要一δΈͺε…ƒιŸ³ηš„ζ—Άε€™ιœ€θ¦ιšθ—ι‡ιŸ³ζ ‡θ―† :param hide_stress_mark: :return:
def _run_cwltool(args): """Run with cwltool -- reference implementation. """ main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cwltool_work")) tmp_dir = utils.safe_makedir(os.path.join(work_dir, "tmpcwl")) log_file = os.path.join(work_dir, "%s-cwltool.log" % project_name) os.environ["TMPDIR"] = tmp_dir flags = ["--tmpdir-prefix", tmp_dir, "--tmp-outdir-prefix", tmp_dir] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container", "--preserve-environment", "PATH", "--preserve-environment", "HOME"] cmd = ["cwltool"] + flags + args.toolargs + ["--", main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file=log_file)
Run with cwltool -- reference implementation.
def csl_styles(**kwargs):
    '''
    Get list of styles from https://github.com/citation-style-language/styles

    :param kwargs: any additional arguments will be passed on to `requests.get`

    :return: list, of CSL styles

    Usage::

        from habanero import cn
        cn.csl_styles()
    '''
    base = "https://api.github.com/repos/citation-style-language/styles"
    tt = requests.get(base + '/commits?per_page=1', **kwargs)
    tt.raise_for_status()
    check_json(tt)
    commres = tt.json()
    sha = commres[0]['sha']
    sty = requests.get(base + "/git/trees/" + sha, **kwargs)
    sty.raise_for_status()
    check_json(sty)
    res = sty.json()
    files = [z['path'] for z in res['tree']]
    # escape the dot so only real ".csl" extensions match
    matches = [re.search(r"\.csl", g) for g in files]
    csls = [x.string for x in filter(None, matches)]
    return [re.sub(r"\.csl", "", x) for x in csls]
Get list of styles from https://github.com/citation-style-language/styles :param kwargs: any additional arguments will be passed on to `requests.get` :return: list, of CSL styles Usage:: from habanero import cn cn.csl_styles()
def delete_instance(self, instance_id): ''' method for removing an instance from AWS EC2 :param instance_id: string of instance id on AWS :return: string reporting state of instance ''' title = '%s.delete_instance' % self.__class__.__name__ # validate inputs input_fields = { 'instance_id': instance_id } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # report query self.iam.printer('Removing instance %s from AWS region %s.' % (instance_id, self.iam.region_name)) # retrieve state old_state = self.check_instance_state(instance_id) # discover tags associated with instance id tag_list = [] try: response = self.connection.describe_tags( Filters=[ { 'Name': 'resource-id', 'Values': [ instance_id ] } ] ) import re aws_tag_pattern = re.compile('aws:') for i in range(0, len(response['Tags'])): if not aws_tag_pattern.findall(response['Tags'][i]['Key']): tag = {} tag['Key'] = response['Tags'][i]['Key'] tag['Value'] = response['Tags'][i]['Value'] tag_list.append(tag) except: raise AWSConnectionError(title) # remove tags from instance try: self.connection.delete_tags( Resources=[ instance_id ], Tags=tag_list ) self.iam.printer('Tags have been deleted from %s.' % instance_id) except: raise AWSConnectionError(title) # stop instance try: self.connection.stop_instances( InstanceIds=[ instance_id ] ) except: raise AWSConnectionError(title) # terminate instance try: response = self.connection.terminate_instances( InstanceIds=[ instance_id ] ) new_state = response['TerminatingInstances'][0]['CurrentState']['Name'] except: raise AWSConnectionError(title) # report outcome and return true self.iam.printer('Instance %s was %s.' % (instance_id, old_state)) self.iam.printer('Instance %s is %s.' % (instance_id, new_state)) return new_state
method for removing an instance from AWS EC2 :param instance_id: string of instance id on AWS :return: string reporting state of instance
def date_to_timestamp(date): """ date to unix timestamp in milliseconds """ date_tuple = date.timetuple() timestamp = calendar.timegm(date_tuple) * 1000 return timestamp
date to unix timestamp in milliseconds
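For example, one day after the Unix epoch:

import datetime
date_to_timestamp(datetime.date(1970, 1, 2))  # -> 86400000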
def _convert_string_array(data, encoding, errors, itemsize=None): """ we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding errors : handler for encoding errors itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed """ # encode if needed if encoding is not None and len(data): data = Series(data.ravel()).str.encode( encoding, errors).values.reshape(data.shape) # create the sized dtype if itemsize is None: ensured = ensure_object(data.ravel()) itemsize = max(1, libwriters.max_len_string_array(ensured)) data = np.asarray(data, dtype="S{size}".format(size=itemsize)) return data
we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding errors : handler for encoding errors itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed
def batting_avg(self, benchmark): """Percentage of periods when `self` outperformed `benchmark`. Parameters ---------- benchmark : {pd.Series, TSeries, 1d np.ndarray} The benchmark security to which `self` is compared. Returns ------- float """ diff = self.excess_ret(benchmark) return np.count_nonzero(diff > 0.0) / diff.count()
Percentage of periods when `self` outperformed `benchmark`. Parameters ---------- benchmark : {pd.Series, TSeries, 1d np.ndarray} The benchmark security to which `self` is compared. Returns ------- float
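An illustrative sketch, assuming excess_ret() is the per-period difference of returns (values here are hypothetical):

import pandas as pd
returns = TSeries([0.02, -0.01, 0.03])
benchmark = pd.Series([0.01, 0.00, 0.05])
returns.batting_avg(benchmark)  # 1 outperforming period out of 3 -> ~0.333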
def parse(self, fd):
    """very simple parser - but why would we want it to be complex?"""

    def resolve_args(args):
        # FIXME break this out, it's in common with the templating stuff elsewhere
        root = self.sections[0]
        val_dict = dict(('<' + t + '>', u) for (t, u) in root.get_variables().items())
        resolved_args = []
        for arg in args:
            for subst, value in val_dict.items():
                arg = arg.replace(subst, value)
            resolved_args.append(arg)
        return resolved_args

    def handle_section_defn(keyword, parts):
        if keyword == '@HostAttrs':
            if len(parts) != 1:
                raise ParserException('usage: @HostAttrs <hostname>')
            if self.sections[0].has_pending_with():
                raise ParserException('@with not supported with @HostAttrs')
            self.sections.append(HostAttrs(parts[0]))
            return True
        if keyword == 'Host':
            if len(parts) != 1:
                raise ParserException('usage: Host <hostname>')
            self.sections.append(Host(parts[0], self.sections[0].pop_pending_with()))
            return True

    def handle_vardef(root, keyword, parts):
        if keyword == '@with':
            root.add_pending_with(parts)
            return True

    def handle_set_args(_, parts):
        if len(parts) == 0:
            raise ParserException('usage: @args arg-name ...')
        if not self.is_include():
            return
        if self._args is None or len(self._args) != len(parts):
            raise ParserException('required arguments not passed to include {url} ({parts})'.format(
                url=self._url, parts=', '.join(parts))
            )
        root = self.sections[0]
        for key, value in zip(parts, self._args):
            root.set_value(key, value)

    def handle_set_value(_, parts):
        if len(parts) != 2:
            raise ParserException('usage: @set <key> <value>')
        root = self.sections[0]
        root.set_value(*resolve_args(parts))

    def handle_add_type(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @is <HostAttrName>')
        section.add_type(parts[0])

    def handle_via(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @via <Hostname>')
        section.add_line(
            'ProxyCommand',
            ('ssh {args} nc %h %p 2> /dev/null'.format(args=pipes.quote(resolve_args(parts)[0])), )
        )

    def handle_identity(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @identity <name>')
        section.add_identity(resolve_args(parts)[0])

    def handle_include(_, parts):
        if len(parts) == 0:
            raise ParserException('usage: @include <https://...|/path/to/file.sedge> [arg ...]')
        url = parts[0]
        parsed_url = urllib.parse.urlparse(url)
        if parsed_url.scheme == 'https':
            req = requests.get(url, verify=self._verify_ssl)
            text = req.text
        elif parsed_url.scheme == 'file':
            with open(parsed_url.path) as fd:
                text = fd.read()
        elif parsed_url.scheme == '':
            path = os.path.expanduser(url)
            with open(path) as fd:
                text = fd.read()
        else:
            raise SecurityException('error: @includes may only use paths or https:// or file:// URLs')
        subconfig = SedgeEngine(
            self._key_library,
            StringIO(text),
            self._verify_ssl,
            url=url,
            args=resolve_args(parts[1:]),
            parent_keydefs=self.keydefs,
            via_include=True)
        self.includes.append((url, subconfig))

    def handle_keydef(_, parts):
        if len(parts) < 2:
            raise ParserException('usage: @key <name> [fingerprint]...')
        name = parts[0]
        fingerprints = parts[1:]
        self.keydefs[name] = fingerprints

    def handle_keyword(section, keyword, parts):
        handlers = {
            '@set': handle_set_value,
            '@args': handle_set_args,
            '@is': handle_add_type,
            '@via': handle_via,
            '@include': handle_include,
            '@key': handle_keydef,
            '@identity': handle_identity
        }
        if keyword in handlers:
            handlers[keyword](section, parts)
            return True

    for line in (t.strip() for t in fd):
        if line.startswith('#') or line == '':
            continue
        keyword, parts = SedgeEngine.parse_config_line(line)
        if handle_section_defn(keyword, parts):
            continue
        if handle_vardef(self.sections[0], keyword, parts):
            continue
        current_section = self.sections[-1]
        if handle_keyword(current_section, keyword, parts):
            continue
        if keyword.startswith('@'):
            raise ParserException("unknown expansion keyword {}".format(keyword))
        # use other rather than parts to avoid messing up user
        # whitespace; we don't handle quotes in here as we don't
        # need to
        current_section.add_line(keyword, parts)
very simple parser - but why would we want it to be complex?
def process_services(self, device_ids=None, removed_devices_info=None): """Process services managed by this config agent. This method is invoked by any of three scenarios. 1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL` seconds. This is the most common scenario. In this mode, the method is called without any arguments. 2. Called by the `_process_backlogged_hosting_devices()` as part of the backlog processing task. In this mode, a list of device_ids are passed as arguments. These are the list of backlogged hosting devices that are now reachable and we want to sync services on them. 3. Called by the `hosting_devices_removed()` method. This is when the config agent has received a notification from the plugin that some hosting devices are going to be removed. The payload contains the details of the hosting devices and the associated neutron resources on them which should be processed and removed. To avoid race conditions with these scenarios, this function is protected by a lock. This method goes on to invoke `process_service()` on the different service helpers. :param device_ids: List of devices that are now available and needs to be processed :param removed_devices_info: Info about the hosting devices which are going to be removed and details of the resources hosted on them. Expected Format:: { 'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]}, 'hd_id2': {'routers': [id3, id4, ...]}, ...}, 'deconfigure': True/False } :returns: None """ LOG.debug("Processing services started") # Now we process only routing service, additional services will be # added in future if self.routing_service_helper: self.routing_service_helper.process_service(device_ids, removed_devices_info) else: LOG.warning("No routing service helper loaded") LOG.debug("Processing services completed")
Process services managed by this config agent. This method is invoked by any of three scenarios. 1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL` seconds. This is the most common scenario. In this mode, the method is called without any arguments. 2. Called by the `_process_backlogged_hosting_devices()` as part of the backlog processing task. In this mode, a list of device_ids are passed as arguments. These are the list of backlogged hosting devices that are now reachable and we want to sync services on them. 3. Called by the `hosting_devices_removed()` method. This is when the config agent has received a notification from the plugin that some hosting devices are going to be removed. The payload contains the details of the hosting devices and the associated neutron resources on them which should be processed and removed. To avoid race conditions with these scenarios, this function is protected by a lock. This method goes on to invoke `process_service()` on the different service helpers. :param device_ids: List of devices that are now available and needs to be processed :param removed_devices_info: Info about the hosting devices which are going to be removed and details of the resources hosted on them. Expected Format:: { 'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]}, 'hd_id2': {'routers': [id3, id4, ...]}, ...}, 'deconfigure': True/False } :returns: None
def request(self, method, path='/', url=None, ignore_codes=[], **kwargs): """ Wrapper for the ._request method that verifies if we're logged into RightScale before making a call, and sanity checks the oauth expiration time. :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...) :param str path: A path component of the target URL. This will be appended to the value of ``self.endpoint``. If both :attr:`path` and :attr:`url` are specified, the value in :attr:`url` is used and the :attr:`path` is ignored. :param str url: The target URL (e.g. ``http://server.tld/somepath/``). If both :attr:`path` and :attr:`url` are specified, the value in :attr:`url` is used and the :attr:`path` is ignored. :param ignore_codes: List of HTTP error codes (e.g. 404, 500) that should be ignored. If an HTTP error occurs and it is *not* in :attr:`ignore_codes`, then an exception is raised. :type ignore_codes: list of int :param kwargs: Any other kwargs to pass to :meth:`requests.request()`. Returns a :class:`requests.Response` object. """ # On every call, check if we're both logged in, and if the token is # expiring. If it is, we'll re-login with the information passed into # us at instantiation. if time.time() > self.auth_expires_at: self.login() # Now make the actual API call return self._request(method, path, url, ignore_codes, **kwargs)
Wrapper for the ._request method that verifies if we're logged into RightScale before making a call, and sanity checks the oauth expiration time. :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...) :param str path: A path component of the target URL. This will be appended to the value of ``self.endpoint``. If both :attr:`path` and :attr:`url` are specified, the value in :attr:`url` is used and the :attr:`path` is ignored. :param str url: The target URL (e.g. ``http://server.tld/somepath/``). If both :attr:`path` and :attr:`url` are specified, the value in :attr:`url` is used and the :attr:`path` is ignored. :param ignore_codes: List of HTTP error codes (e.g. 404, 500) that should be ignored. If an HTTP error occurs and it is *not* in :attr:`ignore_codes`, then an exception is raised. :type ignore_codes: list of int :param kwargs: Any other kwargs to pass to :meth:`requests.request()`. Returns a :class:`requests.Response` object.
def login(): """ This route has two purposes. First, it is used by the user to login. Second, it is used by the CAS to respond with the `ticket` after the user logs in successfully. When the user accesses this url, they are redirected to the CAS to login. If the login was successful, the CAS will respond to this route with the ticket in the url. The ticket is then validated. If validation was successful the logged in username is saved in the user's session under the key `CAS_USERNAME_SESSION_KEY` and the user's attributes are saved under the key 'CAS_USERNAME_ATTRIBUTE_KEY' """ cas_token_session_key = current_app.config['CAS_TOKEN_SESSION_KEY'] redirect_url = create_cas_login_url( current_app.config['CAS_SERVER'], current_app.config['CAS_LOGIN_ROUTE'], flask.url_for('.login', origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'), _external=True)) if 'ticket' in flask.request.args: flask.session[cas_token_session_key] = flask.request.args['ticket'] if cas_token_session_key in flask.session: if validate(flask.session[cas_token_session_key]): if 'CAS_AFTER_LOGIN_SESSION_URL' in flask.session: redirect_url = flask.session.pop('CAS_AFTER_LOGIN_SESSION_URL') elif flask.request.args.get('origin'): redirect_url = flask.request.args['origin'] else: redirect_url = flask.url_for( current_app.config['CAS_AFTER_LOGIN']) else: del flask.session[cas_token_session_key] current_app.logger.debug('Redirecting to: {0}'.format(redirect_url)) return flask.redirect(redirect_url)
This route has two purposes. First, it is used by the user to login. Second, it is used by the CAS to respond with the `ticket` after the user logs in successfully. When the user accesses this url, they are redirected to the CAS to login. If the login was successful, the CAS will respond to this route with the ticket in the url. The ticket is then validated. If validation was successful the logged in username is saved in the user's session under the key `CAS_USERNAME_SESSION_KEY` and the user's attributes are saved under the key 'CAS_USERNAME_ATTRIBUTE_KEY'
def get_multi_temperature_data(kt0=1.0, kt1=5.0, length0=10000, length1=10000, n0=10, n1=10): """ Continuous MCMC process in an asymmetric double well potential at multiple temperatures. Parameters ---------- kt0: double, optional, default=1.0 Temperature in kT for the first thermodynamic state. kt1: double, optional, default=5.0 Temperature in kT for the second thermodynamic state. length0: int, optional, default=10000 Trajectory length in steps for the first thermodynamic state. length1: int, optional, default=10000 Trajectory length in steps for the second thermodynamic state. n0: int, optional, default=10 Number of trajectories in the first thermodynamic state. n1: int, optional, default=10 Number of trajectories in the second thermodynamic state. Returns ------- dict - keys shown below in brackets Trajectory (trajs), energy (energy_trajs), and temperature (temp_trajs) data from the MCMC runs as well as the discretised version (dtrajs + centers). Energies and temperatures are given in kT, lengths in arbitrary units. """ dws = _DWS() mt_data = dws.mt_sample( kt0=kt0, kt1=kt1, length0=length0, length1=length1, n0=n0, n1=n1) mt_data.update(centers=dws.centers) return mt_data
Continuous MCMC process in an asymmetric double well potential at multiple temperatures. Parameters ---------- kt0: double, optional, default=1.0 Temperature in kT for the first thermodynamic state. kt1: double, optional, default=5.0 Temperature in kT for the second thermodynamic state. length0: int, optional, default=10000 Trajectory length in steps for the first thermodynamic state. length1: int, optional, default=10000 Trajectory length in steps for the second thermodynamic state. n0: int, optional, default=10 Number of trajectories in the first thermodynamic state. n1: int, optional, default=10 Number of trajectories in the second thermodynamic state. Returns ------- dict - keys shown below in brackets Trajectory (trajs), energy (energy_trajs), and temperature (temp_trajs) data from the MCMC runs as well as the discretised version (dtrajs + centers). Energies and temperatures are given in kT, lengths in arbitrary units.
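Usage sketch, reading back the keys named in the docstring:

data = get_multi_temperature_data(length0=1000, length1=1000, n0=2, n1=2)
trajs = data['trajs']            # continuous MCMC trajectories
energies = data['energy_trajs']  # energies in kT
temps = data['temp_trajs']       # temperatures in kT
centers = data['centers']        # cluster centers matching data['dtrajs']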