def from_dict(self, dingos_obj_dict, config_hooks=None, namespace_dict=None):
    """
    Convert DingoObjDict to facts and associate resulting facts with this
    information object.
    """
    # Instantiate default parameters
    if '@@ns' in dingos_obj_dict.keys():
        top_level_namespace = (namespace_dict.get(dingos_obj_dict.get('@@ns'), None),
                               dingos_obj_dict.get('@@ns'))
    else:
        top_level_namespace = (None, None)

    if not config_hooks:
        config_hooks = {}

    datatype_extractor = config_hooks.get('datatype_extractor',
                                          (lambda io, f, i, n, d: False))
    special_ft_handler = config_hooks.get('special_ft_handler', None)
    attr_ignore_predicate = config_hooks.get('attr_ignore_predicate', None)
    force_nonleaf_fact_predicate = config_hooks.get('force_nonleaf_fact_predicate', None)

    if not namespace_dict:
        namespace_dict = {}

    namespace_uri_2_pk_mapping = dict(
        self._DCM['DataTypeNameSpace'].objects.values_list('uri', 'id'))

    if not self.is_empty():
        logger.debug("Non-empty info object %s (timestamp %s, pk %s) is overwritten with new information"
                     % (self.identifier, self.timestamp, self.pk))
        self.clear()

    # Flatten the DingoObjDict
    (flat_list, attrs) = dingos_obj_dict.flatten(
        attr_ignore_predicate=attr_ignore_predicate,
        force_nonleaf_fact_predicate=force_nonleaf_fact_predicate,
        namespace_dict=namespace_dict)

    for fact in flat_list:
        # Collect the information about all attributes relevant
        # for this node (i.e., they occur either directly on
        # the node or on an ancestor node).
        # The following code was used to generate an attribute dictionary::
        #
        #     attr_info = ExtendedSortedDict()
        #     for attr_node in attrs.keys():
        #         if fact['node_id'].startswith(attr_node):
        #             for (key, value) in attrs[attr_node].items():
        #                 attr_info.chained_set(value, 'set', key, attr_node)
        #
        # The dictionary contained full information also about the attributes
        # given on ancestor nodes. So far, we have not required this
        # information for our imports, and thus simplify to a
        # dictionary that only contains the attributes directly
        # associated with the current node.
        #
        # If, at a later stage, we find we need this kind of information,
        # we can use Django's MultiValueDict to add additional
        # information without changing the signature of the receiving
        # predicate and handler functions.
        attr_info = dict(attrs.get(fact['node_id'], []))
        # for attr_node in attrs.keys():
        #     if fact['node_id'] == attr_node:
        #         for (key, value) in attrs[attr_node].items():
        #             attr_info.chained_set(value, 'set', key, attr_node)

        # Fill dictionary with arguments for call to 'add_fact'
        add_fact_kargs = {}
        add_fact_kargs['fact_dt_kind'] = FactDataType.UNKNOWN_KIND
        add_fact_kargs['fact_dt_namespace_name'] = "%s-%s" % (
            self.iobject_family.name, self.iobject_family_revision.name)

        # See whether the datatype extractor has found a datatype for the value
        datatype_found = datatype_extractor(self, fact, attr_info,
                                            namespace_dict, add_fact_kargs)
        if not datatype_found:
            add_fact_kargs = {}
            add_fact_kargs['fact_dt_kind'] = FactDataType.NO_VOCAB
            add_fact_kargs['fact_dt_namespace_name'] = DINGOS_NAMESPACE_SLUG
            add_fact_kargs['fact_dt_namespace_uri'] = DINGOS_NAMESPACE_URI
        else:
            # Check whether the datatype extractor added namespace information.
            # If not, add some here.
            if 'fact_dt_namespace_uri' not in add_fact_kargs:
                add_fact_kargs['fact_dt_namespace_uri'] = namespace_dict.get(
                    add_fact_kargs['fact_dt_namespace_name'],
                    '%s/%s' % (DINGOS_NAMESPACE_URI, self.iobject_family))

        add_fact_kargs['fact_term_name'] = fact['term']
        add_fact_kargs['fact_term_attribute'] = fact['attribute']
        add_fact_kargs['values'] = [fact['value']]
        add_fact_kargs['node_id_name'] = fact['node_id']
        add_fact_kargs['namespaces'] = fact['namespaces']
        add_fact_kargs['top_level_namespace'] = top_level_namespace

        handler_return_value = True
        logger.debug("Treating fact (before special handler list) %s with attr_info %s and kargs %s"
                     % (fact, attr_info, add_fact_kargs))

        # Below, go through the handlers in the special_ft_handler list --
        # if the predicate returns True for the fact, execute the handler
        # on the fact. If a handler returns False/None, the fact is *not*
        # added. This should only be done if the handler has added the
        # fact -- otherwise the sequence of node identifiers is messed up!
        if special_ft_handler:
            for (predicate, handler) in special_ft_handler:
                if predicate(fact, attr_info):
                    handler_return_value = handler(self, fact, attr_info,
                                                   add_fact_kargs)
                    if not handler_return_value:
                        break

        logger.debug("Treating fact (after special handler list) %s with attr_info %s and kargs %s"
                     % (fact, attr_info, add_fact_kargs))

        if handler_return_value is True:
            add_fact_kargs['ns_uri_dict'] = namespace_uri_2_pk_mapping
            e2f_obj = self.add_fact(**add_fact_kargs)
        elif not handler_return_value:
            continue
        else:
            e2f_obj = handler_return_value

    self.set_name()
def udf(x):
    """
    No-op routine with an argument signature matching udfuns.
    Always returns 0.0.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/udf_c.html

    :param x: Double precision value, unused.
    :type x: float
    :return: Double precision value, always 0.0.
    :rtype: float
    """
    x = ctypes.c_double(x)
    value = ctypes.c_double()
    libspice.udf_c(x, ctypes.byref(value))
    return value.value
def _startServiceJobs(self):
    """Start any service jobs available from the service manager."""
    self.issueQueingServiceJobs()
    while True:
        serviceJob = self.serviceManager.getServiceJobsToStart(0)
        # Stop trying to get jobs when the function returns None
        if serviceJob is None:
            break
        logger.debug('Launching service job: %s', serviceJob)
        self.issueServiceJob(serviceJob)
def _hash_data(hasher, data):
    """Generate hash of data using provided hash type.

    :param hasher: Hasher instance to use as a base for calculating hash
    :type hasher: cryptography.hazmat.primitives.hashes.Hash
    :param bytes data: Data to sign
    :returns: Hash of data
    :rtype: bytes
    """
    _hasher = hasher.copy()
    _hasher.update(data)
    return _hasher.finalize()
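# Usage sketch (added): hashing a payload with _hash_data above, assuming the
# `cryptography` package (>= 3.1, where the backend argument is optional).
# Hash.copy()/update()/finalize() are the documented hazmat primitives API.
from cryptography.hazmat.primitives import hashes

base_hasher = hashes.Hash(hashes.SHA256())
digest = _hash_data(base_hasher, b"payload")
assert len(digest) == 32  # SHA-256 produces a 32-byte digest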
def create_node(participant_id):
    """Send a POST request to the node table.

    This makes a new node for the participant, it calls:
        1. exp.get_network_for_participant
        2. exp.create_node
        3. exp.add_node_to_network
        4. exp.node_post_request
    """
    exp = Experiment(session)

    # Get the participant.
    try:
        participant = models.Participant.query.filter_by(id=participant_id).one()
    except NoResultFound:
        return error_response(error_type="/node POST no participant found",
                              status=403)

    # Make sure the participant status is working.
    if participant.status != "working":
        error_type = "/node POST, status = {}".format(participant.status)
        return error_response(error_type=error_type, participant=participant)

    # Execute the request.
    network = exp.get_network_for_participant(participant=participant)
    if network is None:
        return Response(dumps({"status": "error"}), status=403)

    node = exp.create_node(participant=participant, network=network)
    assign_properties(node)
    exp.add_node_to_network(node=node, network=network)

    # Ping the experiment.
    exp.node_post_request(participant=participant, node=node)

    # Return the data.
    return success_response(node=node.__json__())
def dcmdottoang_vel(R, Rdot):
    """Convert a rotation matrix and its time derivative to angular velocity.

    w     - angular velocity in the inertial frame
    Omega - angular velocity in the body frame
    """
    w = vee_map(Rdot.dot(R.T))
    Omega = vee_map(R.T.dot(Rdot))
    return (w, Omega)
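# Sketch (added): dcmdottoang_vel assumes a helper `vee_map` that is not
# defined here. A minimal implementation under the usual convention that
# hat(w) @ v == w x v would look like this (hypothetical name to avoid
# claiming it is the original helper):
import numpy as np

def vee_map_sketch(S):
    """Inverse of the hat map: recover w from the skew-symmetric S = hat(w)."""
    return np.array([S[2, 1], S[0, 2], S[1, 0]])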
def _dict_to_stanza(key, stanza):
    '''
    Convert a dict to a multi-line stanza
    '''
    ret = ''
    for skey in stanza:
        if stanza[skey] is True:
            stanza[skey] = ''
        ret += ' {0} {1}\n'.format(skey, stanza[skey])
    return '{0} {{\n{1}}}'.format(key, ret)
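# Usage sketch (added): _dict_to_stanza is a pure string builder, so a tiny
# example illustrates the output shape (single-key dict avoids ordering issues):
# _dict_to_stanza('backend www', {'mode': 'http'})
# -> 'backend www {\n mode http\n}'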
def temp_copy_extracted_submission(self):
    """Creates a temporary copy of extracted submission.

    When executed, submission is allowed to modify its own directory. So
    to ensure that submission does not pass any data between runs, a new
    copy of the submission is made before each run. After a run the
    temporary copy of the submission is deleted.

    Returns:
      directory where temporary copy is located
    """
    tmp_copy_dir = os.path.join(self.submission_dir, 'tmp_copy')
    shell_call(['cp', '-R',
                os.path.join(self.extracted_submission_dir),
                tmp_copy_dir])
    return tmp_copy_dir
def create_searchspace(lookup, fastafn, proline_cut=False, reverse_seqs=True,
                       do_trypsinize=True):
    """Given a FASTA database, proteins are trypsinized and resulting peptides
    stored in a database or dict for lookups"""
    allpeps = []
    for record in SeqIO.parse(fastafn, 'fasta'):
        if do_trypsinize:
            pepseqs = trypsinize(record.seq, proline_cut)
        else:
            pepseqs = [record.seq]
        # Exchange all leucines for isoleucines because MS can't tell them apart
        pepseqs = [(str(pep).replace('L', 'I'),) for pep in pepseqs]
        allpeps.extend(pepseqs)
        if len(allpeps) > 1000000:  # more than x peps, write to SQLite
            lookup.write_peps(allpeps, reverse_seqs)
            allpeps = []
    # write remaining peps to sqlite
    lookup.write_peps(allpeps, reverse_seqs)
    lookup.index_peps(reverse_seqs)
    lookup.close_connection()
def parse_changes():
    """ grab version from CHANGES and validate entry """
    with open('CHANGES') as changes:
        for match in re.finditer(RE_CHANGES, changes.read(1024), re.M):
            if len(match.group(1)) != len(match.group(3)):
                error('incorrect underline in CHANGES')
            date = datetime.datetime.strptime(match.group(4), '%Y-%m-%d').date()
            if date != datetime.date.today():
                error('release date is not today')
            return match.group(2)
    error('invalid release entry in CHANGES')
def match_qualifier_id(self, qualifier_id, match):
    """Matches the qualifier identified by the given ``Id``.

    arg:    qualifier_id (osid.id.Id): the Id of the ``Qualifier``
    arg:    match (boolean): ``true`` if a positive match, ``false``
            for a negative match
    raise:  NullArgument - ``qualifier_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    self._add_match('qualifierId', str(qualifier_id), bool(match))
def formfield_for_dbfield(self, db_field, **kwargs):
    """
    Hook for specifying the form Field instance for a given database Field
    instance. If kwargs are given, they're passed to the form Field's
    constructor.
    """
    formfield = super().formfield_for_dbfield(db_field, **kwargs)
    if db_field.name == 'image':
        formfield.widget = ImageRelatedFieldWidgetWrapper(
            ImageSelect(),
            db_field.rel,
            self.admin_site,
            can_add_related=True,
            can_change_related=True,
        )
    return formfield
def columns(self):
    """Get the user's columns (zhuanlan).

    :return: the user's columns, as a generator
    :rtype: Column.Iterable
    """
    from .column import Column
    if self.url is None or self.post_num == 0:
        return
    soup = BeautifulSoup(self._session.get(self.url + 'posts').text)
    column_list = soup.find('div', class_='column-list')
    column_tags = column_list.find_all('div', class_='item')
    for column_tag in column_tags:
        name = column_tag['title']
        url = column_tag['data-href']
        numbers = column_tag.find('span', class_='des').text.split('•')
        follower_num = int(re_get_number.match(numbers[0]).group(1))
        if len(numbers) == 1:
            post_num = 0
        else:
            post_num = int(re_get_number.match(numbers[1]).group(1))
        yield Column(url, name, follower_num, post_num,
                     session=self._session)
def run(self):
    """
    Plan:

    * We read into a fresh instance of IO obj until marker encountered.
    * When marker is detected, we attach that IO obj to "results" array
      and signal the calling code (through threading.Event flag) that
      results are available
    * repeat until .stop() was called on the thread.
    """
    marker = ['' for l in self._stream_delimiter]  # '' is there on purpose
    tf = self._obj[0](*self._obj[1], **self._obj[2])
    while not self._stop:
        l = os.read(self._r, 1)
        marker.pop(0)
        marker.append(l)
        if marker != self._stream_delimiter:
            tf.write(unicode(l))
        else:
            # chopping off the marker first
            tf.seek(self._stream_roll_back_len, 2)
            tf.truncate()
            tf.seek(0)
            self._data_unoccupied.wait(5)  # seriously, how much time is needed to get your items off the stack?
            self._data.append(tf)
            self._data_available.set()
            tf = self._obj[0](*self._obj[1], **self._obj[2])
    os.close(self._r)
    tf.close()
    del tf
def get_option(self, key, default=None):
    """Return option from synchronizer (possibly overridden by target extra_opts)."""
    if self.synchronizer:
        return self.extra_opts.get(
            key, self.synchronizer.options.get(key, default))
    return self.extra_opts.get(key, default)
def summary_df_from_list(results_list, names, **kwargs):
    """Make a pandas DataFrame of the mean and std devs of each element of a
    list of 1d arrays, including the uncertainties on the values.

    This just converts the array to a DataFrame and calls summary_df on it.

    Parameters
    ----------
    results_list: list of 1d numpy arrays
        Must have same length as names.
    names: list of strs
        Names for the output df's columns.
    kwargs: dict, optional
        Keyword arguments to pass to summary_df.

    Returns
    -------
    df: MultiIndex DataFrame
        See summary_df docstring for more details.
    """
    for arr in results_list:
        assert arr.shape == (len(names),)
    df = pd.DataFrame(np.stack(results_list, axis=0))
    df.columns = names
    return summary_df(df, **kwargs)
def view_admin_log():
    """Page for viewing the log of admin activity."""
    build = g.build

    # TODO: Add paging

    log_list = (
        models.AdminLog.query
        .filter_by(build_id=build.id)
        .order_by(models.AdminLog.created.desc())
        .all())

    return render_template(
        'view_admin_log.html',
        build=build,
        log_list=log_list)
def ticket_flag(self, which, new=None):
    """
    Get or set a ticket flag.

    'which' can be either a string ('APPEND_CR' etc.), or an integer.
    You should ALWAYS use a string, unless you really know what you
    are doing.
    """
    flag = _get_flag(which, TicketFlags)
    if flag:
        if not self.capabilities.have_ticket_flag(flag):
            raise yubikey_base.YubiKeyVersionError(
                'Ticket flag %s requires %s, and this is %s %d.%d'
                % (which, flag.req_string(self.capabilities.model),
                   self.capabilities.model, self.ykver[0], self.ykver[1]))
        req_major, req_minor = flag.req_version()
        self._require_version(major=req_major, minor=req_minor)
        value = flag.to_integer()
    else:
        if type(which) is not int:
            raise yubico_exception.InputError('Unknown non-integer TicketFlag (%s)' % which)
        value = which
    return self.ticket_flags.get_set(value, new)
def fix_jumps(self, row_selected, delta):
    '''fix up jumps when we add/remove rows'''
    numrows = self.grid_mission.GetNumberRows()
    for row in range(numrows):
        command = self.grid_mission.GetCellValue(row, ME_COMMAND_COL)
        if command in ["DO_JUMP", "DO_CONDITION_JUMP"]:
            p1 = int(float(self.grid_mission.GetCellValue(row, ME_P1_COL)))
            if p1 > row_selected and p1 + delta > 0:
                self.grid_mission.SetCellValue(row, ME_P1_COL, str(float(p1 + delta)))
def run_pyvcf(args):
    """Main program entry point after parsing arguments"""
    # open VCF reader
    reader = vcf.Reader(filename=args.input_vcf)
    # optionally, open VCF writer
    writer = None
    # read through input VCF file, optionally also writing out
    start = time.clock()
    num = 0
    for num, r in enumerate(reader):
        if num % 10000 == 0:
            print(num, "".join(map(str, [r.CHROM, ":", r.POS])),
                  sep="\t", file=sys.stderr)
        if writer:
            writer.write_record(r)
        if args.max_records and num >= args.max_records:
            break
    end = time.clock()
    print("Read {} records in {} seconds".format(num, (end - start)),
          file=sys.stderr)
def _acronym_lic(self, license_statement):
    """Convert a license statement to its acronym."""
    pat = re.compile(r'\(([\w+\W?\s?]+)\)')
    if pat.search(license_statement):
        lic = pat.search(license_statement).group(1)
        if lic.startswith('CNRI'):
            acronym_licence = lic[:4]
        else:
            acronym_licence = lic.replace(' ', '')
    else:
        acronym_licence = ''.join(
            [w[0] for w in license_statement.split(self.prefix_lic)[1].split()])
    return acronym_licence
def define_points_grid(self):
    """
    This is experimental code that could be used in the spatialDomainNoGrid
    section to build a grid of points on which to generate the solution.
    However, the current development plan (as of 27 Jan 2015) is to have
    the end user supply the list of points where they want a solution
    (and/or for it to be provided in a more automated way by GRASS GIS).
    But because this (untested) code may still be useful, it will remain
    as its own function here. It used to be in f2d.py.
    """
    # Grid making step
    # In this case, an output at different (x,y), e.g., on a grid, is desired
    # First, see if there is a need for a grid, and then make it
    # latlon arrays must have a pre-set grid
    if self.latlon == False:
        # Warn that any existing grid will be overwritten
        try:
            self.dx
            if self.Quiet == False:
                print("dx and dy being overwritten -- supply a full grid")
        except:
            try:
                self.dy
                if self.Quiet == False:
                    print("dx and dy being overwritten -- supply a full grid")
            except:
                pass
        # Boundaries
        n = np.max(self.y) + self.alpha
        s = np.min(self.y) - self.alpha
        w = np.min(self.x) + self.alpha
        e = np.max(self.x) - self.alpha
        # Grid spacing
        dxprelim = self.alpha / 50.  # x or y
        nx = np.ceil((e - w) / dxprelim)
        ny = np.ceil((n - s) / dxprelim)
        dx = (e - w) / nx
        dy = (n - s) / ny
        # Average of these to create a square grid for more compatibility
        self.dx = self.dy = (dx + dy) / 2.
        self.xw = np.linspace(w, e, nx)
        self.yw = np.linspace(s, n, ny)
    else:
        print("Lat/lon xw and yw must be pre-set: grid will not be square")
        print("and may run into issues with poles, so to ensure the proper")
        print("output points are chosen, the end user should do this.")
        sys.exit()
def activate(self):
    """
    Activate a plan in a CREATED state.
    """
    obj = self.find_paypal_object()
    if obj.state == enums.BillingPlanState.CREATED:
        success = obj.activate()
        if not success:
            raise PaypalApiError("Failed to activate plan: %r" % (obj.error))
    # Resync the updated data to the database
    self.get_or_update_from_api_data(obj, always_sync=True)
    return obj
def delete(self, pk, **kwargs):
    """
    Delete the object by primary key:

    .. code-block:: python

        DBSession.sacrud(Users).delete(1)
        DBSession.sacrud(Users).delete('1')
        DBSession.sacrud(User2Groups).delete({'user_id': 4, 'group_id': 2})

    JSON support:

    .. code-block:: python

        DBSession.sacrud(User2Groups).delete('{"user_id": 4, "group_id": 2}')

    By default it runs ``session.commit()`` or ``transaction.commit()``.
    If that is not necessary, pass the attribute ``commit=False``.
    """
    pk = unjson(pk)
    obj = get_obj(self.session, self.table, pk)
    if self._delete(obj, **kwargs):
        return {'pk': pk, 'name': obj.__repr__()}
def num_workers(self):
    """Returns the number of worker nodes.

    Returns
    -------
    size : int
        The number of worker nodes.
    """
    size = ctypes.c_int()
    check_call(_LIB.MXKVStoreGetGroupSize(self.handle, ctypes.byref(size)))
    return size.value
def create_col_nums():
    """Return column numbers and letters that repeat up to NUM_REPEATS.

    I.e., NUM_REPEATS = 2 would return 26 + 26 * 26 = 702 2-tuples.
    """
    NUM_REPEATS = 2
    # list() around map() so the concatenation also works on Python 3
    column_letters = list(string.ascii_uppercase) + list(map(
        ''.join,
        itertools.product(string.ascii_uppercase, repeat=NUM_REPEATS)))
    letter_numbers = []
    count = 1
    for letter in column_letters:
        letter_numbers.append((count, str(count) + ' (' + letter + ')'))
        count += 1
    return tuple(letter_numbers)
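# Usage sketch (added): the function pairs spreadsheet-style column letters
# with 1-based numbers; 'AA' follows 'Z' as the 27th entry.
cols = create_col_nums()
assert cols[0] == (1, '1 (A)')
assert cols[26] == (27, '27 (AA)')
assert len(cols) == 702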
def xy_data(xdata, ydata, eydata=None, exdata=None, label=None, xlabel='', ylabel='',
            title='', shell_history=0, xshift=0, yshift=0, xshift_every=1,
            yshift_every=1, coarsen=0, style=None, clear=True, axes=None,
            xscale='linear', yscale='linear', grid=False, legend='best',
            legend_max=20, autoformat=True, autoformat_window=True, tall=False,
            draw=True, **kwargs):
    """
    Plots specified data.

    Parameters
    ----------
    xdata, ydata
        Arrays (or arrays of arrays) of data to plot.
    eydata=None, exdata=None
        Arrays of x and y errorbar values.
    label=None
        String or array of strings for the line labels.
    xlabel=''
        Label for the x-axis.
    ylabel=''
        Label for the y-axis.
    title=''
        Title for the axes; set to None to have nothing.
    shell_history=0
        How many commands from the pyshell history to include with the title.
    xshift=0, yshift=0
        Progressive shifts on the data, to make waterfall plots.
    xshift_every=1
        Perform the progressive shift every 1 or n'th line.
    yshift_every=1
        Perform the progressive shift every 1 or n'th line.
    style=None
        Style cycle object.
    clear=True
        If no axes are specified (see below), clear the figure, otherwise
        clear just the axes.
    axes=None
        Which matplotlib axes to use, or "gca" for the current axes.
    xscale='linear', yscale='linear'
        'linear' or 'log' x and y axis scales.
    grid=False
        Should we draw a grid on the axes?
    legend='best'
        Where to place the legend (see pylab.legend() for options).
        Set this to None to ignore the legend.
    legend_max=20
        Number of legend entries before it's truncated with '...'.
    autoformat=True
        Should we format the figure for printing?
    autoformat_window=True
        Should we resize and reposition the window when autoformatting?
    tall=False
        Should the format be tall?
    draw=True
        Whether or not to draw the plot after plotting.

    See matplotlib's errorbar() function for additional optional keyword
    arguments.
    """
    _pylab.ioff()

    # Make sure the dimensionality of the data sets matches
    xdata, ydata = _match_data_sets(xdata, ydata)
    exdata = _match_error_to_data_set(xdata, exdata)
    eydata = _match_error_to_data_set(ydata, eydata)

    # check that the labels is a list of strings of the same length
    if not _fun.is_iterable(label):
        label = [label] * len(xdata)
    while len(label) < len(ydata):
        label.append(label[0])

    # concatenate if necessary
    if len(label) > legend_max:
        label[legend_max - 2] = '...'
        for n in range(legend_max - 1, len(label) - 1):
            label[n] = "_nolegend_"

    # clear the figure?
    if clear and not axes:
        _pylab.gcf().clear()  # axes cleared later

    # setup axes
    if axes == "gca" or axes is None:
        axes = _pylab.gca()

    # if we're clearing the axes
    if clear:
        axes.clear()

    # set the current axes
    _pylab.axes(axes)

    # now loop over the list of data in xdata and ydata
    for n in range(0, len(xdata)):
        # get the label
        if label[n] == '_nolegend_':
            l = '_nolegend_'
        else:
            l = str(n) + ": " + str(label[n])

        # calculate the x and y progressive shifts
        dx = xshift * (n / xshift_every)
        dy = yshift * (n / yshift_every)

        # if we're supposed to coarsen the data, do so.
        x = _fun.coarsen_array(xdata[n], coarsen)
        y = _fun.coarsen_array(ydata[n], coarsen)
        ey = _fun.coarsen_array(eydata[n], coarsen, 'quadrature')
        ex = _fun.coarsen_array(exdata[n], coarsen, 'quadrature')

        # update the style
        if not style is None:
            kwargs.update(next(style))
        axes.errorbar(x + dx, y + dy, label=l, yerr=ey, xerr=ex, **kwargs)

    _pylab.xscale(xscale)
    _pylab.yscale(yscale)
    if legend:
        axes.legend(loc=legend)
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)

    # for some arguments there should be no title.
    if title in [None, False, 0]:
        axes.set_title('')
    # add the commands to the title
    else:
        title = str(title)
        history = _fun.get_shell_history()
        for n in range(0, min(shell_history, len(history))):
            title = title + "\n" + history[n].split('\n')[0].strip()
        title = title + '\nPlot created ' + _time.asctime()
        axes.set_title(title)

    if grid:
        _pylab.grid(True)

    if autoformat:
        _pt.format_figure(draw=False, modify_geometry=autoformat_window)
        _pt.auto_zoom(axes=axes, draw=False)

    # update the canvas
    if draw:
        _pylab.ion()
        _pylab.draw()
        _pylab.show()

    return axes
def msg(self, msg=None, ret_r=False):
    '''code's message'''
    if msg or ret_r:
        self._msg = msg
        return self
    return self._msg
def verify_server_core(timeout=120, start_delay=90):
    '''
    checks to see if the server_core is running

    args:
        start_delay: seconds to wait before the status checks begin
        timeout: number of seconds to wait
    '''
    timestamp = time.time()
    last_check = time.time() + start_delay - 10
    last_delay_notification = time.time() - 10
    server_down = True
    return_val = False
    timeout += 1
    # loop until the server is up or the timeout is reached
    while (time.time() - timestamp) < timeout and server_down:
        # if delaying the start of the check, print that we are waiting
        if start_delay > 0 and time.time() - timestamp < start_delay \
                and (time.time() - last_delay_notification) > 5:
            print("Delaying server status check until %ss. Current time: %ss"
                  % (start_delay, int(time.time() - timestamp)))
            last_delay_notification = time.time()
        # send a request check every 10s until the server is up
        while (time.time() - last_check) > 10 and server_down:
            print("Checking status of servers at %ss" %
                  int((time.time() - timestamp)))
            last_check = time.time()
            try:
                repo = requests.get(CFG.REPOSITORY_URL)
                repo_code = repo.status_code
                print("\t", CFG.REPOSITORY_URL, " - ", repo_code)
            except:
                repo_code = 400
                print("\t", CFG.REPOSITORY_URL, " - DOWN")
            try:
                triple = requests.get(CFG.DATA_TRIPLESTORE.url)
                triple_code = triple.status_code
                print("\t", CFG.DATA_TRIPLESTORE.url, " - ", triple_code)
            except:
                triple_code = 400
                print("\t", CFG.DATA_TRIPLESTORE.url, " - down")
            if repo_code == 200 and triple_code == 200:
                server_down = False
                return_val = True
                print("**** Servers up at %ss" %
                      int((time.time() - timestamp)))
                break
    return return_val
def process_satellites(self, helper, sess):
    """ check and show the good satellites """
    good_satellites = helper.get_snmp_value(
        sess, helper, self.oids['oid_gps_satellites_good'])
    # Show the summary, add the metric, and afterwards check the metric
    helper.add_summary("Good satellites: {}".format(good_satellites))
    helper.add_metric(label='satellites', value=good_satellites)
def get_rank(self, member, reverse=False, pipe=None):
    """
    Return the rank of *member* in the collection.

    By default, the member with the lowest score has rank 0.
    If *reverse* is ``True``, the member with the highest score has rank 0.
    """
    pipe = self.redis if pipe is None else pipe
    method = getattr(pipe, 'zrevrank' if reverse else 'zrank')
    rank = method(self.key, self._pickle(member))
    return rank
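# Usage sketch (added): the same rank semantics shown with redis-py directly,
# assuming redis-py >= 3.0 and a Redis server on localhost (both assumptions,
# as is the key name 'scores').
import redis

r = redis.Redis()
r.zadd('scores', {'alice': 1.0, 'bob': 2.0, 'carol': 3.0})
assert r.zrank('scores', 'alice') == 0     # lowest score -> rank 0
assert r.zrevrank('scores', 'alice') == 2  # reverse: highest score -> rank 0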
def designPrimers(p3_args, input_log=None, output_log=None, err_log=None):
    ''' Return the raw primer3_core output for the provided primer3 args.

    Returns an ordered dict of the boulderIO-format primer3 output file
    '''
    sp = subprocess.Popen([pjoin(PRIMER3_HOME, 'primer3_core')],
                          stdout=subprocess.PIPE, stdin=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
    p3_args.setdefault('PRIMER_THERMODYNAMIC_PARAMETERS_PATH',
                       pjoin(PRIMER3_HOME, 'primer3_config/'))
    in_str = _formatBoulderIO(p3_args)
    if input_log:
        input_log.write(in_str)
        input_log.flush()
    out_str, err_str = sp.communicate(input=in_str)
    if output_log:
        output_log.write(out_str)
        output_log.flush()
    if err_log and err_str is not None:
        err_log.write(err_str)
        err_log.flush()
    return _parseBoulderIO(out_str)
def _start_repl(api):
    # type: (Iota) -> None
    """
    Starts the REPL.
    """
    banner = (
        'IOTA API client for {uri} ({testnet}) '
        'initialized as variable `api`.\n'
        'Type `help(api)` for list of API commands.'.format(
            testnet='testnet' if api.testnet else 'mainnet',
            uri=api.adapter.get_uri(),
        )
    )

    scope_vars = {'api': api}

    try:
        # noinspection PyUnresolvedReferences
        import IPython
    except ImportError:
        # IPython not available; use regular Python REPL.
        from code import InteractiveConsole
        InteractiveConsole(locals=scope_vars).interact(banner, '')
    else:
        print(banner)
        IPython.start_ipython(argv=[], user_ns=scope_vars)
def _spec_to_matches(server_list, server_spec, mode):
    """
    mode is in {uri, hostname, hostname_port}

    A list of matching server docs. Should usually be 0 or 1 matches.
    Multiple matches are possible though.
    """
    assert mode in ("uri", "hostname", "hostname_port")

    def match(server_doc):
        if mode == "hostname":
            return server_spec == server_doc["hostname"]
        elif mode == "hostname_port":
            return server_spec == "{}:{}".format(
                server_doc["hostname"], server_doc["port"]
            )
        elif mode == "uri":
            return server_spec == "{}://{}:{}".format(
                server_doc["scheme"], server_doc["hostname"], server_doc["port"]
            )
        else:
            raise NotImplementedError("Unreachable error! Something is very wrong.")

    return [server_doc for server_doc in server_list if match(server_doc)]
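# Usage sketch (added): _spec_to_matches is a pure function, so a toy server
# doc (hypothetical values) illustrates the three matching modes.
servers = [{"scheme": "https", "hostname": "db1", "port": 27017}]
assert _spec_to_matches(servers, "db1", "hostname") == servers
assert _spec_to_matches(servers, "db1:27017", "hostname_port") == servers
assert _spec_to_matches(servers, "https://db1:27017", "uri") == servers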
def AddInformationalOptions(self, argument_group):
    """Adds the informational options to the argument group.

    Args:
      argument_group (argparse._ArgumentGroup): argparse argument group.
    """
    argument_group.add_argument(
        '-d', '--debug', dest='debug', action='store_true', default=False,
        help='Enable debug output.')

    argument_group.add_argument(
        '-q', '--quiet', dest='quiet', action='store_true', default=False,
        help='Disable informational output.')
def _canceling_task(self, backend):
    """
    Used internally to decrement `backend`'s current and total task counts
    when `backend` could not be reached.
    """
    with self.backend_mutex:
        self.backends[backend] -= 1
        self.task_counter[backend] -= 1
def _from_p(self, mode):
    """Convert the image from P or PA to RGB or RGBA."""
    self._check_modes(("P", "PA"))
    if not self.palette:
        raise RuntimeError("Can't convert palettized image, missing palette.")
    pal = np.array(self.palette)
    pal = da.from_array(pal, chunks=pal.shape)

    if pal.shape[1] == 4:
        # colormap's alpha overrides data alpha
        mode = "RGBA"
        alpha = None
    elif self.mode.endswith("A"):
        # add a new/fake 'bands' dimension to the end
        alpha = self.data.sel(bands="A").data[..., None]
        mode = mode + "A" if not mode.endswith("A") else mode
    else:
        alpha = None

    flat_indexes = self.data.sel(bands='P').data.ravel().astype('int64')
    dim_sizes = ((key, val) for key, val in self.data.sizes.items() if key != 'bands')
    dims, new_shape = zip(*dim_sizes)
    dims = dims + ('bands',)
    new_shape = new_shape + (pal.shape[1],)
    new_data = pal[flat_indexes].reshape(new_shape)
    coords = dict(self.data.coords)
    coords["bands"] = list(mode)

    if alpha is not None:
        new_arr = da.concatenate((new_data, alpha), axis=-1)
        data = xr.DataArray(new_arr, coords=coords, attrs=self.data.attrs, dims=dims)
    else:
        data = xr.DataArray(new_data, coords=coords, attrs=self.data.attrs, dims=dims)

    return data
def get_tuning(instrument, description, nr_of_strings=None, nr_of_courses=None):
    """Get the first tuning that satisfies the constraints.

    The instrument and description arguments are treated like
    case-insensitive prefixes. So searching for 'bass' is the same as
    'Bass Guitar'.

    Example:
    >>> tunings.get_tuning('guitar', 'standard')
    <tunings.StringTuning instance at 0x139ac20>
    """
    searchi = str.upper(instrument)
    searchd = str.upper(description)
    keys = _known.keys()
    for x in keys:
        if (searchi not in keys and x.find(searchi) == 0
                or searchi in keys and x == searchi):
            for (desc, tun) in _known[x][1].iteritems():
                if desc.find(searchd) == 0:
                    if nr_of_strings is None and nr_of_courses is None:
                        return tun
                    elif nr_of_strings is not None and nr_of_courses is None:
                        if tun.count_strings() == nr_of_strings:
                            return tun
                    elif nr_of_strings is None and nr_of_courses is not None:
                        if tun.count_courses() == nr_of_courses:
                            return tun
                    else:
                        if (tun.count_courses() == nr_of_courses
                                and tun.count_strings() == nr_of_strings):
                            return tun
def can_proceed(bound_method, check_conditions=True):
    """
    Returns True if the model's current state allows calling bound_method.

    Set ``check_conditions`` argument to ``False`` to skip checking
    conditions.
    """
    if not hasattr(bound_method, '_django_fsm'):
        im_func = getattr(bound_method, 'im_func', getattr(bound_method, '__func__'))
        raise TypeError('%s method is not transition' % im_func.__name__)

    meta = bound_method._django_fsm
    im_self = getattr(bound_method, 'im_self', getattr(bound_method, '__self__'))
    current_state = meta.field.get_state(im_self)

    return meta.has_transition(current_state) and (
        not check_conditions or meta.conditions_met(im_self, current_state))
def decrease_reads_in_units(
        current_provisioning, units, min_provisioned_reads, log_tag):
    """ Decrease the current provisioning by ``units`` units

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we decrease with
    :returns: int -- New provisioning value
    :type min_provisioned_reads: int
    :param min_provisioned_reads: Configured min provisioned reads
    :type log_tag: str
    :param log_tag: Prefix for the log
    """
    updated_provisioning = int(current_provisioning) - int(units)
    min_provisioned_reads = __get_min_reads(
        current_provisioning,
        min_provisioned_reads,
        log_tag)

    if updated_provisioning < min_provisioned_reads:
        logger.info(
            '{0} - Reached provisioned reads min limit: {1:d}'.format(
                log_tag, int(min_provisioned_reads)))
        return min_provisioned_reads

    logger.debug(
        '{0} - Read provisioning will be decreased to {1:d} units'.format(
            log_tag, int(updated_provisioning)))
    return updated_provisioning
def setMetadata(self, remote, address, key, value):
    """Set metadata of device"""
    try:
        return self.proxies["%s-%s" % (self._interface_id, remote)].setMetadata(address, key, value)
    except Exception as err:
        LOG.debug("ServerThread.setMetadata: Exception: %s" % str(err))
def set_content_model(self):
    """
    Set content_model to the child class's related name, or None if this is
    the base class.
    """
    if not self.content_model:
        is_base_class = (
            base_concrete_model(ContentTyped, self) == self.__class__)
        self.content_model = (
            None if is_base_class else self.get_content_model_name())
def _set_keychain(self, v, load=False):
    """
    Setter method for keychain, mapped from YANG variable /keychain (list)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_keychain is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_keychain() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("name_of_keychain",keychain.keychain, yang_name="keychain", rest_name="keychain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-of-keychain', extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}), is_container='list', yang_name="keychain", rest_name="keychain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """keychain must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name_of_keychain",keychain.keychain, yang_name="keychain", rest_name="keychain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-of-keychain', extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}), is_container='list', yang_name="keychain", rest_name="keychain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='list', is_config=True)""",
        })

    self.__keychain = t
    if hasattr(self, '_set'):
        self._set()
def class_check_para(**kw):
    """
    force check accepts and returns, decorator,
    @class_check_para(accepts=..., returns=..., mail=...)

    :param kw:
    :return:
    """
    try:
        def decorator(f):
            def new_f(*args):
                if "accepts" in kw:
                    assert len(args) == len(kw["accepts"]) + 1
                    arg_types = tuple(map(type, args[1:]))
                    if arg_types != kw["accepts"]:
                        msg = decorator_info(f.__name__, kw["accepts"], arg_types, 0)
                        print('TypeWarning: ', msg)
                        raise TypeError(msg)
                result = f(*args)
                if "returns" in kw:
                    res_type = type(result)
                    if res_type != kw["returns"]:
                        msg = decorator_info(f.__name__, (kw["returns"],), (res_type,), 1)
                        print('TypeWarning: ', msg)
                        raise TypeError(msg)
                return result
            new_f.__name__ = f.__name__
            return new_f
        return decorator
    except KeyError as ke:
        raise KeyError(ke.message + "is not a valid keyword argument")
    except TypeError as te:
        raise TypeError(te.message)
def from_buffer(self, buf):
    """
    Identify the contents of `buf`
    """
    with self.lock:
        try:
            # if we're on python3, convert buf to bytes
            # otherwise this string is passed as wchar*
            # which is not what libmagic expects
            if isinstance(buf, str) and str != bytes:
                buf = buf.encode('utf-8', errors='replace')
            return maybe_decode(magic_buffer(self.cookie, buf))
        except MagicException as e:
            return self._handle509Bug(e)
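# Usage sketch (added): a method like this backs python-magic's module-level
# helper; assuming the python-magic package and a working libmagic, typical
# use of the public API looks like this.
import magic

print(magic.from_buffer(b'\x89PNG\r\n\x1a\n', mime=True))  # 'image/png'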
def set_app_os_tag(self, os_tag, app_tag, update_os, update_app):
    """Update the app and/or os tags."""
    update_os = bool(update_os)
    update_app = bool(update_app)

    if update_os:
        self.os_info = _unpack_version(os_tag)
    if update_app:
        self.app_info = _unpack_version(app_tag)

    return [Error.NO_ERROR]
def convert_language_code(django_lang):
    """
    Converts Django language codes "ll-cc" into ISO codes "ll_CC" or "ll"

    :param django_lang: Django language code as ll-cc
    :type django_lang: str
    :return: ISO language code as ll_CC
    :rtype: str
    """
    lang_and_country = django_lang.split('-')
    try:
        return '_'.join((lang_and_country[0], lang_and_country[1].upper()))
    except IndexError:
        return lang_and_country[0]
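# Usage sketch (added): convert_language_code is a pure string transformation.
assert convert_language_code('pt-br') == 'pt_BR'
assert convert_language_code('de') == 'de'  # no country part -> passthrough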
def handle_market_close(self, dt, data_portal):
    """Handles the close of the given day.

    Parameters
    ----------
    dt : Timestamp
        The most recently completed simulation datetime.
    data_portal : DataPortal
        The current data portal.

    Returns
    -------
    A daily perf packet.
    """
    completed_session = self._current_session

    if self.emission_rate == 'daily':
        # this method is called for both minutely and daily emissions, but
        # this chunk of code here only applies for daily emissions. (since
        # it's done every minute, elsewhere, for minutely emission).
        self.sync_last_sale_prices(dt, data_portal)

    session_ix = self._session_count
    # increment the day counter before we move markers forward.
    self._session_count += 1

    packet = {
        'period_start': self._first_session,
        'period_end': self._last_session,
        'capital_base': self._capital_base,
        'daily_perf': {
            'period_open': self._market_open,
            'period_close': dt,
        },
        'cumulative_perf': {
            'period_open': self._first_session,
            'period_close': self._last_session,
        },
        'progress': self._progress(self),
        'cumulative_risk_metrics': {},
    }
    ledger = self._ledger
    ledger.end_of_session(session_ix)
    self.end_of_session(
        packet,
        ledger,
        completed_session,
        session_ix,
        data_portal,
    )

    return packet
def _centroids(n_clusters: int, points: List[List[float]]) -> List[List[float]]:
    """ Return the n_clusters input points closest to the k-means centroids """
    k_means = KMeans(n_clusters=n_clusters)
    k_means.fit(points)
    closest, _ = pairwise_distances_argmin_min(k_means.cluster_centers_, points)
    return list(map(list, np.array(points)[closest.tolist()]))
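# Usage sketch (added): a toy call, assuming scikit-learn and numpy are
# installed; the imports below are what _centroids above relies on.
from typing import List
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min

pts = [[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]]
print(_centroids(2, pts))  # one representative point from each cluster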
def ls(args):
    """
    lexibank ls [COLS]+

    column specification:
    - license
    - lexemes
    - macroareas
    """
    db = Database(args.db)
    db.create(exists_ok=True)
    in_db = {r[0]: r[1] for r in db.fetchall('select id, version from dataset')}
    # FIXME: how to smartly choose columns?
    table = Table('ID', 'Title')
    cols = OrderedDict([
        (col, {}) for col in args.args if col in [
            'version',
            'location',
            'changes',
            'license',
            'all_lexemes',
            'lexemes',
            'concepts',
            'languages',
            'families',
            'varieties',
            'macroareas',
        ]])
    tl = 40
    if cols:
        tl = 25
        table.columns.extend(col.capitalize() for col in cols)

    for col, sql in [
        ('languages', 'glottocodes_by_dataset'),
        ('concepts', 'conceptsets_by_dataset'),
        ('lexemes', 'mapped_lexemes_by_dataset'),
        ('all_lexemes', 'lexemes_by_dataset'),
        ('macroareas', 'macroareas_by_dataset'),
        ('families', 'families_by_dataset'),
    ]:
        if col in cols:
            cols[col] = {r[0]: r[1] for r in db.fetchall(sql)}

    for ds in args.cfg.datasets:
        row = [
            colored(ds.id, 'green' if ds.id in in_db else 'red'),
            truncate_with_ellipsis(ds.metadata.title or '', width=tl),
        ]
        for col in cols:
            if col == 'version':
                row.append(git_hash(ds.dir))
            elif col == 'location':
                row.append(colored(str(ds.dir), 'green'))
            elif col == 'changes':
                row.append(ds.git_repo.is_dirty())
            elif col == 'license':
                lic = licenses.find(ds.metadata.license or '')
                row.append(lic.id if lic else ds.metadata.license)
            elif col in ['languages', 'concepts', 'lexemes', 'all_lexemes', 'families']:
                row.append(float(cols[col].get(ds.id, 0)))
            elif col == 'macroareas':
                row.append(', '.join(sorted((cols[col].get(ds.id) or '').split(','))))
            else:
                row.append('')

        table.append(row)

    totals = ['zztotal', len(args.cfg.datasets)]
    for i, col in enumerate(cols):
        if col in ['lexemes', 'all_lexemes']:
            totals.append(sum([r[i + 2] for r in table]))
        elif col == 'languages':
            totals.append(float(db.fetchone(
                "SELECT count(distinct glottocode) FROM languagetable")[0]))
        elif col == 'concepts':
            totals.append(float(db.fetchone(
                "SELECT count(distinct concepticon_id) FROM parametertable")[0]))
        elif col == 'families':
            totals.append(float(db.fetchone(
                "SELECT count(distinct family) FROM languagetable")[0]))
        else:
            totals.append('')
    table.append(totals)
    print(table.render(
        tablefmt='simple', sortkey=lambda r: r[0], condensed=False,
        floatfmt=',.0f'))
def find_le(a, x):
    """Find the index of the rightmost value less than or equal to x."""
    i = bs.bisect_right(a, x)
    if i:
        return i - 1
    raise ValueError
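# Usage sketch (added): find_le requires `a` to be sorted (it uses bisect,
# imported above as `bs`) and returns an index, not the value itself.
a = [1, 2, 4, 8]
assert find_le(a, 5) == 2        # a[2] == 4 is the rightmost value <= 5
assert a[find_le(a, 8)] == 8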
def solveConsMarkov(solution_next, IncomeDstn, LivPrb, DiscFac, CRRA, Rfree,
                    PermGroFac, MrkvArray, BoroCnstArt, aXtraGrid, vFuncBool,
                    CubicBool):
    '''
    Solves a single period consumption-saving problem with risky income and
    stochastic transitions between discrete states, in a Markov fashion.
    Has identical inputs as solveConsIndShock, except for a discrete Markov
    transition rule MrkvArray.  Markov states can differ in their interest
    factor, permanent growth factor, and income distribution, so the inputs
    Rfree, PermGroFac, and IncomeDstn are arrays or lists specifying those
    values in each (succeeding) Markov state.

    Parameters
    ----------
    solution_next : ConsumerSolution
        The solution to next period's one period problem.
    IncomeDstn_list : [[np.array]]
        A length N list of income distributions in each succeeding Markov
        state.  Each income distribution contains three arrays of floats,
        representing a discrete approximation to the income process at the
        beginning of the succeeding period.  Order: event probabilities,
        permanent shocks, transitory shocks.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree_list : np.array
        Risk free interest factor on end-of-period assets for each Markov
        state in the succeeding period.
    PermGroFac_list : float
        Expected permanent income growth factor at the end of this period
        for each Markov state in the succeeding period.
    MrkvArray : numpy.array
        An NxN array representing a Markov transition matrix between discrete
        states.  The i,j-th element of MrkvArray is the probability of moving
        from state i in period t to state j in period t+1.
    BoroCnstArt : float or None
        Borrowing constraint for the minimum allowable assets to end the
        period with.  If it is less than the natural borrowing constraint,
        then it is irrelevant; BoroCnstArt=None indicates no artificial
        borrowing constraint.
    aXtraGrid : np.array
        Array of "extra" end-of-period asset values -- assets above the
        absolute minimum acceptable level.
    vFuncBool : boolean
        An indicator for whether the value function should be computed and
        included in the reported solution.
    CubicBool : boolean
        An indicator for whether the solver should use cubic or linear
        interpolation.

    Returns
    -------
    solution : ConsumerSolution
        The solution to the single period consumption-saving problem.
        Includes a consumption function cFunc (using cubic or linear
        splines), a marginal value function vPfunc, a minimum acceptable
        level of normalized market resources mNrmMin, normalized human
        wealth hNrm, and bounding MPCs MPCmin and MPCmax.  It might also
        have a value function vFunc and marginal marginal value function
        vPPfunc.  All of these attributes are lists or arrays, with elements
        corresponding to the current Markov state.  E.g.
        solution.cFunc[0] is the consumption function when in the i=0
        Markov state this period.
    '''
    solver = ConsMarkovSolver(solution_next, IncomeDstn, LivPrb, DiscFac,
                              CRRA, Rfree, PermGroFac, MrkvArray, BoroCnstArt,
                              aXtraGrid, vFuncBool, CubicBool)
    solution_now = solver.solve()
    return solution_now
def render(self, request, **kwargs):
    """
    Renders this view. Adds cancel_url to the context.

    If the request GET parameters contain 'popup' then the `render_type`
    is set to 'popup'.
    """
    if request.GET.get('popup'):
        self.render_type = 'popup'
        kwargs['popup'] = 1

    kwargs['cancel_url'] = self.get_cancel_url()

    if not self.object:
        kwargs['single_title'] = True

    return super(FormView, self).render(request, **kwargs)
def _get_value(obj, key):
    """Get a value for 'key' from 'obj', if possible"""
    if isinstance(obj, (list, tuple)):
        for item in obj:
            v = _find_value(key, item)
            if v is not None:
                return v
        return None
    if isinstance(obj, dict):
        return obj.get(key)
    if obj is not None:
        return getattr(obj, key, None)
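# Usage sketch (added): the dict, attribute, and None paths of _get_value
# (the list path delegates to `_find_value`, which is not defined here,
# so it is not exercised).
assert _get_value({'name': 'x'}, 'name') == 'x'

class _Obj:
    name = 'y'

assert _get_value(_Obj(), 'name') == 'y'
assert _get_value(None, 'name') is None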
def do_connect(self, arg):
    ''' Connect to the arm. '''
    if self.arm.is_connected():
        print(self.style.error('Error: ', 'Arm is already connected.'))
    else:
        try:
            port = self.arm.connect()
            print(self.style.success('Success: ', 'Connected to \'{}\'.'.format(port)))
        except r12.ArmException as e:
            print(self.style.error('Error: ', str(e)))
def progress_bar_wrapper(iterable, **kwargs):
    ''' Wrapper that applies tqdm progress bar conditional on config settings. '''
    return tqdm(iterable, **kwargs) if (config.get_option('progress_bar')
                                        and not isinstance(iterable, tqdm)) else iterable
Wrapper that applies tqdm progress bar conditional on config settings.
def export(self, elec_file): """Export channel name and location to file. Parameters ---------- elec_file : Path or str path to the output file; '.csv' and '.sfp' extensions are supported """ elec_file = Path(elec_file) if elec_file.suffix == '.csv': sep = ', ' elif elec_file.suffix == '.sfp': sep = ' ' else: raise ValueError('Unsupported extension: ' + elec_file.suffix) with elec_file.open('w') as f: for one_chan in self.chan: values = ([one_chan.label, ] + ['{:.3f}'.format(x) for x in one_chan.xyz]) line = sep.join(values) + '\n' f.write(line)
Export channel name and location to file. Parameters ---------- elec_file : Path or str path to the output file; '.csv' and '.sfp' extensions are supported
def setup_users_signals(self, ): """Setup the signals for the users page :returns: None :rtype: None :raises: None """ log.debug("Setting up users page signals.") self.users_user_view_pb.clicked.connect(self.users_view_user) self.users_user_create_pb.clicked.connect(self.create_user)
Setup the signals for the users page :returns: None :rtype: None :raises: None
def set_terminal_converted(self, attr, repr_value): """ Converts the given representation value and sets the specified attribute value to the converted value. :param attr: Attribute to set. :param str repr_value: String value of the attribute to set. """ value = self.converter_registry.convert_from_representation( repr_value, attr.value_type) self.data[attr.repr_name] = value
Converts the given representation value and sets the specified attribute value to the converted value. :param attr: Attribute to set. :param str repr_value: String value of the attribute to set.
def parseline(line,format): """\ Given a line (a string actually) and a short string telling how to format it, return a list of python objects that result. The format string maps words (as split by line.split()) into python code: x -> Nothing; skip this word s -> Return this word as a string i -> Return this word as an int d -> Return this word as an int f -> Return this word as a float Basic parsing of strings: >>> parseline('Hello, World','ss') ['Hello,', 'World'] You can use 'x' to skip a record; you also don't have to parse every record: >>> parseline('1 2 3 4','xdd') [2, 3] >>> parseline('C1 0.0 0.0 0.0','sfff') ['C1', 0.0, 0.0, 0.0] Should this return an empty list? >>> parseline('This line wont be parsed','xx') """ xlat = {'x':None,'s':str,'f':float,'d':int,'i':int} result = [] words = line.split() for i in range(len(format)): f = format[i] trans = xlat.get(f,None) if trans: result.append(trans(words[i])) if len(result) == 0: return None if len(result) == 1: return result[0] return result
\ Given a line (a string actually) and a short string telling how to format it, return a list of python objects that result. The format string maps words (as split by line.split()) into python code: x -> Nothing; skip this word s -> Return this word as a string i -> Return this word as an int d -> Return this word as an int f -> Return this word as a float Basic parsing of strings: >>> parseline('Hello, World','ss') ['Hello,', 'World'] You can use 'x' to skip a record; you also don't have to parse every record: >>> parseline('1 2 3 4','xdd') [2, 3] >>> parseline('C1 0.0 0.0 0.0','sfff') ['C1', 0.0, 0.0, 0.0] Should this return an empty list? >>> parseline('This line wont be parsed','xx')
def free_symbols(self): """Set of free SymPy symbols contained within the expression.""" if self._free_symbols is None: if len(self._vals) == 0: self._free_symbols = self.operand.free_symbols else: dummy_map = {} for sym in self._vals.keys(): dummy_map[sym] = sympy.Dummy() # bound symbols may not be atomic, so we have to replace them # with dummies self._free_symbols = { sym for sym in self.operand.substitute(dummy_map).free_symbols if not isinstance(sym, sympy.Dummy)} for val in self._vals.values(): self._free_symbols.update(val.free_symbols) return self._free_symbols
Set of free SymPy symbols contained within the expression.
def delete_communication_channel_id(self, id, user_id): """ Delete a communication channel. Delete an existing communication channel. """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("DELETE /api/v1/users/{user_id}/communication_channels/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/users/{user_id}/communication_channels/{id}".format(**path), data=data, params=params, single_item=True)
Delete a communication channel. Delete an existing communication channel.
def string_to_sign(self, http_request): """ Return the canonical StringToSign as well as a dict containing the original version of all headers that were included in the StringToSign. """ headers_to_sign = self.headers_to_sign(http_request) canonical_headers = self.canonical_headers(headers_to_sign) string_to_sign = '\n'.join([http_request.method, http_request.path, '', canonical_headers, '', http_request.body]) return string_to_sign, headers_to_sign
Return the canonical StringToSign as well as a dict containing the original version of all headers that were included in the StringToSign.
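For orientation, the '\n'.join above produces a six-part layout; a sketch with illustrative values (not taken from any real signer):

# GET
# /some/path
#                       <- empty component
# host:example.com      (canonical_headers, itself newline-joined)
# x-custom:value
#                       <- empty component
# {"request": "body"}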
def config_string_to_dict(string, result=None): """ Convert a given configuration string :: key_1=value_1|key_2=value_2|...|key_n=value_n into the corresponding dictionary :: dictionary[key_1] = value_1 dictionary[key_2] = value_2 ... dictionary[key_n] = value_n :param string string: the configuration string :rtype: dict """ if string is None: return {} pairs = string.split(gc.CONFIG_STRING_SEPARATOR_SYMBOL) return pairs_to_dict(pairs, result)
Convert a given configuration string :: key_1=value_1|key_2=value_2|...|key_n=value_n into the corresponding dictionary :: dictionary[key_1] = value_1 dictionary[key_2] = value_2 ... dictionary[key_n] = value_n :param string string: the configuration string :rtype: dict
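A minimal sketch of the helper this relies on; `pairs_to_dict` and the '|' / '=' separators are assumptions inferred from the docstring, not confirmed by the snippet:

def pairs_to_dict(pairs, result=None):
    # Hypothetical implementation: split each 'key=value' pair on the first '='.
    result = {} if result is None else result
    for pair in pairs:
        key, _, value = pair.partition('=')
        if key:
            result[key] = value
    return result

pairs_to_dict("key_1=value_1|key_2=value_2".split('|'))
# -> {'key_1': 'value_1', 'key_2': 'value_2'}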
def captcha_transmit(self, captcha, uuid): """Delayed transmission of a requested captcha""" self.log('Transmitting captcha') response = { 'component': 'hfos.enrol.enrolmanager', 'action': 'captcha', 'data': b64encode(captcha['image'].getvalue()).decode('utf-8') } self.fire(send(uuid, response))
Delayed transmission of a requested captcha
def hide(self, eid, index=0): """ Hide the element with the matching eid. If no match, look for an element with a matching rid. """ elems = None if eid in self.__element_ids: elems = self.__element_ids[eid] elif eid in self.__repeat_ids: elems = self.__repeat_ids[eid] if elems and index < len(elems): elem = elems[index] elem.parent.children.remove(elem)
Hide the element with the matching eid. If no match, look for an element with a matching rid.
def list_recent_networks(self) -> List[Network]: """List the most recently created version of each network (by name).""" most_recent_times = ( self.session .query( Network.name.label('network_name'), func.max(Network.created).label('max_created') ) .group_by(Network.name) .subquery('most_recent_times') ) and_condition = and_( most_recent_times.c.network_name == Network.name, most_recent_times.c.max_created == Network.created ) most_recent_networks = self.session.query(Network).join(most_recent_times, and_condition) return most_recent_networks.all()
List the most recently created version of each network (by name).
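The ORM above is the classic latest-row-per-group pattern; roughly the SQL it emits (a sketch, with table and column names assumed from the model):

# SELECT network.* FROM network
# JOIN (SELECT name, MAX(created) AS max_created
#       FROM network GROUP BY name) AS most_recent_times
#   ON most_recent_times.name = network.name
#  AND most_recent_times.max_created = network.created;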
def create(**data): """ Create a customer. :param data: data required to create the customer :return: The customer resource :rtype resources.Customer """ http_client = HttpClient() response, _ = http_client.post(routes.url(routes.CUSTOMER_RESOURCE), data) return resources.Customer(**response)
Create a customer. :param data: data required to create the customer :return: The customer resource :rtype resources.Customer
def get_filename(key, message, default=None, history=None): """ Like :meth:`prompt`, but only accepts the name of an existing file as an input. :type key: str :param key: The key under which to store the input in the :class:`InputHistory`. :type message: str :param message: The user prompt. :type default: str|None :param default: The offered default if none was found in the history. :type history: :class:`InputHistory` or None :param history: The history used for recording default values, or None. """ def _validate(string): if not os.path.isfile(string): return 'File not found. Please enter a filename.' return prompt(key, message, default, True, _validate, history)
Like :meth:`prompt`, but only accepts the name of an existing file as an input. :type key: str :param key: The key under which to store the input in the :class:`InputHistory`. :type message: str :param message: The user prompt. :type default: str|None :param default: The offered default if none was found in the history. :type history: :class:`InputHistory` or None :param history: The history used for recording default values, or None.
def show_in_external_file_explorer(fnames=None): """Show files in external file explorer Args: fnames (list): Names of files to show. """ if not isinstance(fnames, (tuple, list)): fnames = [fnames] for fname in fnames: open_file_in_external_explorer(fname)
Show files in external file explorer Args: fnames (list): Names of files to show.
def log_once(log_func, msg, *args, **kwargs): """Logs a message only once.""" if msg not in _LOG_ONCE_SEEN: log_func(msg, *args, **kwargs) # Key on the message, ignoring args. This should fit most use cases. _LOG_ONCE_SEEN.add(msg)
Logs a message only once.
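A self-contained sketch of the pattern above (the module-level set is part of the snippet's assumed context):

import logging
_LOG_ONCE_SEEN = set()

def log_once(log_func, msg, *args, **kwargs):
    """Logs a message only once."""
    if msg not in _LOG_ONCE_SEEN:
        log_func(msg, *args, **kwargs)
        _LOG_ONCE_SEEN.add(msg)

log_once(logging.warning, 'config file missing, using defaults')  # emitted
log_once(logging.warning, 'config file missing, using defaults')  # suppressed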
def close(self): """Closes the connection.""" if not self._closed: self._closed = True self.client.close()
Closes the connection.
def search(query=None, catalog=None): """Search """ if query is None: query = make_query(catalog) if query is None: return [] return api.search(query, catalog=catalog)
Search
def cross_list(*sequences): """ From: http://book.opensourceproject.org.cn/lamp/python/pythoncook2/opensource/0596007973/pythoncook2-chp-19-sect-9.html """ result = [[ ]] for seq in sequences: result = [sublist+[item] for sublist in result for item in seq] return result
From: http://book.opensourceproject.org.cn/lamp/python/pythoncook2/opensource/0596007973/pythoncook2-chp-19-sect-9.html
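A worked example of the accumulating comprehension above:

# cross_list([1, 2], ['a', 'b'])
# after seq [1, 2]:      [[1], [2]]
# after seq ['a', 'b']:  [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]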
def play_game(game, *players): """Play an n-person, move-alternating game. >>> play_game(Fig52Game(), alphabeta_player, alphabeta_player) 3 """ state = game.initial while True: for player in players: move = player(game, state) state = game.result(state, move) if game.terminal_test(state): return game.utility(state, game.to_move(game.initial))
Play an n-person, move-alternating game. >>> play_game(Fig52Game(), alphabeta_player, alphabeta_player) 3
def _get_pltdotstrs(self, hdrgos_usr, **kws): """Plot GO DAGs for each group found under a specified header GO.""" import datetime import timeit dotstrs_all = [] tic = timeit.default_timer() # Loop through GO groups. Each group of GOs is formed under a single "header GO" hdrgo2usrgos, go2obj = self._get_plt_data(hdrgos_usr) # get dot strings with _get_dotstrs_curs for hdrgo, usrgos in hdrgo2usrgos.items(): dotstrs_cur = self._get_dotgraphs( hdrgo, usrgos, pltargs=PltGroupedGosArgs(self.grprobj, **kws), go2parentids=get_go2parents_go2obj(go2obj)) dotstrs_all.extend(dotstrs_cur) sys.stdout.write("\nElapsed HMS: {HMS} to write ".format( HMS=str(datetime.timedelta(seconds=(timeit.default_timer()-tic))))) sys.stdout.write("{P:5,} GO DAG plots for {H:>5,} GO grouping headers\n".format( H=len(hdrgo2usrgos), P=len(dotstrs_all))) return sorted(set(dotstrs_all))
Plot GO DAGs for each group found under a specified header GO.
def add_to_batch(self, batch): ''' Adds paths to the given batch object. They are all added as GL_TRIANGLES, so the batch will aggregate them all into a single OpenGL primitive. ''' for name in self.paths: svg_path = self.paths[name] svg_path.add_to_batch(batch)
Adds paths to the given batch object. They are all added as GL_TRIANGLES, so the batch will aggregate them all into a single OpenGL primitive.
def get_connections(self, id, connection_name, **args): """Fetches the connections for given object.""" return self.request( "{0}/{1}/{2}".format(self.version, id, connection_name), args )
Fetches the connections for given object.
def stop_capture(self, port_number): """ Stops a packet capture. :param port_number: allocated port number """ if not [port["port_number"] for port in self._ports_mapping if port_number == port["port_number"]]: raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name, port_number=port_number)) if port_number not in self._nios: raise NodeError("Port {} is not connected".format(port_number)) nio = self._nios[port_number] nio.stopPacketCapture() bridge_name = "{}-{}".format(self._id, port_number) yield from self._ubridge_send("bridge stop_capture {name}".format(name=bridge_name)) log.info("Cloud '{name}' [{id}]: stopping packet capture on port {port_number}".format(name=self.name, id=self.id, port_number=port_number))
Stops a packet capture. :param port_number: allocated port number
def modsplit(s): """Split importable""" if ':' in s: c = s.split(':') if len(c) != 2: raise ValueError(f"Syntax error: {s}") return c[0], c[1] else: c = s.split('.') if len(c) < 2: raise ValueError(f"Syntax error: {s}") return '.'.join(c[:-1]), c[-1]
Split importable
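Worked examples of both branches (the ':' form takes precedence over the '.' form):

# modsplit('package.module:attr')  -> ('package.module', 'attr')
# modsplit('package.module.attr')  -> ('package.module', 'attr')
# modsplit('plainname')            -> raises ValueError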
def p_obs(self, obs, out=None): """ Returns the output probabilities for an entire trajectory and all hidden states Parameters ---------- obs : ndarray((T), dtype=int) a discrete trajectory of length T Return ------ p_o : ndarray (T,N) the probability of generating the symbol at time point t from any of the N hidden states """ if out is None: out = self._output_probabilities[:, obs].T # out /= np.sum(out, axis=1)[:,None] return self._handle_outliers(out) else: if obs.shape[0] == out.shape[0]: np.copyto(out, self._output_probabilities[:, obs].T) elif obs.shape[0] < out.shape[0]: out[:obs.shape[0], :] = self._output_probabilities[:, obs].T else: raise ValueError('output array out is too small: '+str(out.shape[0])+' < '+str(obs.shape[0])) # out /= np.sum(out, axis=1)[:,None] return self._handle_outliers(out)
Returns the output probabilities for an entire trajectory and all hidden states Parameters ---------- obs : ndarray((T), dtype=int) a discrete trajectory of length T Return ------ p_o : ndarray (T,N) the probability of generating the symbol at time point t from any of the N hidden states
def softmax(self, params): ''' Run the softmax selection strategy. Parameters ---------- params : dict Expects a 'tau' key (the softmax temperature). Returns ------- int Index of chosen bandit ''' default_tau = 0.1 if params and isinstance(params, dict): tau = params.get('tau') try: tau = float(tau) except (TypeError, ValueError): # Fall back to the default temperature if tau is missing or non-numeric. tau = default_tau else: tau = default_tau # Handle cold start. Not all bandits tested yet. if np.any(self.pulls < 3): return np.random.choice(range(len(self.pulls))) else: payouts = self.wins / (self.pulls + 0.1) norm = sum(np.exp(payouts/tau)) ps = np.exp(payouts/tau)/norm # Randomly choose index based on CMF cmf = [sum(ps[:i+1]) for i in range(len(ps))] rand = np.random.rand() found = False found_i = None i = 0 while not found: if rand < cmf[i]: found_i = i found = True else: i += 1 return found_i
Run the softmax selection strategy. Parameters ---------- params : dict Expects a 'tau' key (the softmax temperature). Returns ------- int Index of chosen bandit
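The CMF loop above is equivalent to a direct weighted draw; a numpy sketch under the same softmax weights (payout values illustrative):

import numpy as np
payouts = np.array([0.2, 0.5, 0.1])
tau = 0.1
ps = np.exp(payouts / tau) / np.sum(np.exp(payouts / tau))  # softmax probabilities
choice = np.random.choice(len(ps), p=ps)                    # index of the chosen bandit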
def sample(self): """ Compute new samples. """ self._sampling = True try: if self.is_raw_perf_class and not self._previous_sample: self._current_sample = self._query() self._previous_sample = self._current_sample self._current_sample = self._query() except TimeoutException: self.logger.debug(u"Query timeout after {timeout}s".format(timeout=self._timeout_duration)) raise else: self._sampling = False
Compute new samples.
def format_records(records): """Serialise multiple records""" formatted = list() for record_ in records: formatted.append(format_record(record_)) return formatted
Serialise multiple records
def get_typ(self, refobj): """Return the entity type of the given reftrack node See: :data:`MayaRefobjInterface.types`. :param refobj: the reftrack node to query :type refobj: str :returns: the entity type :rtype: str :raises: ValueError """ enum = cmds.getAttr("%s.type" % refobj) try: return JB_ReftrackNode.types[enum] except IndexError: raise ValueError("The type on the node %s could not be associated with an available type: %s" % (refobj, JB_ReftrackNode.types))
Return the entity type of the given reftrack node See: :data:`MayaRefobjInterface.types`. :param refobj: the reftrack node to query :type refobj: str :returns: the entity type :rtype: str :raises: ValueError
def asciigraph(self, values=None, max_height=None, max_width=None, label=False): ''' Accepts a list of y values and returns an ascii graph Optionally values can also be a dictionary with a key of timestamp, and a value of value. InGraphs returns data in this format for example. ''' result = '' border_fill_char = '*' start_ctime = None end_ctime = None if not max_width: max_width = 180 # If this is a dict of timestamp -> value, sort the data, store the start/end time, and convert values to a list of values if isinstance(values, dict): time_series_sorted = sorted(list(values.items()), key=lambda x: x[0]) # Sort timestamp/value dict by the timestamps start_timestamp = time_series_sorted[0][0] end_timestamp = time_series_sorted[-1][0] start_ctime = datetime.fromtimestamp(float(start_timestamp)).ctime() end_ctime = datetime.fromtimestamp(float(end_timestamp)).ctime() values = self._scale_x_values_timestamps(values=time_series_sorted, max_width=max_width) values = [value for value in values if value is not None] if not max_height: max_height = min(20, max(values)) stdev = statistics.stdev(values) mean = statistics.mean(values) # Do value adjustments adjusted_values = list(values) adjusted_values = self._scale_x_values(values=values, max_width=max_width) upper_value = max(adjusted_values) # Getting upper/lower after scaling x values so we don't label a spike we can't see lower_value = min(adjusted_values) adjusted_values = self._scale_y_values(values=adjusted_values, new_min=0, new_max=max_height, scale_old_from_zero=False) adjusted_values = self._round_floats_to_ints(values=adjusted_values) # Obtain Ascii Graph String field = self._get_ascii_field(adjusted_values) graph_string = self._draw_ascii_graph(field=field) # Label the graph if label: top_label = 'Upper value: {upper_value:.2f} '.format(upper_value=upper_value).ljust(max_width, border_fill_char) result += top_label + '\n' result += '{graph_string}\n'.format(graph_string=graph_string) if label: lower = f'Lower value: {lower_value:.2f} ' stats = f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******' fill_length = max_width - len(lower) - len(stats) stat_label = f'{lower}{"*" * fill_length}{stats}\n' result += stat_label if start_ctime and end_ctime: fill_length = max_width - len(start_ctime) - len(end_ctime) result += f'{start_ctime}{" " * fill_length}{end_ctime}\n' return result
Accepts a list of y values and returns an ascii graph Optionally values can also be a dictionary with a key of timestamp, and a value of value. InGraphs returns data in this format for example.
def generate_configurations(*, guided=False, fresh_start=False, save=False): """ If a config file is found in the standard locations, it will be loaded and the config data will be returned. If not found, the data is generated on the fly and returned. """ if fresh_start: purge_configs() loaded_status, loaded_data = get_config() if loaded_status != CONFIG_VALID: if save: make_config_file(guided=guided) status, config_data = get_config() else: config_data = make_config_data(guided=guided) else: config_data = loaded_data return config_data
If a config file is found in the standard locations, it will be loaded and the config data will be returned. If not found, the data is generated on the fly and returned.
def remove_prefix(self, args): """ Remove a prefix. Valid keys in the `args`-struct: * `auth` [struct] Authentication options passed to the :class:`AuthFactory`. * `prefix` [struct] Attributes used to select what prefix to remove. * `recursive` [boolean] When set to 1, also remove child prefixes. """ try: return self.nip.remove_prefix(args.get('auth'), args.get('prefix'), args.get('recursive')) except (AuthError, NipapError) as exc: self.logger.debug(unicode(exc)) raise Fault(exc.error_code, unicode(exc))
Remove a prefix. Valid keys in the `args`-struct: * `auth` [struct] Authentication options passed to the :class:`AuthFactory`. * `prefix` [struct] Attributes used to select what prefix to remove. * `recursive` [boolean] When set to 1, also remove child prefixes.
def extract_features(self, data_frame, pre=''): """ This method extracts all the features available to the Tremor Processor class. :param data_frame: the data frame :type data_frame: pandas.DataFrame :return: amplitude_by_fft, frequency_by_fft, amplitude_by_welch, frequency_by_fft, bradykinesia_amplitude_by_fft, \ bradykinesia_frequency_by_fft, bradykinesia_amplitude_by_welch, bradykinesia_frequency_by_welch, \ magnitude_approximate_entropy, magnitude_autocorrelation_lag_8, magnitude_autocorrelation_lag_9, \ magnitude_partial_autocorrelation_lag_3, magnitude_partial_autocorrelation_lag_5, \ magnitude_partial_autocorrelation_lag_6, magnitude_minimum, magnitude_mean, \ magnitude_ratio_value_number_to_time_series_length, magnitude_change_quantiles, magnitude_number_peaks, \ magnitude_agg_linear_trend_min_chunk_len_5_attr_intercept, \ magnitude_agg_linear_trend_var_chunk_len_10_attr_rvalue, \ magnitude_agg_linear_trend_min_chunk_len_10_attr_intercept, \ magnitude_spkt_welch_density_coeff_2, magnitude_spkt_welch_density_coeff_5, \ magnitude_spkt_welch_density_coeff_8, magnitude_percentage_of_reoccurring_datapoints_to_all_datapoints, \ magnitude_abs_energy, magnitude_fft_aggregated_centroid, magnitude_fft_aggregated_centroid, \ magnitude_fft_coefficient_abs_coeff_44, magnitude_fft_coefficient_abs_coeff_63, \ magnitude_fft_coefficient_abs_coeff_0, magnitude_fft_coefficient_real_coeff_0, \ magnitude_fft_coefficient_real_coeff_23, magnitude_sum_values :rtype: list """ try: magnitude_partial_autocorrelation = self.partial_autocorrelation(data_frame.mag_sum_acc) magnitude_agg_linear = self.agg_linear_trend(data_frame.mag_sum_acc) magnitude_spkt_welch_density = self.spkt_welch_density(data_frame.mag_sum_acc) magnitude_fft_coefficient = self.fft_coefficient(data_frame.mag_sum_acc) return {pre+'amplitude_by_fft': self.amplitude(data_frame)[0], pre+'frequency_by_fft': self.amplitude(data_frame)[1], pre+'amplitude_by_welch': self.amplitude(data_frame, 'welch')[0], pre+'frequency_by_welch': self.amplitude(data_frame, 'welch')[1], pre+'bradykinesia_amplitude_by_fft': self.bradykinesia(data_frame)[0], pre+'bradykinesia_frequency_by_fft': self.bradykinesia(data_frame)[1], pre+'bradykinesia_amplitude_by_welch': self.bradykinesia(data_frame, 'welch')[0], pre+'bradykinesia_frequency_by_welch': self.bradykinesia(data_frame, 'welch')[1], pre+'magnitude_approximate_entropy': self.approximate_entropy(data_frame.mag_sum_acc), pre+'magnitude_autocorrelation_lag_8': self.autocorrelation(data_frame.mag_sum_acc, 8), pre+'magnitude_autocorrelation_lag_9': self.autocorrelation(data_frame.mag_sum_acc, 9), pre+'magnitude_partial_autocorrelation_lag_3': magnitude_partial_autocorrelation[0][1], pre+'magnitude_partial_autocorrelation_lag_5': magnitude_partial_autocorrelation[1][1], pre+'magnitude_partial_autocorrelation_lag_6': magnitude_partial_autocorrelation[2][1], pre+'magnitude_minimum': self.minimum(data_frame.mag_sum_acc), pre+'magnitude_mean': self.mean(data_frame.mag_sum_acc), pre+'magnitude_ratio_value_number_to_time_series_length': self.ratio_value_number_to_time_series_length(data_frame.mag_sum_acc), pre+'magnitude_change_quantiles': self.change_quantiles(data_frame.mag_sum_acc), pre+'magnitude_number_peaks': self.number_peaks(data_frame.mag_sum_acc), pre+'magnitude_agg_linear_trend_min_chunk_len_5_attr_intercept': magnitude_agg_linear[0][1], pre+'magnitude_agg_linear_trend_var_chunk_len_10_attr_rvalue': magnitude_agg_linear[1][1], pre+'magnitude_agg_linear_trend_min_chunk_len_10_attr_intercept': magnitude_agg_linear[2][1], pre+'magnitude_spkt_welch_density_coeff_2': magnitude_spkt_welch_density[0][1], pre+'magnitude_spkt_welch_density_coeff_5': magnitude_spkt_welch_density[1][1], pre+'magnitude_spkt_welch_density_coeff_8': magnitude_spkt_welch_density[2][1], pre+'magnitude_percentage_of_reoccurring_datapoints_to_all_datapoints': self.percentage_of_reoccurring_datapoints_to_all_datapoints(data_frame.mag_sum_acc), pre+'magnitude_abs_energy': self.abs_energy(data_frame.mag_sum_acc), pre+'magnitude_fft_aggregated_centroid': self.fft_aggregated(data_frame.mag_sum_acc)[0][1], pre+'magnitude_fft_coefficient_abs_coeff_44': magnitude_fft_coefficient[0][1], pre+'magnitude_fft_coefficient_abs_coeff_63': magnitude_fft_coefficient[1][1], pre+'magnitude_fft_coefficient_abs_coeff_0': magnitude_fft_coefficient[2][1], pre+'magnitude_fft_coefficient_real_coeff_0': magnitude_fft_coefficient[3][1], pre+'magnitude_fft_coefficient_real_coeff_23': magnitude_fft_coefficient[4][1], pre+'magnitude_sum_values': self.sum_values(data_frame.mag_sum_acc)} except: logging.error("Error on TremorProcessor process, extract features: %s", sys.exc_info()[0])
This method extracts all the features available to the Tremor Processor class. :param data_frame: the data frame :type data_frame: pandas.DataFrame :return: amplitude_by_fft, frequency_by_fft, amplitude_by_welch, frequency_by_fft, bradykinesia_amplitude_by_fft, \ bradykinesia_frequency_by_fft, bradykinesia_amplitude_by_welch, bradykinesia_frequency_by_welch, \ magnitude_approximate_entropy, magnitude_autocorrelation_lag_8, magnitude_autocorrelation_lag_9, \ magnitude_partial_autocorrelation_lag_3, magnitude_partial_autocorrelation_lag_5, \ magnitude_partial_autocorrelation_lag_6, magnitude_minimum, magnitude_mean, \ magnitude_ratio_value_number_to_time_series_length, magnitude_change_quantiles, magnitude_number_peaks, \ magnitude_agg_linear_trend_min_chunk_len_5_attr_intercept, \ magnitude_agg_linear_trend_var_chunk_len_10_attr_rvalue, \ magnitude_agg_linear_trend_min_chunk_len_10_attr_intercept, \ magnitude_spkt_welch_density_coeff_2, magnitude_spkt_welch_density_coeff_5, \ magnitude_spkt_welch_density_coeff_8, magnitude_percentage_of_reoccurring_datapoints_to_all_datapoints, \ magnitude_abs_energy, magnitude_fft_aggregated_centroid, magnitude_fft_aggregated_centroid, \ magnitude_fft_coefficient_abs_coeff_44, magnitude_fft_coefficient_abs_coeff_63, \ magnitude_fft_coefficient_abs_coeff_0, magnitude_fft_coefficient_real_coeff_0, \ magnitude_fft_coefficient_real_coeff_23, magnitude_sum_values :rtype: list
def _dictToAlignments(self, blastDict, read): """ Take a dict (made by XMLRecordsReader._convertBlastRecordToDict) and convert it to a list of alignments. @param blastDict: A C{dict}, from convertBlastRecordToDict. @param read: A C{Read} instance, containing the read that BLAST used to create this record. @raise ValueError: If the query id in the BLAST dictionary does not match the id of the read. @return: A C{list} of L{dark.alignment.Alignment} instances. """ if (blastDict['query'] != read.id and blastDict['query'].split()[0] != read.id): raise ValueError( 'The reads you have provided do not match the BLAST output: ' 'BLAST record query id (%s) does not match the id of the ' 'supposedly corresponding read (%s).' % (blastDict['query'], read.id)) alignments = [] getScore = itemgetter('bits' if self._hspClass is HSP else 'expect') for blastAlignment in blastDict['alignments']: alignment = Alignment(blastAlignment['length'], blastAlignment['title']) alignments.append(alignment) for blastHsp in blastAlignment['hsps']: score = getScore(blastHsp) normalized = normalizeHSP(blastHsp, len(read), self.application) hsp = self._hspClass( score, readStart=normalized['readStart'], readEnd=normalized['readEnd'], readStartInSubject=normalized['readStartInSubject'], readEndInSubject=normalized['readEndInSubject'], readFrame=blastHsp['frame'][0], subjectStart=normalized['subjectStart'], subjectEnd=normalized['subjectEnd'], subjectFrame=blastHsp['frame'][1], readMatchedSequence=blastHsp['query'], subjectMatchedSequence=blastHsp['sbjct'], # Use blastHsp.get on identicalCount and positiveCount # because they were added in version 2.0.3 and will not # be present in any of our JSON output generated before # that. Those values will be None for those JSON files, # but that's much better than no longer being able to # read all that data. identicalCount=blastHsp.get('identicalCount'), positiveCount=blastHsp.get('positiveCount')) alignment.addHsp(hsp) return alignments
Take a dict (made by XMLRecordsReader._convertBlastRecordToDict) and convert it to a list of alignments. @param blastDict: A C{dict}, from convertBlastRecordToDict. @param read: A C{Read} instance, containing the read that BLAST used to create this record. @raise ValueError: If the query id in the BLAST dictionary does not match the id of the read. @return: A C{list} of L{dark.alignment.Alignment} instances.
def list_available_genomes(provider=None): """ List all available genomes. Parameters ---------- provider : str, optional List genomes from specific provider. Genomes from all providers will be returned if not specified. Yields ------ list provider name followed by the genome listing fields, one list per available genome """ if provider: providers = [ProviderBase.create(provider)] else: # if provider is not specified search all providers providers = [ProviderBase.create(p) for p in ProviderBase.list_providers()] for p in providers: for row in p.list_available_genomes(): yield [p.name] + list(row)
List all available genomes. Parameters ---------- provider : str, optional List genomes from specific provider. Genomes from all providers will be returned if not specified. Yields ------ list provider name followed by the genome listing fields, one list per available genome
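A hedged usage sketch (the provider name is illustrative; the available providers depend on the installation):

# for provider_name, *genome_fields in list_available_genomes(provider='UCSC'):
#     print(provider_name, genome_fields)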
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: WorkspaceRealTimeStatisticsContext for this WorkspaceRealTimeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workspace_real_time_statistics.WorkspaceRealTimeStatisticsContext """ if self._context is None: self._context = WorkspaceRealTimeStatisticsContext( self._version, workspace_sid=self._solution['workspace_sid'], ) return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: WorkspaceRealTimeStatisticsContext for this WorkspaceRealTimeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workspace_real_time_statistics.WorkspaceRealTimeStatisticsContext
def _internal_kv_get(key): """Fetch the value of a binary key.""" worker = ray.worker.get_global_worker() if worker.mode == ray.worker.LOCAL_MODE: return _local.get(key) return worker.redis_client.hget(key, "value")
Fetch the value of a binary key.
def from_rdata_list(ttl, rdatas): """Create an rdataset with the specified TTL, and with the specified list of rdata objects. @rtype: dns.rdataset.Rdataset object """ if len(rdatas) == 0: raise ValueError("rdata list must not be empty") r = None for rd in rdatas: if r is None: r = Rdataset(rd.rdclass, rd.rdtype) r.update_ttl(ttl) r.add(rd) return r
Create an rdataset with the specified TTL, and with the specified list of rdata objects. @rtype: dns.rdataset.Rdataset object
def _get_LMv2_response(user_name, password, domain_name, server_challenge, client_challenge): """ [MS-NLMP] v28.0 2016-07-14 2.2.2.4 LMv2_RESPONSE The LMv2_RESPONSE structure defines the NTLM v2 authentication LmChallengeResponse in the AUTHENTICATE_MESSAGE. This response is used only when NTLM v2 authentication is configured. :param user_name: The user name of the user we are trying to authenticate with :param password: The password of the user we are trying to authenticate with :param domain_name: The domain name of the user account we are authenticated with :param server_challenge: A random 8-byte response generated by the server in the CHALLENGE_MESSAGE :param client_challenge: A random 8-byte response generated by the client for the AUTHENTICATE_MESSAGE :return response: LmChallengeResponse to the server challenge """ nt_hash = comphash._ntowfv2(user_name, password, domain_name) lm_hash = hmac.new(nt_hash, (server_challenge + client_challenge)).digest() response = lm_hash + client_challenge return response
[MS-NLMP] v28.0 2016-07-14 2.2.2.4 LMv2_RESPONSE The LMv2_RESPONSE structure defines the NTLM v2 authentication LmChallengeResponse in the AUTHENTICATE_MESSAGE. This response is used only when NTLM v2 authentication is configured. :param user_name: The user name of the user we are trying to authenticate with :param password: The password of the user we are trying to authenticate with :param domain_name: The domain name of the user account we are authenticated with :param server_challenge: A random 8-byte response generated by the server in the CHALLENGE_MESSAGE :param client_challenge: A random 8-byte response generated by the client for the AUTHENTICATE_MESSAGE :return response: LmChallengeResponse to the server challenge
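In [MS-NLMP] notation, the computation above is:

# Response            = HMAC_MD5(ResponseKeyLM, ServerChallenge || ClientChallenge)
# LmChallengeResponse = Response || ClientChallenge
# where ResponseKeyLM = NTOWFv2(Passwd, User, UserDom)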
def width(self, value): """gets/sets the width""" if self._width != value and \ isinstance(value, (int, float, long)): self._width = value
gets/sets the width
def reset_tip_tracking(self): """ Resets the :any:`Pipette` tip tracking, "refilling" the tip racks """ self.current_tip(None) self.tip_rack_iter = iter([]) if self.has_tip_rack(): iterables = self.tip_racks if self.channels > 1: iterables = [c for rack in self.tip_racks for c in rack.cols] else: iterables = [w for rack in self.tip_racks for w in rack] if self.starting_tip: iterables = iterables[iterables.index(self.starting_tip):] self.tip_rack_iter = itertools.chain(iterables)
Resets the :any:`Pipette` tip tracking, "refilling" the tip racks
def extract_fragment(self, iri: str) -> str: ''' Pulls only the code/ID from the iri. The str() conversion is only added because rdflib objects need to be converted to plain strings before splitting. ''' fragment = str(iri).rsplit('/')[-1].split(':', 1)[-1].split('#', 1)[-1].split('_', 1)[-1] return fragment
Pulls only the code/ID from the iri. The str() conversion is only added because rdflib objects need to be converted to plain strings before splitting.
def run(cls, command, cwd=".", **kwargs): """ Make a subprocess call, collect its output and returncode. Returns CommandResult instance as ValueObject. """ assert isinstance(command, six.string_types) command_result = CommandResult() command_result.command = command use_shell = cls.USE_SHELL if "shell" in kwargs: use_shell = kwargs.pop("shell") # -- BUILD COMMAND ARGS: if six.PY2 and isinstance(command, six.text_type): # -- PREPARE-FOR: shlex.split() # In PY2, shlex.split() requires bytes string (non-unicode). # In PY3, shlex.split() accepts unicode string. command = codecs.encode(command, "utf-8") cmdargs = shlex.split(command) # -- TRANSFORM COMMAND (optional) command0 = cmdargs[0] real_command = cls.COMMAND_MAP.get(command0, None) if real_command: cmdargs0 = real_command.split() cmdargs = cmdargs0 + cmdargs[1:] preprocessors = cls.PREPROCESSOR_MAP.get(command0) if preprocessors: cmdargs = cls.preprocess_command(preprocessors, cmdargs, command, cwd) # -- RUN COMMAND: try: process = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=use_shell, cwd=cwd, **kwargs) out, err = process.communicate() if six.PY2: # py3: we get unicode strings, py2 not default_encoding = 'UTF-8' out = six.text_type(out, process.stdout.encoding or default_encoding) err = six.text_type(err, process.stderr.encoding or default_encoding) process.poll() assert process.returncode is not None command_result.stdout = out command_result.stderr = err command_result.returncode = process.returncode if cls.DEBUG: print("shell.cwd={0}".format(kwargs.get("cwd", None))) print("shell.command: {0}".format(" ".join(cmdargs))) print("shell.command.output:\n{0};".format(command_result.output)) except OSError as e: command_result.stderr = u"OSError: %s" % e command_result.returncode = e.errno assert e.errno != 0 postprocessors = cls.POSTPROCESSOR_MAP.get(command0) if postprocessors: command_result = cls.postprocess_command(postprocessors, command_result) return command_result
Make a subprocess call, collect its output and returncode. Returns CommandResult instance as ValueObject.
def get_sms_connection(backend=None, fail_silently=False, **kwds): """Load an sms backend and return an instance of it. If backend is None (default) settings.SMS_BACKEND is used. Both fail_silently and other keyword arguments are used in the constructor of the backend. https://github.com/django/django/blob/master/django/core/mail/__init__.py#L28 """ klass = import_string(backend or settings.SMS_BACKEND) return klass(fail_silently=fail_silently, **kwds)
Load an sms backend and return an instance of it. If backend is None (default) settings.SMS_BACKEND is used. Both fail_silently and other keyword arguments are used in the constructor of the backend. https://github.com/django/django/blob/master/django/core/mail/__init__.py#L28
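A hedged usage sketch; the dotted backend path is illustrative, not a real module:

# connection = get_sms_connection(
#     backend='myproject.sms.backends.console.SmsBackend',  # assumed path
#     fail_silently=True,
# )
# connection.send_messages(messages)  # send_messages is assumed per the email-backend analogy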