code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def init_app(self, app):
    """Initialise the storage extension from a Flask app's configuration.

    Reads the STORAGE_* keys from ``app.config``, re-initialises the
    storage backend, and registers the file server with the app.

    :param app: Flask application object
    :raises ValueError: if 'STORAGE_PROVIDER' is missing
    :raises IOError: if the LOCAL container directory is missing or invalid
    :return: None
    """
    provider = app.config.get("STORAGE_PROVIDER", None)
    key = app.config.get("STORAGE_KEY", None)
    secret = app.config.get("STORAGE_SECRET", None)
    container = app.config.get("STORAGE_CONTAINER", None)
    allowed_extensions = app.config.get("STORAGE_ALLOWED_EXTENSIONS", None)
    serve_files = app.config.get("STORAGE_SERVER", True)
    serve_files_url = app.config.get("STORAGE_SERVER_URL", "files")

    self.config["serve_files"] = serve_files
    self.config["serve_files_url"] = serve_files_url

    if not provider:
        raise ValueError("'STORAGE_PROVIDER' is missing")
    if provider.upper() == "LOCAL":
        # Guard against container=None: os.path.isdir(None) would raise a
        # TypeError instead of the intended, catchable IOError.
        if not container or not os.path.isdir(container):
            raise IOError("Local Container (directory) '%s' is not a "
                          "directory or doesn't exist for LOCAL provider"
                          % container)
    self.__init__(provider=provider,
                  key=key,
                  secret=secret,
                  container=container,
                  allowed_extensions=allowed_extensions)
    self._register_file_server(app)
To initiate with Flask :param app: Flask object :return:
def add_watch_point(self, string, rating, importance=5):
    """Register a watch point for this log session.

    Watch points feed the aggregation and extraction of key events.
    Each one carries a rating (anything from success to total failure)
    and an importance value for finer display control.

    :param string: the text to watch for
    :param rating: rating attached to the watch point
    :param importance: display weight (default 5)
    """
    self.watch_points.append({
        'string': string,
        'rating': rating,
        'importance': importance,
    })
For a log session you can add as many watch points which are used in the aggregation and extraction of key things that happen. Each watch point has a rating (up to you and can range from success to total failure and an importance for finer control of display
def temporarily_enabled(self):
    """Enable the cache for the duration of a ``with`` block (testing aid).

    The previous ``enabled`` setting is restored on exit, even when the
    body raises.
    """
    previous = self.options.enabled
    self.enable()
    try:
        yield
    finally:
        self.options.enabled = previous
Temporarily enable the cache (useful for testing)
def credential_delete(self, *ids):
    """Delete one or more credentials.

    :param ids: one or more credential ids
    """
    payload = {"credentials": [{"id": str(cred_id)} for cred_id in ids]}
    return self.raw_query("credential", "delete", data=payload)
Delete one or more credentials. :param ids: one or more credential ids
def add_house(self, complex: str, **kwargs):
    """Add a new house to the rumetr db."""
    self.check_complex(complex)
    url = 'developers/{developer}/complexes/{complex}/houses/'.format(
        developer=self.developer, complex=complex)
    self.post(url, data=kwargs)
Add a new house to the rumetr db
def guinieranalysis(samplenames, qranges=None, qmax_from_shanum=True,
                    prfunctions_postfix='', dist=None, plotguinier=True,
                    graph_extension='.png', dmax=None, dmax_from_shanum=False):
    """Perform Guinier analysis on the samples.

    Inputs:
        samplenames: list of sample names
        qranges: dict of (qmin, qmax) q-ranges per sample name. The special
            '__default__' key covers all samples without their own entry.
        qmax_from_shanum: use the qmax determined by the shanum program
            for the GNOM input.
        prfunctions_postfix: the figure showing the P(r) functions is saved
            as prfunctions_<prfunctions_postfix><graph_extension>
        dist: the sample-to-detector distance to use.
        plotguinier: if Guinier plots are needed.
        graph_extension: extension of the saved graph image files.
        dmax: dict of Dmax parameters per sample name. If missing or None,
            Dmax is determined automatically with DATGNOM; otherwise GNOM is
            used. '__default__' works as for `qranges`.
        dmax_from_shanum: if True, take Dmax from the shanum output.
    """
    figpr = plt.figure()
    ip = get_ipython()
    axpr = figpr.add_subplot(1, 1, 1)
    if qranges is None:
        qranges = {'__default__': (0, 1000000)}
    if dmax is None:
        dmax = {'__default__': None}
    if '__default__' not in qranges:
        qranges['__default__'] = (0, 1000000)
    if '__default__' not in dmax:
        dmax['__default__'] = None
    table_autorg = [['Name', 'Rg (nm)', 'I$_0$ (cm$^{-1}$ sr$^{-1}$)',
                     'q$_{min}$ (nm$^{-1}$)', 'q$_{max}$ (nm$^{-1}$)',
                     'qmin*Rg', 'qmax*Rg', 'quality', 'aggregation',
                     'Dmax (nm)', 'q$_{shanum}$ (nm$^{-1}$)']]
    table_gnom = [['Name', 'Rg (nm)', 'I$_0$ (cm$^{-1}$ sr$^{-1}$)',
                   'qmin (nm$^{-1}$)', 'qmax (nm$^{-1}$)', 'Dmin (nm)',
                   'Dmax (nm)', 'Total estimate', 'Porod volume (nm$^3$)']]
    results = {}
    for sn in samplenames:
        if sn not in qranges:
            print('Q-range not given for sample {}: using default one'.format(sn))
            qrange = qranges['__default__']
        else:
            qrange = qranges[sn]
        if sn not in dmax:
            dmax_ = dmax['__default__']
        else:
            dmax_ = dmax[sn]
        print('Using q-range for sample {}: {} <= q <= {}'.format(sn, qrange[0], qrange[1]))
        curve = getsascurve(sn, dist)[0].trim(*qrange).sanitize()
        curve.save(sn + '.dat')
        try:
            Rg, I0, qmin, qmax, quality, aggregation = autorg(sn + '.dat')
        except ValueError:
            print('Error running autorg on %s' % sn)
            continue
        dmax_shanum, nsh, nopt, qmaxopt = shanum(sn + '.dat')
        if qmax_from_shanum:
            curve_trim = curve.trim(qmin, qmaxopt)
        else:
            curve_trim = curve.trim(qmin, qrange[1])
        if dmax_from_shanum:
            # BUGFIX: previously this assigned the *flag* itself
            # (dmax_ = dmax_from_shanum, i.e. True), sending Dmax=True to
            # GNOM. Use the shanum-derived Dmax value instead.
            dmax_ = dmax_shanum
        curve_trim.save(sn + '_optrange.dat')
        if dmax_ is None:
            print('Calling DATGNOM for sample {} with Rg={}, q-range from {} to {}'.format(
                sn, Rg.val, curve_trim.q.min(), curve_trim.q.max()))
            gnompr, metadata = datgnom(sn + '_optrange.dat', Rg=Rg.val, noprint=True)
        else:
            print('Calling GNOM for sample {} with Rmax={}, q-range from {} to {}'.format(
                sn, dmax_, curve_trim.q.min(), curve_trim.q.max()))
            gnompr, metadata = gnom(curve_trim, dmax_)
        rg, i0, vporod = datporod(sn + '_optrange.out')
        axpr.errorbar(gnompr[:, 0], gnompr[:, 1], gnompr[:, 2], None, label=sn)
        if plotguinier:
            figsample = plt.figure()
            axgnomfit = figsample.add_subplot(1, 2, 1)
            curve.errorbar('b.', axes=axgnomfit, label='measured')
            axgnomfit.errorbar(metadata['qj'], metadata['jexp'], metadata['jerror'],
                               None, 'g.', label='gnom input')
            axgnomfit.loglog(metadata['qj'], metadata['jreg'], 'r-',
                             label='regularized by GNOM')
            figsample.suptitle(sn)
            axgnomfit.set_xlabel('q (nm$^{-1}$)')
            # raw strings: \S and \O are invalid escape sequences otherwise
            axgnomfit.set_ylabel(r'$d\Sigma/d\Omega$ (cm$^{-1}$ sr$^{-1}$)')
            axgnomfit.axvline(qmaxopt, 0, 1, linestyle='dashed', color='black', lw=2)
            axgnomfit.grid(True, which='both')
            axgnomfit.axis('tight')
            axgnomfit.legend(loc='best')
            axguinier = figsample.add_subplot(1, 2, 2)
            axguinier.errorbar(curve.q, curve.Intensity, curve.Error, curve.qError,
                               '.', label='Measured')
            q = np.linspace(qmin, qmax, 100)
            axguinier.plot(q, I0.val * np.exp(-q ** 2 * Rg.val ** 2 / 3),
                           label='AutoRg')
            axguinier.plot(q, metadata['I0_gnom'].val *
                           np.exp(-q ** 2 * metadata['Rg_gnom'].val ** 2 / 3),
                           label='Gnom')
            axguinier.set_xscale('power', exponent=2)
            axguinier.set_yscale('log')
            axguinier.set_xlabel('q (nm$^{-1}$)')
            axguinier.set_ylabel(r'$d\Sigma/d\Omega$ (cm$^{-1}$ sr$^{-1}$)')
            axguinier.legend(loc='best')
        # widen the displayed Guinier range by 5 points on each side
        idxmin = np.arange(len(curve))[curve.q <= qmin].max()
        idxmax = np.arange(len(curve))[curve.q >= qmax].min()
        idxmin = max(0, idxmin - 5)
        idxmax = min(len(curve) - 1, idxmax + 5)
        if plotguinier:
            curveguinier = curve.trim(curve.q[idxmin], curve.q[idxmax])
            axguinier.axis(xmax=curve.q[idxmax], xmin=curve.q[idxmin],
                           ymin=curveguinier.Intensity.min(),
                           ymax=curveguinier.Intensity.max())
            axguinier.grid(True, which='both')
        table_gnom.append(
            [sn, metadata['Rg_gnom'].tostring(extra_digits=2),
             metadata['I0_gnom'].tostring(extra_digits=2), metadata['qmin'],
             metadata['qmax'], metadata['dmin'], metadata['dmax'],
             metadata['totalestimate_corrected'], vporod])
        table_autorg.append([sn, Rg.tostring(extra_digits=2), I0,
                             '%.3f' % qmin, '%.3f' % qmax, qmin * Rg, qmax * Rg,
                             '%.1f %%' % (quality * 100), aggregation,
                             '%.3f' % dmax_shanum, '%.3f' % qmaxopt])
        if plotguinier:
            figsample.tight_layout()
            figsample.savefig(
                os.path.join(ip.user_ns['auximages_dir'],
                             'guinier_%s%s' % (sn, graph_extension)),
                dpi=600)
        results[sn] = {
            'Rg_autorg': Rg,
            'I0_autorg': I0,
            'qmin_autorg': qmin,
            'qmax_autorg': qmax,
            'quality': quality,
            'aggregation': aggregation,
            'dmax_autorg': dmax_shanum,
            'qmax_shanum': qmaxopt,
            'Rg_gnom': metadata['Rg_gnom'],
            'I0_gnom': metadata['I0_gnom'],
            'qmin_gnom': metadata['qmin'],
            'qmax_gnom': metadata['qmax'],
            'dmin_gnom': metadata['dmin'],
            'dmax_gnom': metadata['dmax'],
            'VPorod': vporod,
        }
    axpr.set_xlabel('r (nm)')
    axpr.set_ylabel('P(r)')
    axpr.legend(loc='best')
    axpr.grid(True, which='both')
    writemarkdown('## Results from autorg and shanum')
    tab = ipy_table.IpyTable(table_autorg)
    tab.apply_theme('basic')
    display(tab)
    writemarkdown('## Results from gnom')
    tab = ipy_table.IpyTable(table_gnom)
    tab.apply_theme('basic')
    if prfunctions_postfix and prfunctions_postfix[0] != '_':
        prfunctions_postfix = '_' + prfunctions_postfix
    figpr.tight_layout()
    figpr.savefig(
        os.path.join(ip.user_ns['auximages_dir'],
                     'prfunctions%s%s' % (prfunctions_postfix, graph_extension)),
        dpi=600)
    display(tab)
    return results
Perform Guinier analysis on the samples. Inputs: samplenames: list of sample names qranges: dictionary of q ranges for each sample. The keys are sample names. The special '__default__' key corresponds to all samples which do not have a key in the dict. qmax_from_shanum: use the qmax determined by the shanum program for the GNOM input. prfunctions_postfix: The figure showing the P(r) functions will be saved as prfunctions_<prfunctions_postfix><graph_extension> dist: the sample-to-detector distance to use. plotguinier: if Guinier plots are needed. graph_extension: the extension of the saved graph image files. dmax: Dict of Dmax parameters. If not found or None, determine automatically using DATGNOM. If found, GNOM is used. The special key '__default__' works in a similar fashion as for `qranges`.
def iter_packages(self, name, range_=None, paths=None):
    """Same as iter_packages in packages.py, but also applies this filter.

    Args:
        name (str): Name of the package, eg 'maya'.
        range_ (VersionRange or str): If provided, limits the versions
            returned to those in `range_`.
        paths (list of str, optional): paths to search for packages,
            defaults to `config.packages_path`.

    Returns:
        `Package` iterator.
    """
    yield from (pkg for pkg in iter_packages(name, range_, paths)
                if not self.excludes(pkg))
Same as iter_packages in packages.py, but also applies this filter. Args: name (str): Name of the package, eg 'maya'. range_ (VersionRange or str): If provided, limits the versions returned to those in `range_`. paths (list of str, optional): paths to search for packages, defaults to `config.packages_path`. Returns: `Package` iterator.
def bind(_self, **kwargs):
    """Bind attributes to the ``extra`` dict of each logged message record.

    This is used to add custom context to each logging call.

    Parameters
    ----------
    **kwargs
        Mapping between keys and values that will be added to the
        ``extra`` dict.

    Returns
    -------
    :class:`~Logger`
        A logger wrapping the core logger, but which sends record with
        the customized ``extra`` dict.

    Examples
    --------
    >>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
    1
    >>> server_logger = logger.bind(ip="192.168.0.200")
    >>> server_logger.info("Connected")
    192.168.0.200 - Connected
    """
    merged_extra = {**_self._extra, **kwargs}
    return Logger(
        merged_extra,
        _self._exception,
        _self._record,
        _self._lazy,
        _self._ansi,
        _self._raw,
        _self._depth,
    )
Bind attributes to the ``extra`` dict of each logged message record. This is used to add custom context to each logging call. Parameters ---------- **kwargs Mapping between keys and values that will be added to the ``extra`` dict. Returns ------- :class:`~Logger` A logger wrapping the core logger, but which sends record with the customized ``extra`` dict. Examples -------- >>> logger.add(sys.stderr, format="{extra[ip]} - {message}") 1 >>> class Server: ... def __init__(self, ip): ... self.ip = ip ... self.logger = logger.bind(ip=ip) ... def call(self, message): ... self.logger.info(message) ... >>> instance_1 = Server("192.168.0.200") >>> instance_2 = Server("127.0.0.1") >>> instance_1.call("First instance") 192.168.0.200 - First instance >>> instance_2.call("Second instance") 127.0.0.1 - Second instance
def create_record(self, type, name, data, priority=None, port=None,
                  weight=None, **kwargs):
    # pylint: disable=redefined-builtin
    """Add a new DNS record to the domain.

    :param str type: the type of DNS record to add (``"A"``, ``"CNAME"``,
        etc.)
    :param str name: the name (hostname, alias, etc.) of the new record
    :param str data: the value of the new record
    :param int priority: the priority of the new record (SRV and MX
        records only)
    :param int port: the port that the service is accessible on (SRV
        records only)
    :param int weight: the weight of records with the same priority (SRV
        records only)
    :param kwargs: additional fields to include in the API request
    :return: the new domain record
    :rtype: DomainRecord
    :raises DOAPIError: if the API endpoint replies with an error
    """
    api = self.doapi_manager
    # use a separate name for the request body to avoid clobbering the
    # `data` parameter
    payload = {
        "type": type,
        "name": name,
        "data": data,
        "priority": priority,
        "port": port,
        "weight": weight,
    }
    payload.update(kwargs)
    response = api.request(self.record_url, method='POST', data=payload)
    return self._record(response["domain_record"])
Add a new DNS record to the domain :param str type: the type of DNS record to add (``"A"``, ``"CNAME"``, etc.) :param str name: the name (hostname, alias, etc.) of the new record :param str data: the value of the new record :param int priority: the priority of the new record (SRV and MX records only) :param int port: the port that the service is accessible on (SRV records only) :param int weight: the weight of records with the same priority (SRV records only) :param kwargs: additional fields to include in the API request :return: the new domain record :rtype: DomainRecord :raises DOAPIError: if the API endpoint replies with an error
def _get_dimension_scales(self, dimension, preserve_domain=False): """ Return the list of scales corresponding to a given dimension. The preserve_domain optional argument specifies whether one should filter out the scales for which preserve_domain is set to True. """ if preserve_domain: return [ self.scales[k] for k in self.scales if ( k in self.scales_metadata and self.scales_metadata[k].get('dimension') == dimension and not self.preserve_domain.get(k) ) ] else: return [ self.scales[k] for k in self.scales if ( k in self.scales_metadata and self.scales_metadata[k].get('dimension') == dimension ) ]
Return the list of scales corresponding to a given dimension. The preserve_domain optional argument specifies whether one should filter out the scales for which preserve_domain is set to True.
def tool_classpath_from_products(products, key, scope):
    """Get a classpath for the tool previously registered under key in the given scope.

    :param products: The products of the current pants run.
    :type products: :class:`pants.goal.products.Products`
    :param string key: The key the tool configuration was registered under.
    :param string scope: The scope the tool configuration was registered under.
    :returns: A list of paths.
    :rtype: list
    """
    callbacks_by_scope = products.get_data('jvm_build_tools_classpath_callbacks') or {}
    callback = callbacks_by_scope.get(scope, {}).get(key)
    if not callback:
        raise TaskError('No bootstrap callback registered for {key} in {scope}'
                        .format(key=key, scope=scope))
    return callback()
Get a classpath for the tool previously registered under key in the given scope. :param products: The products of the current pants run. :type products: :class:`pants.goal.products.Products` :param string key: The key the tool configuration was registered under. :param string scope: The scope the tool configuration was registered under. :returns: A list of paths. :rtype: list
def nvmlDeviceGetMultiGpuBoard(handle):
    """Query whether the device sits on a multi-GPU board.

    Wraps the NVML entry point ``nvmlDeviceGetMultiGpuBoard``; the C API
    stores zero/non-zero into an unsigned int out-parameter.

    :param handle: NVML device handle identifying the target device
    :return: the out-parameter value passed through ``bytes_to_str``
        (non-zero presumably means the device is on a multi-GPU board —
        per the NVML header documentation)

    Errors from the native call are raised by ``_nvmlCheckReturn``.
    """
    multi_gpu = c_uint()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetMultiGpuBoard")
    _nvmlCheckReturn(fn(handle, byref(multi_gpu)))
    return bytes_to_str(multi_gpu.value)
r""" /** * Retrieves whether the device is on a Multi-GPU Board * Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value. * * For Fermi &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param multiGpuBool Reference in which to return a zero or non-zero value * to indicate whether the device is on a multi GPU board * * @return * - \ref NVML_SUCCESS if \a multiGpuBool has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetMultiGpuBoard
def idle_send_acks_and_nacks(self):
    '''Send packets to UAV in idle loop.

    Walks self.blocks_to_ack_and_nack (entries are
    [master, block, status, first_sent, last_sent] lists), sending at
    most max_blocks_to_send ACK/NACK messages per call. Entries are
    removed in place, so the index is only advanced when the current
    entry is kept. NOTE(review): entries appear to be appended elsewhere;
    confirm producer side before changing the list handling.
    '''
    max_blocks_to_send = 10
    blocks_sent = 0
    i = 0
    now = time.time()
    while (i < len(self.blocks_to_ack_and_nack)
           and blocks_sent < max_blocks_to_send):
        # print("ACKLIST: %s" %
        #       ([x[1] for x in self.blocks_to_ack_and_nack],))
        stuff = self.blocks_to_ack_and_nack[i]
        [master, block, status, first_sent, last_sent] = stuff
        if status == 1:
            # status 1: block fully received — ACK it and drop the entry.
            # print("DFLogger: ACKing block (%d)" % (block,))
            mavstatus = mavutil.mavlink.MAV_REMOTE_LOG_DATA_BLOCK_ACK
            (target_sys, target_comp) = self.sender
            self.master.mav.remote_log_block_status_send(target_sys,
                                                         target_comp,
                                                         block,
                                                         mavstatus)
            blocks_sent += 1
            del self.acking_blocks[block]
            del self.blocks_to_ack_and_nack[i]
            # deletion shifts the list left, so do not advance i
            continue
        if block not in self.missing_blocks:
            # we've received this block now
            del self.blocks_to_ack_and_nack[i]
            continue
        # give up on packet if we have seen one with a much higher
        # number (or after 60 seconds):
        if (self.last_seqno - block > 200) or (now - first_sent > 60):
            if self.log_settings.verbose:
                print("DFLogger: Abandoning block (%d)" % (block,))
            del self.blocks_to_ack_and_nack[i]
            del self.missing_blocks[block]
            self.abandoned += 1
            continue
        # entry kept: advance past it before possibly NACKing it
        i += 1
        # only send each nack every-so-often:
        if last_sent is not None:
            if now - last_sent < 0.1:
                continue
        if self.log_settings.verbose:
            print("DFLogger: Asking for block (%d)" % (block,))
        mavstatus = mavutil.mavlink.MAV_REMOTE_LOG_DATA_BLOCK_NACK
        (target_sys, target_comp) = self.sender
        self.master.mav.remote_log_block_status_send(target_sys,
                                                     target_comp,
                                                     block,
                                                     mavstatus)
        blocks_sent += 1
        # record the NACK time for the rate limit above (index 4 ==
        # last_sent in the entry list)
        stuff[4] = now
Send packets to UAV in idle loop
def add_dashboard_panel(self, dashboard, name, panel_type, metrics,
                        scope=None, sort_by=None, limit=None, layout=None):
    """**Description**
        Adds a panel to the dashboard. A panel can be a time series, or a
        top chart (i.e. bar chart), or a number panel.

    **Arguments**
        - **dashboard**: dashboard to edit
        - **name**: name of the new panel
        - **panel_type**: type of the new panel. Valid values are:
          ``timeSeries``, ``top``, ``number``
        - **metrics**: a list of dictionaries, specifying the metrics to
          show in the panel, and optionally, if there is only one metric, a
          grouping key to segment that metric by. Metric entries require an
          *aggregations* section specifying how to aggregate the metric
          across time and groups of containers/hosts. Allowed combinations:
          ``timeSeries``/``top``: 1 or more metrics OR 1 metric + 1 grouping
          key; ``number``: 1 metric only.
        - **scope**: filter to apply to the panel; must be based on metadata
          available in Sysdig Monitor; Example:
          *kubernetes.namespace.name='production' and container.image='nginx'*
        - **sort_by**: data sorting; optional dictionary of ``metric`` and
          ``mode`` (``desc`` or ``asc``)
        - **limit**: limit on the number of lines/bars shown in a
          ``timeSeries`` or ``top`` panel (default 10 for ``top``; values
          above 10 are not officially supported)
        - **layout**: size and position of the panel on the 12-column
          dashboard grid; a dictionary of ``row``, ``col``, ``size_x``,
          ``size_y``

    **Success Return Value**
        A dictionary showing the details of the edited dashboard.

    **Example**
        `examples/dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard.py>`_
    """
    # Defaults: panel occupies the full grid width at the top-left.
    panel_configuration = {
        'name': name,
        'showAs': None,
        'showAsType': None,
        'metrics': [],
        'gridConfiguration': {
            'col': 1,
            'row': 1,
            'size_x': 12,
            'size_y': 6
        }
    }

    if panel_type == 'timeSeries':
        #
        # In case of a time series, the current dashboard implementation
        # requires the timestamp to be explicitly specified as "key".
        # However, this function uses the same abstraction of the data API
        # that doesn't require to specify a timestamp key (you only need to
        # specify time window and sampling)
        #
        metrics = copy.copy(metrics)
        metrics.insert(0, {'id': 'timestamp'})

    #
    # Convert list of metrics to format used by Sysdig Monitor:
    # metrics with an 'aggregations' section become values ('v<i>'),
    # the rest become keys ('k<i>'), each numbered independently.
    #
    property_names = {}
    k_count = 0
    v_count = 0
    for i, metric in enumerate(metrics):
        property_name = 'v' if 'aggregations' in metric else 'k'

        if property_name == 'k':
            i = k_count
            k_count += 1
        else:
            i = v_count
            v_count += 1
        property_names[metric['id']] = property_name + str(i)

        panel_configuration['metrics'].append({
            'metricId': metric['id'],
            'aggregation': metric['aggregations']['time'] if 'aggregations' in metric else None,
            'groupAggregation': metric['aggregations']['group'] if 'aggregations' in metric else None,
            'propertyName': property_name + str(i)
        })

    panel_configuration['scope'] = scope
    # if chart scope is equal to dashboard scope, set it as non override
    panel_configuration['overrideFilter'] = ('scope' in dashboard and dashboard['scope'] != scope) or \
                                            ('scope' not in dashboard and scope != None)

    #
    # Configure panel type
    #
    if panel_type == 'timeSeries':
        panel_configuration['showAs'] = 'timeSeries'
        panel_configuration['showAsType'] = 'line'

        if limit != None:
            panel_configuration['paging'] = {
                'from': 0,
                'to': limit - 1
            }
    elif panel_type == 'number':
        panel_configuration['showAs'] = 'summary'
        panel_configuration['showAsType'] = 'summary'
    elif panel_type == 'top':
        panel_configuration['showAs'] = 'top'
        panel_configuration['showAsType'] = 'bars'

        # 'v0' is the first value-metric's property name (see loop above)
        if sort_by is None:
            panel_configuration['sorting'] = [{
                'id': 'v0',
                'mode': 'desc'
            }]
        else:
            panel_configuration['sorting'] = [{
                'id': property_names[sort_by['metric']],
                'mode': sort_by['mode']
            }]

        if limit is None:
            panel_configuration['paging'] = {
                'from': 0,
                'to': 10
            }
        else:
            panel_configuration['paging'] = {
                'from': 0,
                'to': limit - 1
            }

    #
    # Configure layout
    #
    if layout != None:
        panel_configuration['gridConfiguration'] = layout

    #
    # Clone existing dashboard...
    #
    dashboard_configuration = copy.deepcopy(dashboard)
    dashboard_configuration['id'] = None

    #
    # ... and add the new panel
    #
    dashboard_configuration['items'].append(panel_configuration)

    #
    # Update dashboard
    #
    res = requests.put(self.url + self._dashboards_api_endpoint + '/' + str(dashboard['id']),
                       headers=self.hdrs,
                       data=json.dumps({'dashboard': dashboard_configuration}),
                       verify=self.ssl_verify)
    return self._request_result(res)
**Description** Adds a panel to the dashboard. A panel can be a time series, or a top chart (i.e. bar chart), or a number panel. **Arguments** - **dashboard**: dashboard to edit - **name**: name of the new panel - **panel_type**: type of the new panel. Valid values are: ``timeSeries``, ``top``, ``number`` - **metrics**: a list of dictionaries, specifying the metrics to show in the panel, and optionally, if there is only one metric, a grouping key to segment that metric by. A metric is any of the entries that can be found in the *Metrics* section of the Explore page in Sysdig Monitor. Metric entries require an *aggregations* section specifying how to aggregate the metric across time and groups of containers/hosts. A grouping key is any of the entries that can be found in the *Show* or *Segment By* sections of the Explore page in Sysdig Monitor. Refer to the examples section below for ready to use code snippets. Note, certain panels allow certain combinations of metrics and grouping keys: - ``timeSeries``: 1 or more metrics OR 1 metric + 1 grouping key - ``top``: 1 or more metrics OR 1 metric + 1 grouping key - ``number``: 1 metric only - **scope**: filter to apply to the panel; must be based on metadata available in Sysdig Monitor; Example: *kubernetes.namespace.name='production' and container.image='nginx'*. - **sort_by**: Data sorting; The parameter is optional and it's a dictionary of ``metric`` and ``mode`` (it can be ``desc`` or ``asc``) - **limit**: This parameter sets the limit on the number of lines/bars shown in a ``timeSeries`` or ``top`` panel. In the case of more entities being available than the limit, the top entities according to the sort will be shown. The default value is 10 for ``top`` panels (for ``timeSeries`` the default is defined by Sysdig Monitor itself). Note that increasing the limit above 10 is not officially supported and may cause performance and rendering issues - **layout**: Size and position of the panel. 
The dashboard layout is defined by a grid of 12 columns, each row height is equal to the column height. For example, say you want to show 2 panels at the top: one panel might be 6 x 3 (half the width, 3 rows height) located in row 1 and column 1 (top-left corner of the viewport), the second panel might be 6 x 3 located in row 1 and position 7. The location is specified by a dictionary of ``row`` (row position), ``col`` (column position), ``size_x`` (width), ``size_y`` (height). **Success Return Value** A dictionary showing the details of the edited dashboard. **Example** `examples/dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard.py>`_
def get_build_output(self, process):
    """Parse the output of the ns-3 build process to extract the
    information that is needed to draw the progress bar.

    Args:
        process: the subprocess instance to listen to.

    Yields:
        [current, total] task counts parsed from lines of the form
        [current/total] printed by waf.
    """
    progress_pattern = re.compile(r'\[\s*(\d+?)/(\d+)\].*')
    while True:
        line = process.stdout.readline()
        if line == b'' and process.poll() is not None:
            # process finished; surface a failure loudly
            if process.returncode > 0:
                raise Exception("Compilation ended with an error"
                                ".\nSTDERR\n%s\nSTDOUT\n%s"
                                % (process.stderr.read(),
                                   process.stdout.read()))
            return
        if line:
            match = progress_pattern.search(line.strip().decode('utf-8'))
            if match is not None:
                yield [int(match.group(1)), int(match.group(2))]
Parse the output of the ns-3 build process to extract the information that is needed to draw the progress bar. Args: process: the subprocess instance to listen to.
def _mkOp(fn): """ Function to create variadic operator function :param fn: function to perform binary operation """ def op(*operands, key=None) -> RtlSignalBase: """ :param operands: variadic parameter of input uperands :param key: optional function applied on every operand before processing """ assert operands, operands top = None if key is not None: operands = map(key, operands) for s in operands: if top is None: top = s else: top = fn(top, s) return top return op
Function to create variadic operator function :param fn: function to perform binary operation
def main():
    """Set up the server."""
    args = _parse_args()
    if args.config:
        _add_devices_from_config(args)
    if args.default and not add('default', args.default):
        exit('invalid hostname')
    app.run(host='0.0.0.0', port=args.port)


def _parse_args():
    """Build the AFTV Server command-line parser and parse sys.argv."""
    parser = argparse.ArgumentParser(description='AFTV Server')
    parser.add_argument('-p', '--port', type=int, help='listen port',
                        default=5556)
    parser.add_argument('-d', '--default', help='default Amazon Fire TV host',
                        nargs='?')
    parser.add_argument('-c', '--config', type=str, help='Path to config file')
    return parser.parse_args()
Set up the server.
def _set_vni_any(self, v, load=False):
    """
    Setter method for vni_any, mapped from YANG variable
    /overlay/access_list/type/vxlan/standard/seq/vni_any (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vni_any is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vni_any() directly.

    Generated pyangbind setter: validates ``v`` against the YANG 'empty'
    type (modelled as YANGBool) and stores the wrapped value.
    """
    # Some wrapped values carry their underlying YANG type factory in
    # _utype; unwrap/reconstruct before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value with full YANG metadata; raises on type mismatch.
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="vni-any", rest_name="vni-any", parent=self, choice=(u'choice-vni', u'case-vni-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vni any', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as ValueError with a structured error description,
        # including the generated-type expression for debugging.
        raise ValueError({
            'error-string': """vni_any must be of a type compatible with empty""",
            'defined-type': "empty",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="vni-any", rest_name="vni-any", parent=self, choice=(u'choice-vni', u'case-vni-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vni any', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)""",
        })
    self.__vni_any = t
    # Notify the parent tree of the change when a _set hook exists.
    if hasattr(self, '_set'):
        self._set()
Setter method for vni_any, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/vni_any (empty) If this variable is read-only (config: false) in the source YANG file, then _set_vni_any is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vni_any() directly.
def transpose_func(classes, table):
    """
    Transpose table in place.

    :param classes: class labels that index the rows/columns of the table
    :type classes: list
    :param table: input matrix as a nested dict (``table[row][col]``)
    :type table: dict
    :return: the same dict object with rows and columns swapped
    :rtype: dict

    .. note:: ``table`` is modified in place and then returned; it is the
        same object, not a copy.
    """
    for i, row_class in enumerate(classes):
        for j, col_class in enumerate(classes):
            # Visit each strictly-lower-triangle cell once and swap it with
            # its mirror; tuple assignment avoids the temp variable.
            if i > j:
                table[row_class][col_class], table[col_class][row_class] = \
                    table[col_class][row_class], table[row_class][col_class]
    return table
Transpose table. :param classes: classes :type classes : list :param table: input matrix :type table : dict :return: transposed table as dict
def get_next_action(self, request, application, label, roles): """ Django view method. We provide a default detail view for applications. """ # We only provide a view for when no label provided if label is not None: return HttpResponseBadRequest("<h1>Bad Request</h1>") # only certain actions make sense for default view actions = self.get_actions(request, application, roles) # process the request in default view if request.method == "GET": context = self.context context.update({ 'application': application, 'actions': actions, 'state': self.name, 'roles': roles}) return render( template_name='kgapplications/common_detail.html', context=context, request=request) elif request.method == "POST": for action in actions: if action in request.POST: return action # we don't know how to handle this request. return HttpResponseBadRequest("<h1>Bad Request</h1>")
Django view method. We provide a default detail view for applications.
def node_done(self, ssid=None):
    """
    Release the servers that belong to the given ssid.

    The remote API returns no body for this call, so as a convenience the
    hosts associated with *ssid* are collected up front and returned.

    :param ssid: ssid of the server pool
    :return: dict mapping each released host to its full inventory record
    """
    if self.api_key is None:
        raise exceptions.ApiKeyRequired
    if ssid is None:
        raise exceptions.SsidRequired

    # Snapshot the hosts for this ssid before releasing them, since the
    # 'Node/done' endpoint replies with an empty body.
    requested_hosts = {
        host: self.full_inventory[host]
        for host, details in self.self_inventory.items()
        if details['comment'] == ssid
    }

    query = "key={key}&ssid={ssid}".format(key=self.api_key, ssid=ssid)
    resp, body = self.get('Node/done?%s' % query)
    return requested_hosts
Release the servers for the specified ssid. The API doesn't provide any kind of output, try to be helpful by providing the list of servers to be released. :param ssid: ssid of the server pool :return: [ requested_hosts ]
def rowsAboutToBeRemoved(self, parent, start, end):
    """Marks view for repaint. :qtdoc:`Re-implemented<QAbstractItemView.rowsAboutToBeRemoved>`"""
    # Flag the view as stale so the next paint does a full redraw.
    self._viewIsDirty = True
    # Let the base class perform the actual row-removal bookkeeping.
    super(StimulusView, self).rowsAboutToBeRemoved(parent, start, end)
Marks view for repaint. :qtdoc:`Re-implemented<QAbstractItemView.rowsAboutToBeRemoved>`
def _validate_tileset(self, tileset): """Validate the tileset name and ensure that it includes the username """ if '.' not in tileset: tileset = "{0}.{1}".format(self.username, tileset) pattern = '^[a-z0-9-_]{1,32}\.[a-z0-9-_]{1,32}$' if not re.match(pattern, tileset, flags=re.IGNORECASE): raise ValidationError( 'tileset {0} is invalid, must match r"{1}"'.format( tileset, pattern)) return tileset
Validate the tileset name and ensure that it includes the username
def take(self, obj):
    """Get cached value and clean cache.

    Reads the entry cached for *obj* (keyed via ``_get_cache_key``) from the
    thread-local cache and converts it into keyword arguments for a rebuild:

    * ``{'model', 'pks'}`` -> ``{'queryset': rows with those pks}``
    * ``{'obj'}`` still in DB -> ``{'obj': the cached instance}``
    * ``{'obj'}`` deleted since -> ``{'queryset': empty queryset}``

    The cache entry is cleaned afterwards via ``_clean_cache``.

    :raises KeyError: if nothing was cached for *obj*.
    """
    cached = self._thread_local.cache[self._get_cache_key(obj)]
    build_kwargs = {}
    if 'model' in cached and 'pks' in cached:
        # Re-query by primary keys so the returned rows reflect current data.
        build_kwargs['queryset'] = cached['model'].objects.filter(pk__in=cached['pks'])
    elif 'obj' in cached:
        if cached['obj'].__class__.objects.filter(pk=cached['obj'].pk).exists():
            build_kwargs['obj'] = cached['obj']
        else:
            # Object was deleted in the meantime.
            build_kwargs['queryset'] = cached['obj'].__class__.objects.none()
    self._clean_cache(obj)
    return build_kwargs
Get cached value and clean cache.
def send(self, msg, timeout=None):
    """
    Send a message over the serial device.

    :param can.Message msg: Message to send.

        .. note:: Flags like ``extended_id``, ``is_remote_frame`` and
            ``is_error_frame`` will be ignored.

        .. note:: If the timestamp is a float value it will be converted
            to an integer.

    :param timeout:
        This parameter will be ignored. The timeout value of the channel
        is used instead.

    :raises ValueError: if the timestamp or arbitration id does not fit in
        an unsigned 32-bit little-endian field.
    """
    try:
        timestamp = struct.pack('<I', int(msg.timestamp * 1000))
    except struct.error:
        raise ValueError('Timestamp is out of range')
    try:
        a_id = struct.pack('<I', msg.arbitration_id)
    except struct.error:
        raise ValueError('Arbitration Id is out of range')

    # Frame layout: 0xAA | timestamp(4) | dlc(1) | id(4) | data(dlc) | 0xBB
    frame = bytearray()
    frame.append(0xAA)
    frame.extend(timestamp[i] for i in range(0, 4))
    frame.append(msg.dlc)
    frame.extend(a_id[i] for i in range(0, 4))
    frame.extend(msg.data[i] for i in range(0, msg.dlc))
    frame.append(0xBB)
    self.ser.write(frame)
Send a message over the serial device. :param can.Message msg: Message to send. .. note:: Flags like ``extended_id``, ``is_remote_frame`` and ``is_error_frame`` will be ignored. .. note:: If the timestamp is a float value it will be converted to an integer. :param timeout: This parameter will be ignored. The timeout value of the channel is used instead.
def has_mixture_channel(val: Any) -> bool:
    """Returns whether the value has a mixture channel representation.

    In contrast to `has_mixture` this method falls back to checking whether
    the value has a unitary representation via `has_channel`.

    Returns:
        If `val` has a `_has_mixture_` method and its result is not
        NotImplemented, that result is returned. Otherwise, if `val` has a
        `_has_unitary_` method and its results is not NotImplemented, that
        result is returned. Otherwise, if the value has a `_mixture_` method
        that is not a non-default value, True is returned. Returns False if
        none of these functions.
    """
    getter = getattr(val, '_has_mixture_', None)
    if getter is not None:
        outcome = getter()
        if outcome is not NotImplemented:
            return outcome

    unitary_outcome = has_unitary(val)
    if unitary_outcome is not NotImplemented and unitary_outcome:
        return unitary_outcome

    # Last resort: attempt to compute the mixture itself.
    return mixture_channel(val, None) is not None
Returns whether the value has a mixture channel representation. In contrast to `has_mixture` this method falls back to checking whether the value has a unitary representation via `has_channel`. Returns: If `val` has a `_has_mixture_` method and its result is not NotImplemented, that result is returned. Otherwise, if `val` has a `_has_unitary_` method and its results is not NotImplemented, that result is returned. Otherwise, if the value has a `_mixture_` method that is not a non-default value, True is returned. Returns False if none of these functions.
def guess_xml_encoding(self, content):
    r"""Guess the encoding declared in the XML header.

    :param content: xml content
    :rtype: str or None
    """
    # No match yields None, matching the original falsy return.
    match = self.__regex['xml_encoding'].match(content)
    if not match:
        return match
    return match.group(1).lower()
r"""Guess encoding from xml header declaration. :param content: xml content :rtype: str or None
def clean(self): """Routine to return C/NOFS IVM data cleaned to the specified level Parameters ----------- inst : (pysat.Instrument) Instrument class object, whose attribute clean_level is used to return the desired level of data selectivity. Returns -------- Void : (NoneType) data in inst is modified in-place. Notes -------- Supports 'clean', 'dusty', 'dirty' """ # cleans cindi data if self.clean_level == 'clean': # choose areas below 550km # self.data = self.data[self.data.alt <= 550] idx, = np.where(self.data.altitude <= 550) self.data = self[idx,:] # make sure all -999999 values are NaN self.data.replace(-999999., np.nan, inplace=True) if (self.clean_level == 'clean') | (self.clean_level == 'dusty'): try: idx, = np.where(np.abs(self.data.ionVelmeridional) < 10000.) self.data = self[idx,:] except AttributeError: pass if self.clean_level == 'dusty': # take out all values where RPA data quality is > 1 idx, = np.where(self.data.RPAflag <= 1) self.data = self[idx,:] # IDM quality flags self.data = self.data[ (self.data.driftMeterflag<= 3) ] else: # take out all values where RPA data quality is > 0 idx, = np.where(self.data.RPAflag <= 0) self.data = self[idx,:] # IDM quality flags self.data = self.data[ (self.data.driftMeterflag<= 0) ] if self.clean_level == 'dirty': # take out all values where RPA data quality is > 4 idx, = np.where(self.data.RPAflag <= 4) self.data = self[idx,:] # IDM quality flags self.data = self.data[ (self.data.driftMeterflag<= 6) ] # basic quality check on drifts and don't let UTS go above 86400. idx, = np.where(self.data.time <= 86400.) self.data = self[idx,:] # make sure MLT is between 0 and 24 idx, = np.where((self.data.mlt >= 0) & (self.data.mlt <= 24.)) self.data = self[idx,:] return
Routine to return C/NOFS IVM data cleaned to the specified level Parameters ----------- inst : (pysat.Instrument) Instrument class object, whose attribute clean_level is used to return the desired level of data selectivity. Returns -------- Void : (NoneType) data in inst is modified in-place. Notes -------- Supports 'clean', 'dusty', 'dirty'
def _filehandle(self):
    """
    Return a filehandle to the file being tailed, with the position set
    to the current offset.
    """
    # (Re)open only when there is no handle yet or the old one was closed.
    if not self._fh or self._is_closed():
        # Prefer the rotated logfile (if rotation was detected) over the
        # configured filename.
        filename = self._rotated_logfile or self.filename
        if filename.endswith('.gz'):
            # NOTE(review): gzip.open(..., 'r') yields bytes on Python 3,
            # unlike the text-mode open below -- confirm intended.
            self._fh = gzip.open(filename, 'r')
        else:
            # Line-buffered text handle (buffering=1).
            self._fh = open(filename, "r", 1)
        if self.read_from_end and not exists(self._offset_file):
            # First run with read_from_end and no saved offset: skip all
            # existing content and start tailing at EOF.
            self._fh.seek(0, os.SEEK_END)
        else:
            self._fh.seek(self._offset)

    return self._fh
Return a filehandle to the file being tailed, with the position set to the current offset.
def isCompatible(self, other, cls):
    """
    Evaluate interpolation compatibility with other.

    Returns a ``(compatible, reporter)`` tuple: ``compatible`` is True only
    when the reporter recorded no fatal incompatibility, and ``reporter``
    holds the detailed comparison report.

    :raises TypeError: if *other* is not an instance of *cls*.
    """
    if not isinstance(other, cls):
        raise TypeError(
            """Compatibility between an instance of %r and an \
instance of %r can not be checked.""" % (cls.__name__, other.__class__.__name__))
    # Delegate the detailed checks to the subclass hook, collecting the
    # findings in a fresh reporter instance.
    reporter = self.compatibilityReporterClass(self, other)
    self._isCompatible(other, reporter)
    return not reporter.fatal, reporter
Evaluate interpolation compatibility with other.
def saddr(address):
    """Return a string representation for an address.

    The *address* parameter can be a pipe name, an IP address tuple, or a
    socket address. The return value is always a ``str`` instance.

    :raises TypeError: if *address* is neither a string nor a tuple of at
        least two elements.
    """
    # str check replaces six.string_types: on Python 3 they are equivalent,
    # and this removes the third-party `six` dependency.
    if isinstance(address, str):
        # Pipe names are already their own representation.
        return address
    elif isinstance(address, tuple) and len(address) >= 2 and ':' in address[0]:
        # IPv6: bracket the host so the port separator is unambiguous,
        # e.g. ('::1', 80) -> '[::1]:80'.
        return '[{}]:{}'.format(address[0], address[1])
    elif isinstance(address, tuple) and len(address) >= 2:
        # IPv4 or hostname tuple, e.g. ('1.2.3.4', 80) -> '1.2.3.4:80'.
        return '{}:{}'.format(*address)
    else:
        raise TypeError('illegal address type: {!s}'.format(type(address)))
Return a string representation for an address. The *address* parameter can be a pipe name, an IP address tuple, or a socket address. The return value is always a ``str`` instance.
def _split_header(header): """Turn Authorization: header into parameters.""" params = {} parts = header.split(',') for param in parts: # Ignore realm parameter. if param.find('realm') > -1: continue # Remove whitespace. param = param.strip() # Split key-value. param_parts = param.split('=', 1) # Remove quotes and unescape the value. params[param_parts[0]] = unquote(param_parts[1].strip('\"')) return params
Turn Authorization: header into parameters.
def solid_angle(center, coords):
    """
    Helper method to calculate the solid angle of a set of coords from the
    center.

    Args:
        center (3x1 array): Center to measure solid angle from.
        coords (Nx3 array): List of coords to determine solid angle.

    Returns:
        The solid angle.
    """
    # Displacement vectors from the center and their magnitudes.
    vectors = [np.subtract(point, center) for point in coords]
    norms = [np.linalg.norm(vec) for vec in vectors]

    # Accumulate the solid angle of each tetrahedron fanned out from the
    # first vertex of the facet.
    # Following: https://en.wikipedia.org/wiki/Solid_angle#Tetrahedron
    total = 0
    for i in range(1, len(vectors) - 1):
        j = i + 1
        numerator = np.abs(np.dot(vectors[0], np.cross(vectors[i], vectors[j])))
        denominator = (norms[0] * norms[i] * norms[j]
                       + norms[j] * np.dot(vectors[0], vectors[i])
                       + norms[i] * np.dot(vectors[0], vectors[j])
                       + norms[0] * np.dot(vectors[i], vectors[j]))
        if denominator == 0:
            # Degenerate tetrahedron: the arctan argument would blow up.
            partial = 0.5 * pi if numerator > 0 else -0.5 * pi
        else:
            partial = np.arctan(numerator / denominator)
        # Fold negative half-angles back into [0, pi) before doubling.
        total += (partial if partial > 0 else partial + np.pi) * 2
    return total
Helper method to calculate the solid angle of a set of coords from the center. Args: center (3x1 array): Center to measure solid angle from. coords (Nx3 array): List of coords to determine solid angle. Returns: The solid angle.
def create(parallel):
    """Create a queue based on the provided parallel arguments.

    TODO Startup/tear-down. Currently using default queue for testing
    """
    # Only these keys are meaningful to the queue backend.
    wanted = ["queue", "cores_per_job", "mem"]
    queue = {}
    for key, value in parallel.items():
        if key in wanted:
            queue[key] = value
    yield queue
Create a queue based on the provided parallel arguments. TODO Startup/tear-down. Currently using default queue for testing
def _set_show_firmware_option(self, v, load=False):
    """
    Setter method for show_firmware_option, mapped from YANG variable /show/show_firmware_dummy/show_firmware_option (container)

    If this variable is read-only (config: false) in the source YANG file, then _set_show_firmware_option is considered as a private
    method. Backends looking to populate this variable should do so via calling thisObj._set_show_firmware_option() directly.

    :param v: value to assign to the show_firmware_option container
    :param load: generated pyangbind convention flag; unused in this body
    :raises ValueError: if ``v`` is not compatible with the container type
    """
    # Unwrap YANG user types so YANGDynClass receives the raw value.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap with the full metadata for this container node from the
        # brocade-firmware YANG module.
        t = YANGDynClass(v,base=show_firmware_option.show_firmware_option, is_container='container', presence=False, yang_name="show-firmware-option", rest_name="firmware", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'show firmware', u'alt-name': u'firmware', u'display-when': u'(/local-node/swbd-number = "4000")'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Structured error payload used by generated callers for reporting.
        raise ValueError({
            'error-string': """show_firmware_option must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=show_firmware_option.show_firmware_option, is_container='container', presence=False, yang_name="show-firmware-option", rest_name="firmware", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'show firmware', u'alt-name': u'firmware', u'display-when': u'(/local-node/swbd-number = "4000")'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""",
        })

    self.__show_firmware_option = t
    # Fire the registered change hook, if any.
    if hasattr(self, '_set'):
        self._set()
Setter method for show_firmware_option, mapped from YANG variable /show/show_firmware_dummy/show_firmware_option (container) If this variable is read-only (config: false) in the source YANG file, then _set_show_firmware_option is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_firmware_option() directly.
def check_docstring_sections(self, definition, docstring):
    """D21{4,5}, D4{05,06,07,08,09,10}: Docstring sections checks.

    Check the general format of a sectioned docstring:
    '''This is my one-liner.

    Short Summary
    -------------
    This is my summary.

    Returns
    -------
    None.

    '''

    Section names appear in `SECTION_NAMES`.
    """
    if not docstring:
        return

    lines = docstring.split("\n")
    # A one-line docstring cannot contain sections.
    if len(lines) < 2:
        return

    # Section matching is case-insensitive.
    lower_section_names = [s.lower() for s in self.SECTION_NAMES]

    def _suspected_as_section(_line):
        # A line is a candidate when its leading words form a section name.
        result = self._get_leading_words(_line.lower())
        return result in lower_section_names

    # Finding our suspects.
    suspected_section_indices = [i for i, line in enumerate(lines)
                                 if _suspected_as_section(line)]

    SectionContext = namedtuple('SectionContext', ('section_name',
                                                   'previous_line',
                                                   'line',
                                                   'following_lines',
                                                   'original_index',
                                                   'is_last_section'))

    # First - create a list of possible contexts. Note that the
    # `following_lines` member is until the end of the docstring.
    contexts = (SectionContext(self._get_leading_words(lines[i].strip()),
                               lines[i - 1],
                               lines[i],
                               lines[i + 1:],
                               i,
                               False)
                for i in suspected_section_indices)

    # Now that we have manageable objects - rule out false positives.
    contexts = (c for c in contexts if self._is_a_docstring_section(c))

    # Now we shall trim the `following lines` field to only reach the
    # next section name.
    for a, b in pairwise(contexts, None):
        # `b is None` marks the last section of the docstring.
        end = -1 if b is None else b.original_index
        new_ctx = SectionContext(a.section_name,
                                 a.previous_line,
                                 a.line,
                                 lines[a.original_index + 1:end],
                                 a.original_index,
                                 b is None)
        for err in self._check_section(docstring, definition, new_ctx):
            yield err
D21{4,5}, D4{05,06,07,08,09,10}: Docstring sections checks. Check the general format of a sectioned docstring: '''This is my one-liner. Short Summary ------------- This is my summary. Returns ------- None. ''' Section names appear in `SECTION_NAMES`.
def import_key_pair(name, key, profile, key_type=None, **libcloud_kwargs):
    '''
    Import a new public key from string or a file path

    :param name: Key pair name.
    :type  name: ``str``

    :param key: Public key material, the string or a path to a file
    :type  key: ``str`` or path ``str``

    :param profile: The profile key
    :type  profile: ``str``

    :param key_type: The key pair type, either `FILE` or `STRING`. Will
        detect if not provided and assume that if the string is a path to
        an existing path it is a FILE, else STRING.
    :type  key_type: ``str``

    :param libcloud_kwargs: Extra arguments for the driver's
        import_key_pair_from_xxx method
    :type  libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_compute.import_key_pair pair1 key_value_data123 profile1
        salt myminion libcloud_compute.import_key_pair pair1 /path/to/key profile1
    '''
    conn = _get_driver(profile=profile)
    kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)

    # Treat the key as a file when it points at an existing path or the
    # caller explicitly asked for FILE handling; otherwise import the raw
    # string material.
    if os.path.exists(key) or key_type == 'FILE':
        importer = conn.import_key_pair_from_file
    else:
        importer = conn.import_key_pair_from_string
    return _simple_key_pair(importer(name, key, **kwargs))
Import a new public key from string or a file path :param name: Key pair name. :type name: ``str`` :param key: Public key material, the string or a path to a file :type key: ``str`` or path ``str`` :param profile: The profile key :type profile: ``str`` :param key_type: The key pair type, either `FILE` or `STRING`. Will detect if not provided and assume that if the string is a path to an existing path it is a FILE, else STRING. :type key_type: ``str`` :param libcloud_kwargs: Extra arguments for the driver's import_key_pair_from_xxx method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.import_key_pair pair1 key_value_data123 profile1 salt myminion libcloud_compute.import_key_pair pair1 /path/to/key profile1
def _skip_trampoline(handler):
    """Intercepts events from container handlers, emitting them only if they
    should not be skipped.
    """
    # Prime the coroutine: the first send supplies the initial read event and
    # a self-reference used to route later transitions back here.
    data_event, self = (yield None)
    delegate = handler
    event = None
    depth = 0
    while True:
        def pass_through():
            # Forward the pending read event to the delegate and unpack the
            # resulting transition.
            _trans = delegate.send(Transition(data_event, delegate))
            return _trans, _trans.delegate, _trans.event

        if data_event is not None and data_event.type is ReadEventType.SKIP:
            # Caller asked to skip the current container: drain delegate
            # events until the matching CONTAINER_END at or above our depth.
            while True:
                trans, delegate, event = pass_through()
                if event is not None:
                    if event.event_type is IonEventType.CONTAINER_END and event.depth <= depth:
                        break
                if event is None or event.event_type is IonEventType.INCOMPLETE:
                    # Out of data mid-skip; surface the (incomplete) event and
                    # wait for more input.
                    data_event, _ = yield Transition(event, self)
        else:
            trans, delegate, event = pass_through()
            # Track container nesting so a later SKIP knows where to stop.
            if event is not None and (event.event_type is IonEventType.CONTAINER_START or
                                      event.event_type is IonEventType.CONTAINER_END):
                depth = event.depth
        data_event, _ = yield Transition(event, self)
Intercepts events from container handlers, emitting them only if they should not be skipped.
def add_node(node, **kwds):
    """add_node from Sphinx

    Register a docutils node class and attach visit/depart methods to the
    relevant writer translators.

    :param node: docutils node class to register
    :param kwds: mapping of writer name ('html' or 'latex') to a
        ``(visit, depart)`` function tuple; other keys are ignored for
        compatibility
    :raises ValueError: if a value is not a ``(visit, depart)`` pair
    """
    nodes._add_node_class_names([node.__name__])
    # dict.items() instead of the Python 2-only iteritems(), which raises
    # AttributeError on Python 3.
    for key, val in kwds.items():
        try:
            visit, depart = val
        except ValueError:
            raise ValueError('Value for key %r must be a '
                             '(visit, depart) function tuple' % key)
        if key == 'html':
            from docutils.writers.html4css1 import HTMLTranslator as translator
        elif key == 'latex':
            from docutils.writers.latex2e import LaTeXTranslator as translator
        else:
            # ignore invalid keys for compatibility
            continue
        setattr(translator, 'visit_'+node.__name__, visit)
        if depart:
            setattr(translator, 'depart_'+node.__name__, depart)
add_node from Sphinx
def is_quote_artifact(orig_text, span):
    """Distinguish between quotes and units."""
    # A span that ends exactly where a quoted run ends is quote punctuation,
    # not a unit symbol.
    quoted_runs = re.finditer(r'("|\')[^ .,:;?!()*+-].*?("|\')', orig_text)
    return any(match.span()[1] == span[1] for match in quoted_runs)
Distinguish between quotes and units.
def process_raw_data(cls, raw_data):
    """Create a new model using raw API response.

    :param raw_data: dict decoded from the API response; may contain a
        ``properties.ipConfiguration`` sub-resource.
    :return: the model built by the parent class from the (possibly
        transformed) raw data.
    """
    properties = raw_data.get("properties", {})
    raw_content = properties.get("ipConfiguration", None)
    if raw_content is not None:
        # Replace the raw ipConfiguration payload with a typed Resource
        # before handing the data to the base implementation.
        resource = Resource.from_raw_data(raw_content)
        properties["ipConfiguration"] = resource
    return super(PublicIPAddresses, cls).process_raw_data(raw_data)
Create a new model using raw API response.
def maybe_timeout_options(self):
    """Implements the NailgunProtocol.TimeoutProvider interface."""
    # Only expose timeout options once an exit timeout has been started.
    if not self._exit_timeout_start_time:
        return None
    return NailgunProtocol.TimeoutOptions(self._exit_timeout_start_time,
                                          self._exit_timeout)
Implements the NailgunProtocol.TimeoutProvider interface.
def timestamp_datetime(self, timestamp):
    """Convert a Unix timestamp into a human-readable time string."""
    fmt = '%Y-%m-%d %H:%M:%S'
    # localtime() turns the 10-digit epoch value (e.g. 1332888820) into a
    # time.struct_time in the local timezone, e.g.
    # time.struct_time(tm_year=2012, tm_mon=3, tm_mday=28, ...).
    parts = time.localtime(timestamp)
    # strftime() then renders the struct_time in the normal date format.
    return time.strftime(fmt, parts)
将Unix时间戳转换为可读性的时间
def get_memory_annotations(cls, exclude=None):
    """Get annotations in memory which inherits from cls.

    :param tuple/type exclude: annotation type(s) to exclude from search.

    :return: found annotations which inherits from cls.
    :rtype: set
    """
    result = set()
    # Global registry of annotation instances keyed by annotation class.
    registry = Annotation.__ANNOTATIONS_IN_MEMORY__
    excluded = () if exclude is None else exclude

    for annotation_cls, annotations in registry.items():
        # Skip explicitly excluded annotation classes.
        if issubclass(annotation_cls, excluded):
            continue
        # Keep instances whose class inherits from cls.
        if issubclass(annotation_cls, cls):
            result |= annotations
    return result
Get annotations in memory which inherits from cls. :param tuple/type exclude: annotation type(s) to exclude from search. :return: found annotations which inherits from cls. :rtype: set
def load_data_split(proc_data_dir):
    """Loads a split dataset

    Args:
        proc_data_dir: Directory with the split and processed data

    Returns:
        (Training Data, Validation Data, Test Data)
    """
    # The three splits live in fixed filenames inside the processed dir.
    splits = [Dataset.load(path.join(proc_data_dir, fname))
              for fname in ('train.bin', 'val.bin', 'test.bin')]
    return splits[0], splits[1], splits[2]
Loads a split dataset Args: proc_data_dir: Directory with the split and processed data Returns: (Training Data, Validation Data, Test Data)
def _build(self, inputs, is_training): """Connects the module to some inputs. Args: inputs: Tensor, final dimension must be equal to embedding_dim. All other leading dimensions will be flattened and treated as a large batch. is_training: boolean, whether this connection is to training data. Returns: dict containing the following keys and values: quantize: Tensor containing the quantized version of the input. loss: Tensor containing the loss to optimize. perplexity: Tensor containing the perplexity of the encodings. encodings: Tensor containing the discrete encodings, ie which element of the quantized space each input element was mapped to. encoding_indices: Tensor containing the discrete encoding indices, ie which element of the quantized space each input element was mapped to. """ # Assert last dimension is same as self._embedding_dim input_shape = tf.shape(inputs) with tf.control_dependencies([ tf.Assert(tf.equal(input_shape[-1], self._embedding_dim), [input_shape])]): flat_inputs = tf.reshape(inputs, [-1, self._embedding_dim]) distances = (tf.reduce_sum(flat_inputs**2, 1, keepdims=True) - 2 * tf.matmul(flat_inputs, self._w) + tf.reduce_sum(self._w ** 2, 0, keepdims=True)) encoding_indices = tf.argmax(- distances, 1) encodings = tf.one_hot(encoding_indices, self._num_embeddings) encoding_indices = tf.reshape(encoding_indices, tf.shape(inputs)[:-1]) quantized = self.quantize(encoding_indices) e_latent_loss = tf.reduce_mean((tf.stop_gradient(quantized) - inputs) ** 2) q_latent_loss = tf.reduce_mean((quantized - tf.stop_gradient(inputs)) ** 2) loss = q_latent_loss + self._commitment_cost * e_latent_loss quantized = inputs + tf.stop_gradient(quantized - inputs) avg_probs = tf.reduce_mean(encodings, 0) perplexity = tf.exp(- tf.reduce_sum(avg_probs * tf.log(avg_probs + 1e-10))) return {'quantize': quantized, 'loss': loss, 'perplexity': perplexity, 'encodings': encodings, 'encoding_indices': encoding_indices,}
Connects the module to some inputs. Args: inputs: Tensor, final dimension must be equal to embedding_dim. All other leading dimensions will be flattened and treated as a large batch. is_training: boolean, whether this connection is to training data. Returns: dict containing the following keys and values: quantize: Tensor containing the quantized version of the input. loss: Tensor containing the loss to optimize. perplexity: Tensor containing the perplexity of the encodings. encodings: Tensor containing the discrete encodings, ie which element of the quantized space each input element was mapped to. encoding_indices: Tensor containing the discrete encoding indices, ie which element of the quantized space each input element was mapped to.
def compute(self):
    """Computes the tendencies for all state variables given current state
    and specified input.

    The function first computes all diagnostic processes. They don't produce
    any tendencies directly but they may affect the other processes (such as
    change in solar distribution). Subsequently, all tendencies and
    diagnostics for all explicit processes are computed.

    Tendencies due to implicit and adjustment processes need to be
    calculated from a state that is already adjusted after explicit
    alteration. For that reason the explicit tendencies are applied to the
    states temporarily. Now all tendencies from implicit processes are
    calculated by matrix inversions and, similar to the explicit tendencies,
    the implicit ones are applied to the states temporarily. Subsequently,
    all instantaneous adjustments are computed.

    Then the changes that were made to the states from explicit and implicit
    processes are removed again as this
    :class:`~climlab.process.time_dependent_process.TimeDependentProcess.compute()`
    function is supposed to calculate only tendencies and not apply them to
    the states.

    Finally, all calculated tendencies from all processes are collected for
    each state, summed up and stored in the dictionary ``self.tendencies``,
    which is an attribute of the time-dependent-process object for which the
    :class:`~climlab.process.time_dependent_process.TimeDependentProcess.compute()`
    method has been called.

    **Object attributes** \n

    During method execution following object attributes are modified:

    :ivar dict tendencies:  dictionary that holds tendencies for all states;
                            it is calculated for the current timestep by
                            adding up tendencies from explicit, implicit and
                            adjustment processes.
    :ivar dict diagnostics: process diagnostic dictionary is updated by
                            diagnostic dictionaries of subprocesses after
                            computation of tendencies.
    """
    # First reset tendencies to zero -- recomputing them is the point of
    # this method.
    for varname in self.tendencies:
        self.tendencies[varname] *= 0.
    if not self.has_process_type_list:
        self._build_process_type_list()
    tendencies = {}
    # Diagnostics produce no tendencies; run them for their side effects.
    ignored = self._compute_type('diagnostic')
    tendencies['explicit'] = self._compute_type('explicit')
    # Tendencies due to implicit and adjustment processes need to be
    # calculated from a state that is already adjusted after explicit stuff.
    # So apply the tendencies temporarily and then remove them again.
    for name, var in self.state.items():
        var += tendencies['explicit'][name] * self.timestep
    # Now compute all implicit processes -- matrix inversions.
    tendencies['implicit'] = self._compute_type('implicit')
    # Same deal ... temporarily apply tendencies from implicit step.
    for name, var in self.state.items():
        var += tendencies['implicit'][name] * self.timestep
    # Finally compute all instantaneous adjustments -- expressed as an
    # explicit forward step.
    tendencies['adjustment'] = self._compute_type('adjustment')
    # Now remove the temporary changes from the model state.
    for name, var in self.state.items():
        var -= ((tendencies['implicit'][name] +
                 tendencies['explicit'][name]) * self.timestep)
    # Sum up all subprocess tendencies.
    for proctype in ['explicit', 'implicit', 'adjustment']:
        for varname, tend in tendencies[proctype].items():
            self.tendencies[varname] += tend
    # Finally compute my own tendencies, if any.
    self_tend = self._compute()
    # Adjustment processes' _compute method returns an absolute adjustment;
    # it needs to be converted to a rate of change.
    # BUG FIX: compare strings with '==', not 'is' -- identity comparison of
    # string literals is implementation-dependent (SyntaxWarning on 3.8+).
    if self.time_type == 'adjustment':
        for varname, adj in self_tend.items():
            self_tend[varname] /= self.timestep
    for varname, tend in self_tend.items():
        self.tendencies[varname] += tend
    return self.tendencies
Computes the tendencies for all state variables given current state and specified input. The function first computes all diagnostic processes. They don't produce any tendencies directly but they may affect the other processes (such as change in solar distribution). Subsequently, all tendencies and diagnostics for all explicit processes are computed. Tendencies due to implicit and adjustment processes need to be calculated from a state that is already adjusted after explicit alteration. For that reason the explicit tendencies are applied to the states temporarily. Now all tendencies from implicit processes are calculated by matrix inversions and similar to the explicit tendencies, the implicit ones are applied to the states temporarily. Subsequently, all instantaneous adjustments are computed. Then the changes that were made to the states from explicit and implicit processes are removed again as this :class:`~climlab.process.time_dependent_process.TimeDependentProcess.compute()` function is supposed to calculate only tendencies and not apply them to the states. Finally, all calculated tendencies from all processes are collected for each state, summed up and stored in the dictionary ``self.tendencies``, which is an attribute of the time-dependent-process object, for which the :class:`~climlab.process.time_dependent_process.TimeDependentProcess.compute()` method has been called. **Object attributes** \n During method execution following object attributes are modified: :ivar dict tendencies: dictionary that holds tendencies for all states is calculated for current timestep through adding up tendencies from explicit, implicit and adjustment processes. :ivar dict diagnostics: process diagnostic dictionary is updated by diagnostic dictionaries of subprocesses after computation of tendencies.
def multiline_merge(lines, current_event, re_after, re_before):
    """Group raw lines into multi-line events.

    Events such as Python tracebacks or Java stack traces span several
    lines.  Two regular expressions drive the grouping:

    * a line matching ``re_before`` is glued to the *previous* line;
    * a line following one that matches ``re_after`` is glued to it.

    Because an event is only known to be finished once a new one starts,
    the trailing event is never returned here; it stays (mutated in
    place) inside ``current_event`` so the caller can feed it back into
    the next invocation.

    :param lines: iterable of raw input lines
    :param current_event: mutable list of lines belonging to the event
        currently being assembled (shared across successive calls)
    :param re_after: compiled regex or None
    :param re_before: compiled regex or None
    :return: list of completed events, each joined with newlines
    """
    completed = []
    for raw_line in lines:
        glue_to_previous = bool(re_before and re_before.match(raw_line))
        glue_after_last = bool(
            re_after and current_event and re_after.match(current_event[-1]))
        if not (glue_to_previous or glue_after_last):
            # This line opens a brand-new event: flush the one in progress.
            if current_event:
                completed.append('\n'.join(current_event))
            # Mutate in place -- the caller keeps a reference to this list.
            current_event.clear()
        current_event.append(raw_line)
    return completed
Merge multi-line events. Some events (like a Python traceback or a Java stack trace) span multiple lines. This method merges them using two regular expressions: regex_after and regex_before. If a line matches re_after, it will be merged with the next line. If a line matches re_before, it will be merged with the previous line. This function returns a list of complete events. Note that because we don't know whether an event is complete before another new event starts, the last event will not be returned but stored in current_event. You should pass the same current_event to successive calls to multiline_merge. current_event is a list of lines that belong to the same event.
def from_dict(input_dict, data=None):
    """
    Instantiate an SparseGPClassification object using the information
    in input_dict (built by the to_dict method).

    :param data: It is used to provide X and Y for the case when the model
        was saved using save_data=False in to_dict method.
    :type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`)
    :return: a new SparseGPClassification built from the deserialized model
    """
    import GPy
    # Rebuild the generic GPy model first, then copy it so the new
    # classification object does not share parameter state with it.
    m = GPy.core.model.Model.from_dict(input_dict, data)
    from copy import deepcopy
    sparse_gp = deepcopy(m)
    # Re-wrap the deserialized components in a SparseGPClassification.
    # NOTE(review): mean_function is passed positionally into the slot after
    # inference_method -- confirm this matches the constructor signature.
    return SparseGPClassification(sparse_gp.X, sparse_gp.Y, sparse_gp.Z, sparse_gp.kern, sparse_gp.likelihood,
                                  sparse_gp.inference_method, sparse_gp.mean_function, name='sparse_gp_classification')
Instantiate an SparseGPClassification object using the information in input_dict (built by the to_dict method). :param data: It is used to provide X and Y for the case when the model was saved using save_data=False in to_dict method. :type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`)
def apply_new_global_variable_name(self, path, new_gv_name):
    """Change global variable name/key according to handed string

    Updates the global variable name only if different and already in list store.
    Renaming is implemented as delete-then-set on the global variable
    manager; on failure the old name is kept and the list store refreshed.

    :param path: The path identifying the edited global variable tree view row, can be str, int or tuple.
    :param str new_gv_name: New global variable name
    """
    gv_name = self.list_store[path][self.NAME_STORAGE_ID]
    # No-op when the name is unchanged or the variable is locked/not editable.
    if gv_name == new_gv_name or not self.global_variable_is_editable(gv_name, 'Name change'):
        return

    data_value = self.model.global_variable_manager.get_representation(gv_name)
    data_type = self.model.global_variable_manager.get_data_type(gv_name)

    try:
        # Rename = remove old entry, re-create under the new key with the
        # same value and type.
        self.model.global_variable_manager.delete_variable(gv_name)
        self.model.global_variable_manager.set_variable(new_gv_name, data_value, data_type=data_type)
        gv_name = new_gv_name
    except (AttributeError, RuntimeError, TypeError) as e:
        # Keep the old name selected if the rename failed part-way.
        logger.warning("Can not apply new name '{0}'".format(e))

    self.update_global_variables_list_store()
    self.select_entry(gv_name)

    # informing the tab key feature handler function about the changed core element id
    if hasattr(self.tree_view_keypress_callback.__func__, "core_element_id"):
        self.tree_view_keypress_callback.__func__.core_element_id = gv_name
Change global variable name/key according handed string Updates the global variable name only if different and already in list store. :param path: The path identifying the edited global variable tree view row, can be str, int or tuple. :param str new_gv_name: New global variable name
def get_template(self, context, **kwargs):
    """Resolve the template used for the current context and arguments.

    A 'template' entry inside kwargs['params'] overrides ``self.template``
    before delegating to the parent implementation.
    """
    params = kwargs['params']
    if 'template' in params:
        self.template = params['template']
    return super(GoscaleTemplateInclusionTag, self).get_template(context, **kwargs)
Returns the template to be used for the current context and arguments.
def capture_message(sock, get_channel=False):
    """
    Captures a message from given socket.

    :param socket.socket sock:
        The socket to read a message from.
    :param bool get_channel:
        Find out which channel the message comes from.

    :return: The received message, or None on failure.
    """
    # Fetching the Arb ID, DLC and Data
    try:
        if get_channel:
            if HAS_NATIVE_SUPPORT:
                # Native CPython socketcan: recvfrom yields the frame and
                # the source address (whose first element is the channel).
                cf, addr = sock.recvfrom(CANFD_MTU)
                channel = addr[0] if isinstance(addr, tuple) else addr
            else:
                # Fallback path via libc.recvfrom into ctypes buffers.
                data = ctypes.create_string_buffer(CANFD_MTU)
                addr = ctypes.create_string_buffer(32)
                addrlen = ctypes.c_int(len(addr))
                received = libc.recvfrom(sock.fileno(), data, len(data),
                                         0, addr, ctypes.byref(addrlen))
                cf = data.raw[:received]
                # Figure out the channel name: decode sockaddr_can to an
                # interface index, then resolve it with SIOCGIFNAME.
                family, ifindex = struct.unpack_from("Hi", addr.raw)
                assert family == AF_CAN
                data = struct.pack("16xi", ifindex)
                res = fcntl.ioctl(sock, SIOCGIFNAME, data)
                channel = ctypes.create_string_buffer(res).value.decode()
        else:
            cf = sock.recv(CANFD_MTU)
            channel = None
    except socket.error as exc:
        raise can.CanError("Error receiving: %s" % exc)

    can_id, can_dlc, flags, data = dissect_can_frame(cf)
    #log.debug('Received: can_id=%x, can_dlc=%x, data=%s', can_id, can_dlc, data)

    # Fetching the timestamp via SIOCGSTAMP (kernel receive time).
    # NOTE(review): "@LL" assumes the platform's timeval is two longs --
    # Linux-specific; confirm before porting.
    binary_structure = "@LL"
    res = fcntl.ioctl(sock, SIOCGSTAMP, struct.pack(binary_structure, 0, 0))

    seconds, microseconds = struct.unpack(binary_structure, res)
    timestamp = seconds + microseconds * 1e-6

    # EXT, RTR, ERR flags -> boolean attributes
    #   /* special address description flags for the CAN_ID */
    #   #define CAN_EFF_FLAG 0x80000000U /* EFF/SFF is set in the MSB */
    #   #define CAN_RTR_FLAG 0x40000000U /* remote transmission request */
    #   #define CAN_ERR_FLAG 0x20000000U /* error frame */
    is_extended_frame_format = bool(can_id & CAN_EFF_FLAG)
    is_remote_transmission_request = bool(can_id & CAN_RTR_FLAG)
    is_error_frame = bool(can_id & CAN_ERR_FLAG)
    is_fd = len(cf) == CANFD_MTU
    bitrate_switch = bool(flags & CANFD_BRS)
    error_state_indicator = bool(flags & CANFD_ESI)

    if is_extended_frame_format:
        #log.debug("CAN: Extended")
        # TODO does this depend on SFF or EFF?
        arbitration_id = can_id & 0x1FFFFFFF
    else:
        #log.debug("CAN: Standard")
        arbitration_id = can_id & 0x000007FF

    msg = Message(timestamp=timestamp,
                  channel=channel,
                  arbitration_id=arbitration_id,
                  is_extended_id=is_extended_frame_format,
                  is_remote_frame=is_remote_transmission_request,
                  is_error_frame=is_error_frame,
                  is_fd=is_fd,
                  bitrate_switch=bitrate_switch,
                  error_state_indicator=error_state_indicator,
                  dlc=can_dlc,
                  data=data)

    #log_rx.debug('Received: %s', msg)

    return msg
Captures a message from given socket. :param socket.socket sock: The socket to read a message from. :param bool get_channel: Find out which channel the message comes from. :return: The received message, or None on failure.
def on_connected(self, headers, body):
    """
    Once the connection is established, and 'heart-beat' is found in the headers, we calculate the real
    heartbeat numbers (based on what the server sent and what was specified by the client) - if the
    heartbeats are not 0, we start up the heartbeat loop accordingly.

    :param dict headers: headers in the connection message
    :param body: the message body (unused here)
    """
    if 'heart-beat' in headers:
        # Negotiate effective (send, receive) intervals from the server's
        # advertised values and our own configured ones.
        self.heartbeats = utils.calculate_heartbeats(
            headers['heart-beat'].replace(' ', '').split(','), self.heartbeats)
        if self.heartbeats != (0, 0):
            # Header values are in milliseconds; sleep intervals in seconds.
            self.send_sleep = self.heartbeats[0] / 1000

            # by default, receive gets an additional grace of 50%
            # set a different heart-beat-receive-scale when creating the connection to override that
            self.receive_sleep = (self.heartbeats[1] / 1000) * self.heart_beat_receive_scale
            log.debug("Setting receive_sleep to %s", self.receive_sleep)

            # Give grace of receiving the first heartbeat
            self.received_heartbeat = monotonic() + self.receive_sleep

            self.running = True
            if self.heartbeat_thread is None:
                self.heartbeat_thread = utils.default_create_thread(
                    self.__heartbeat_loop)
                self.heartbeat_thread.name = "StompHeartbeat%s" % \
                    getattr(self.heartbeat_thread, "name", "Thread")
Once the connection is established, and 'heart-beat' is found in the headers, we calculate the real heartbeat numbers (based on what the server sent and what was specified by the client) - if the heartbeats are not 0, we start up the heartbeat loop accordingly. :param dict headers: headers in the connection message :param body: the message body
def merge_segments(filename, scan, cleanup=True, sizelimit=0):
    """ Merges cands/noise pkl files from multiple segments to single cands/noise file.

    Expects segment cands pkls with have (1) state dict and (2) cands dict.
    Writes tuple state dict and duple of numpy arrays
    A single pkl written per scan using root name fileroot.
    if cleanup, it will remove segments after merging.
    if sizelimit, it will reduce the output file to be less than this many MB.

    NOTE(review): this code is Python-2 only (text-mode pickle files,
    ``dict.iterkeys()``, ``cands.keys()[0]`` indexing) -- confirm the
    target interpreter before reuse.
    """
    workdir = os.path.dirname(filename)
    fileroot = os.path.basename(filename)

    # Collect per-segment cands/noise pickles for this scan.
    candslist = glob.glob(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + 'seg*.pkl'))
    noiselist = glob.glob(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + 'seg*.pkl'))
    candssegs = sorted([candsfile.rstrip('.pkl').split('seg')[1] for candsfile in candslist])
    noisesegs = sorted([noisefile.rstrip('.pkl').split('seg')[1] for noisefile in noiselist])

    # test for good list with segments
    if not candslist and not noiselist:
        logger.warn('candslist and noiselist are empty.')
        return

    # aggregate cands over segments (skip if a merged file already exists)
    if not os.path.exists(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl')):
        logger.info('Aggregating cands over segments %s for fileroot %s, scan %d' % (str(candssegs), fileroot, scan))
        logger.debug('%s' % candslist)

        cands = {}
        for candsfile in candslist:
            with open(candsfile, 'r') as pkl:
                state = pickle.load(pkl)
                result = pickle.load(pkl)
            for kk in result.keys():
                cands[kk] = result[kk]
        # 'state' here is the one from the last segment read.
        segment = state.pop('segment')  # remove this key, as it has no meaning after merging segments

        # optionally limit size by keeping only the strongest candidates
        if sizelimit and len(cands):
            logger.debug('Checking size of cands dictionary...')
            if 'snr2' in state['features']:
                snrcol = state['features'].index('snr2')
            elif 'snr1' in state['features']:
                snrcol = state['features'].index('snr1')
            # Estimate per-candidate size (MB) from one entry.
            candsize = sys.getsizeof(cands[cands.keys()[0]])/1e6
            maxlen = int(sizelimit/candsize)
            if len(cands) > maxlen:  # need to reduce length to newlen
                logger.info('cands dictionary of length %.1f would exceed sizelimit of %d MB. Trimming to strongest %d candidates' % (len(cands), sizelimit, maxlen))
                snrs = [abs(cands[k][snrcol]) for k in cands.iterkeys()]  # take top snrs
                snrsort = sorted(snrs, reverse=True)
                snrmax = snrsort[maxlen]  # get min snr for given length limit
                cands = {k: v for k,v in cands.items() if abs(v[snrcol]) > snrmax}  # new cands dict

        # write cands to single file
        with open(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl'), 'w') as pkl:
            pickle.dump(state, pkl, protocol=2)
            pickle.dump( (np.array(cands.keys()), np.array(cands.values())), pkl, protocol=2)

        if cleanup:
            # Only delete segment files once the merged file is in place.
            if os.path.exists(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl')):
                for candsfile in candslist:
                    os.remove(candsfile)
    else:
        logger.warn('Merged candsfile already exists for scan %d. Not merged.' % scan)

    # aggregate noise over segments (same pattern as cands above)
    if not os.path.exists(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl')):
        logger.info('Aggregating noise over segments %s for fileroot %s, scan %d' % (str(noisesegs), fileroot, scan))
        logger.debug('%s' % noiselist)

        noise = []
        for noisefile in noiselist:
            with open(noisefile, 'r') as pkl:
                result = pickle.load(pkl)   # gets all noises for segment as list
            noise += result

        # write noise to single file
        if len(noise):
            with open(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl'), 'w') as pkl:
                pickle.dump(noise, pkl, protocol=2)

        if cleanup:
            if os.path.exists(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl')):
                for noisefile in noiselist:
                    os.remove(noisefile)
    else:
        logger.warn('Merged noisefile already exists for scan %d. Not merged.' % scan)
Merges cands/noise pkl files from multiple segments into a single cands/noise file. Expects segment cands pkls to have (1) a state dict and (2) a cands dict. Writes a state dict and a duple of numpy arrays. A single pkl is written per scan using root name fileroot. If cleanup is set, the segment files are removed after merging. If sizelimit is set, the output file is reduced to be less than this many MB.
def one_of(s):
    '''Build a parser that accepts a single character contained in ``s``.'''
    @Parser
    def one_of_parser(text, index=0):
        in_range = index < len(text)
        if in_range and text[index] in s:
            # Consume exactly one character.
            return Value.success(index + 1, text[index])
        return Value.failure(index, 'one of {}'.format(s))
    return one_of_parser
Parser a char from specified string.
def get_access_token(client_id, client_secret):
    '''Request an OAuth access token.

    Parameters: client_id, client_secret
    Return: dictionary -- the decoded token payload on HTTP 200,
    otherwise a dict with 'status' and 'message' describing the failure.
    '''
    response = requests.post(
        token_url,
        data={'client_id': client_id, 'client_secret': client_secret},
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
    )
    if response.status_code != 200:
        return {'status': response.status_code, 'message': response.text}
    return response.json()
Name: token Parameters: client_id, client_secret Return: dictionary
def type(self):
    """Return the category of this Line as a string.

    Checks the ``is_*`` flags in a fixed precedence order
    (code, data, string, tail, unknown) and returns the first match,
    or None when no flag is set.

    Note: the previous implementation built a dict keyed by the *boolean
    values* of the flags, so the True/False keys silently overwrote each
    other and the answer depended on dict insertion order. An explicit
    chain makes the precedence deterministic.
    """
    if self.is_code:
        return "code"
    if self.is_data:
        return "data"
    if self.is_string:
        return "string"
    if self.is_tail:
        return "tail"
    if self.is_unknown:
        return "unknown"
    return None
return the type of the Line
def get_standard_form(self, data):
    """Roman schemes define multiple representations of the same devanAgarI character. This method gets a library-standard representation.

    data : a text in the given scheme.
    """
    if self.synonym_map is None:
        return data
    from indic_transliteration import sanscript
    # Round-trip through devanAgarI to collapse synonymous spellings.
    devanagari_text = sanscript.transliterate(
        _from=self.name, _to=sanscript.DEVANAGARI, data=data)
    return sanscript.transliterate(
        _from=sanscript.DEVANAGARI, _to=self.name, data=devanagari_text)
Roman schemes define multiple representations of the same devanAgarI character. This method gets a library-standard representation. data : a text in the given scheme.
def currencyFormat(_context, code, symbol, format,
                   currency_digits=True, decimal_quantization=True, name=''):
    """Handle currencyFormat subdirectives."""
    # Register the currency via a deferred configuration action so that
    # conflicting registrations are detected by the discriminator.
    discriminator = ('currency', name, code)
    action_args = (name, code, symbol, format,
                   currency_digits, decimal_quantization)
    _context.action(
        discriminator=discriminator,
        callable=_register_currency,
        args=action_args,
    )
Handle currencyFormat subdirectives.
def is_locked(self):
    """Return True, if URI is locked.

    Without a configured lock manager nothing can be locked, so the
    answer is always False in that case.
    """
    lock_manager = self.provider.lock_manager
    if lock_manager is None:
        return False
    return lock_manager.is_url_locked(self.get_ref_url())
Return True, if URI is locked.
def send_static_file(self, filename):
    """Serve a static file, preferring the active theme's folder.

    In 'api-only' mode no static files are served at all (404).  If the
    theme's static folder does not contain the file, fall back to the
    global static folder handled by the parent class.

    :param filename: static filename
    :return: response object
    """
    if self.config['MODE'] == 'api-only':
        # API-only deployments must not expose static assets.
        abort(404)
    theme_folder = getattr(self, 'theme_static_folder', None)
    if theme_folder:
        try:
            return send_from_directory(theme_folder, filename)
        except NotFound:
            # Not present in the theme; defer to the default folder.
            pass
    return super(CustomFlask, self).send_static_file(filename)
Send static files from the static folder in the current selected theme prior to the global static folder. :param filename: static filename :return: response object
def collect_hunt_results(self, hunt):
    """Download current set of files in results.

    Args:
      hunt: The GRR hunt object to download files from.

    Returns:
      list: tuples containing:
          str: human-readable description of the source of the
              collection. For example, the name of the source host.
          str: path to the collected data.
      Returns None when the output archive already exists on disk.

    Raises:
      ValueError: if approval is needed and approvers were not specified.
    """
    if not os.path.isdir(self.output_path):
        os.makedirs(self.output_path)

    output_file_path = os.path.join(
        self.output_path, '.'.join((self.hunt_id, 'zip')))

    # Avoid re-downloading an archive that was already fetched.
    if os.path.exists(output_file_path):
        print('{0:s} already exists: Skipping'.format(output_file_path))
        return None

    # Wraps the download so that missing approvals are requested first.
    self._check_approval_wrapper(
        hunt, self._get_and_write_archive, hunt, output_file_path)

    results = self._extract_hunt_results(output_file_path)
    print('Wrote results of {0:s} to {1:s}'.format(
        hunt.hunt_id, output_file_path))
    return results
Download current set of files in results. Args: hunt: The GRR hunt object to download files from. Returns: list: tuples containing: str: human-readable description of the source of the collection. For example, the name of the source host. str: path to the collected data. Raises: ValueError: if approval is needed and approvers were not specified.
def _read_vector(ctx: ReaderContext) -> vector.Vector:
    """Read a vector element from the input stream."""
    opening = ctx.reader.advance()
    # The dispatcher guarantees we are positioned on the opening bracket.
    assert opening == "["
    return _read_coll(ctx, vector.vector, "]", "vector")
Read a vector element from the input stream.
def make_tmp_name(name):
    """Generates a tmp name for a file or dir.

    The temp name sits in the same directory as `name`.  If it exists
    on disk at context exit time, it is deleted.
    """
    parent, base = os.path.split(name)
    candidate = os.path.join(parent, ".tmp-%s-%s" % (base, uuid4().hex))
    try:
        yield candidate
    finally:
        # Best-effort cleanup of whatever the caller left behind.
        safe_remove(candidate)
Generates a tmp name for a file or dir. This is a tempname that sits in the same dir as `name`. If it exists on disk at context exit time, it is deleted.
def spendables_for_address(address, netcode, format=None):
    """
    Return a list of Spendable objects for the given bitcoin address.

    Set format to "text" or "dict" to transform return value
    from an object to a string or dict.

    This is intended to be a convenience function. There is no way to know
    that the list returned is a complete list of spendables for the address
    in question.

    You can verify that they really do come from the existing transaction
    by calling tx_utils.validate_unspents.
    """
    if format:
        method = "as_%s" % format
    # Try each configured provider in turn; the first one that answers wins.
    for m in service_provider_methods("spendables_for_address", get_default_providers_for_netcode(netcode)):
        try:
            spendables = m(address)
            if format:
                spendables = [getattr(s, method)() for s in spendables]
            return spendables
        except Exception:
            # Deliberate best-effort: a failing provider just means we
            # fall through to the next one.
            pass
    # No provider succeeded.
    return []
Return a list of Spendable objects for the given bitcoin address. Set format to "text" or "dict" to transform return value from an object to a string or dict. This is intended to be a convenience function. There is no way to know that the list returned is a complete list of spendables for the address in question. You can verify that they really do come from the existing transaction by calling tx_utils.validate_unspents.
def _write_cdx_field(self, record, raw_file_record_size, raw_file_offset):
    '''Write the CDX field if needed.

    Only HTTP response WARC records produce a CDX line; everything else
    is ignored.  The line is appended to ``self._cdx_filename`` using
    ``self.CDX_DELIMINATOR`` between fields.
    '''
    # Only index records that are WARC responses carrying an HTTP response.
    if record.fields[WARCRecord.WARC_TYPE] != WARCRecord.RESPONSE \
            or not re.match(r'application/http; *msgtype *= *response',
                            record.fields[WARCRecord.CONTENT_TYPE]):
        return

    url = record.fields['WARC-Target-URI']

    _logger.debug('Writing CDX record {0}.', url)

    http_header = record.get_http_header()

    if http_header:
        mime_type = self.parse_mimetype(
            http_header.fields.get('Content-Type', '')
        ) or '-'
        response_code = str(http_header.status_code)
    else:
        # '-' is the CDX placeholder for an unavailable field.
        mime_type = '-'
        response_code = '-'

    timestamp = str(int(
        wpull.util.parse_iso8601_str(record.fields[WARCRecord.WARC_DATE])
    ))

    checksum = record.fields.get('WARC-Payload-Digest', '')

    # Only SHA-1 digests are emitted; anything else becomes a placeholder.
    if checksum.startswith('sha1:'):
        checksum = checksum.replace('sha1:', '', 1)
    else:
        checksum = '-'

    raw_file_record_size_str = str(raw_file_record_size)
    raw_file_offset_str = str(raw_file_offset)
    filename = os.path.basename(self._warc_filename)
    record_id = record.fields[WARCRecord.WARC_RECORD_ID]
    fields_strs = (
        url,
        timestamp,
        mime_type,
        response_code,
        checksum,
        raw_file_record_size_str,
        raw_file_offset_str,
        filename,
        record_id
    )

    with open(self._cdx_filename, mode='a', encoding='utf-8') as out_file:
        out_file.write(self.CDX_DELIMINATOR.join(fields_strs))
        out_file.write('\n')
Write the CDX field if needed.
def ptmsiReallocationComplete():
    """P-TMSI REALLOCATION COMPLETE Section 9.4.8

    Builds the GMM message as a protocol-discriminator header layered
    with the message-type field.
    """
    pd_header = TpPd(pd=0x3)
    msg_type = MessageType(mesType=0x11)  # 0b00010001
    return pd_header / msg_type
P-TMSI REALLOCATION COMPLETE Section 9.4.8
def sprand(m, n, density, format='csr'):
    """Return a random sparse matrix.

    Parameters
    ----------
    m, n : int
        shape of the result
    density : float
        target a matrix with nnz(A) = m*n*density, 0<=density<=1
    format : string
        sparse matrix format to return, e.g. 'csr', 'coo', etc.

    Return
    ------
    A : sparse matrix
        m x n sparse matrix

    Examples
    --------
    >>> from pyamg.gallery import sprand
    >>> A = sprand(5,5,3/5.0)

    """
    rows, cols = int(m), int(n)

    # Draw the sparsity pattern first, then overwrite its stored entries
    # with uniformly distributed random values.
    mat = _rand_sparse(rows, cols, density, format='csr')
    mat.data = sp.rand(mat.nnz)

    return mat.asformat(format)
Return a random sparse matrix. Parameters ---------- m, n : int shape of the result density : float target a matrix with nnz(A) = m*n*density, 0<=density<=1 format : string sparse matrix format to return, e.g. 'csr', 'coo', etc. Return ------ A : sparse matrix m x n sparse matrix Examples -------- >>> from pyamg.gallery import sprand >>> A = sprand(5,5,3/5.0)
def _verifyHostKey(self, hostKey, fingerprint):
    """Called when ssh transport requests us to verify a given host key.

    Returns a Deferred that fires with True when the fingerprint is
    already known, and errbacks with UnknownHostKey otherwise.
    """
    if fingerprint not in self.knownHosts:
        return defer.fail(UnknownHostKey(hostKey, fingerprint))
    return defer.succeed(True)
Called when ssh transport requests us to verify a given host key. Return a deferred that callback if we accept the key or errback if we decide to reject it.
def add_extra_headers(self, sample_names):
    """Collect sample headers not already covered by the defaults.

    Inspects the first sample's keys and returns those missing from
    ``self.header_names`` in the format expected by the caller:
    [(header_name, header_display_name), ...].  Returns [] when there
    are no samples at all.
    """
    if not sample_names:
        return []
    first_sample_keys = list(self.orient_data[sample_names[0]].keys())
    return [(key, key) for key in first_sample_keys
            if key not in self.header_names]
If there are samples, add any additional keys they might use to supplement the default headers. Return the headers headers for adding, with the format: [(header_name, header_display_name), ....]
def get_elb_names(self, region, config): """ :param region: name of a region :param config: Collector config dict :return: list of elb names to query in the given region """ # This function is ripe to be memoized but when ELBs are added/removed # dynamically over time, diamond will have to be restarted to pick # up the changes. region_dict = config.get('regions', {}).get(region, {}) if 'elb_names' not in region_dict: elb_conn = boto.ec2.elb.connect_to_region(region, **self.auth_kwargs) full_elb_names = \ [elb.name for elb in elb_conn.get_all_load_balancers()] # Regular expressions for ELBs we DO NOT want to get metrics on. matchers = \ [re.compile(regex) for regex in config.get('elbs_ignored', [])] # cycle through elbs get the list of elbs that don't match elb_names = [] for elb_name in full_elb_names: if matchers and any([m.match(elb_name) for m in matchers]): continue elb_names.append(elb_name) else: elb_names = region_dict['elb_names'] return elb_names
:param region: name of a region :param config: Collector config dict :return: list of elb names to query in the given region
def send_login_code(self, code, context, **kwargs):
    """
    Send a login code via SMS using the configured Twilio client.
    """
    sender = self.from_number or getattr(settings, 'DEFAULT_FROM_NUMBER')
    body = render_to_string(self.template_name, context)
    self.twilio_client.messages.create(
        to=code.user.phone_number,
        from_=sender,
        body=body,
    )
Send a login code via SMS
def prune(self, root):
    """Prune stale cache files

    If the option --cache-target-max-entry is greater than zero, then prune
    will remove all but n old cache files for each target/task.

    :param str root: The path under which cacheable artifacts will be cleaned
    """
    limit = self._max_entries_per_target
    # Nothing to do when pruning is disabled or the directory is missing.
    if limit and os.path.isdir(root):
        safe_rm_oldest_items_in_dir(root, limit)
Prune stale cache files If the option --cache-target-max-entry is greater than zero, then prune will remove all but n old cache files for each target/task. :param str root: The path under which cacheable artifacts will be cleaned
def _decode_response(response):
    """Strip off Gerrit's magic prefix and decode a response.

    :returns:
        Decoded JSON content as a dict, or raw text if content could not be
        decoded as JSON.

    :raises:
        requests.HTTPError if the response contains an HTTP error status code.
    """
    content_type = response.headers.get('content-type', '')
    logger.debug("status[%s] content_type[%s] encoding[%s]"
                 % (response.status_code, content_type, response.encoding))
    # Raise early on 4xx/5xx before attempting any decoding.
    response.raise_for_status()
    content = response.content.strip()
    if response.encoding:
        content = content.decode(response.encoding)
    if not content:
        logger.debug("no content in response")
        return content
    # Non-JSON payloads are returned verbatim.
    if content_type.split(';')[0] != 'application/json':
        return content
    # Gerrit prepends ")]}'" to JSON to defeat XSSI; drop it before parsing.
    if content.startswith(GERRIT_MAGIC_JSON_PREFIX):
        content = content[len(GERRIT_MAGIC_JSON_PREFIX):]
    try:
        return json.loads(content)
    except ValueError:
        logger.error('Invalid json content: %s', content)
        raise
Strip off Gerrit's magic prefix and decode a response. :returns: Decoded JSON content as a dict, or raw text if content could not be decoded as JSON. :raises: requests.HTTPError if the response contains an HTTP error status code.
def command_drop_tables(self, meta_name=None):
    '''
    Drops all tables without dropping a database::

        ./manage.py sqla:drop_tables [meta_name]

    Prompts for confirmation first; anything other than 'y' aborts.
    When ``meta_name`` is given, only the matching metadata is dropped.

    NOTE(review): the user-facing strings contain typos ("All data will
    lost", "Droping") -- left untouched here since fixing them would
    change runtime output.
    '''
    answer = six.moves.input(u'All data will lost. Are you sure? [y/N] ')
    if answer.strip().lower()!='y':
        sys.exit('Interrupted')
    def _drop_metadata_tables(metadata):
        # Any table is enough to resolve the bound engine for this metadata.
        table = next(six.itervalues(metadata.tables), None)
        if table is None:
            print('Failed to find engine')
        else:
            engine = self.session.get_bind(clause=table)
            drop_everything(engine)
            print('Done')
    if isinstance(self.metadata, MetaData):
        # Single-metadata configuration.
        print('Droping tables... ', end='')
        _drop_metadata_tables(self.metadata)
    else:
        # Mapping of name -> metadata; drop all or only the requested one.
        for current_meta_name, metadata in self.metadata.items():
            if meta_name not in (current_meta_name, None):
                continue
            print('Droping tables for {}... '.format(current_meta_name),
                  end='')
            _drop_metadata_tables(metadata)
Drops all tables without dropping a database:: ./manage.py sqla:drop_tables [meta_name]
def rewrite_return_as_assignments(func_node, interface):
    """Modify FunctionDef node to directly assign instead of return.

    Runs the _RewriteReturn transformer over the node and then repairs
    the location metadata the transformation leaves missing.
    """
    transformed = _RewriteReturn(interface).visit(func_node)
    ast.fix_missing_locations(transformed)
    return transformed
Modify FunctionDef node to directly assign instead of return.
def _peer_get_bfd(self, tx, rx, multiplier):
    """Get and merge the `bfd` config from global BGP.

    You should not use this method.
    You probably want `BGP.bfd`.

    Args:
        tx: XML document with the XML to get the transmit interval.
        rx: XML document with the XML to get the receive interval.
        multiplier: XML document with the XML to get the interval
            multiplier.

    Returns:
        Merged XML document.

    Raises:
        None
    """
    # Issue the three get_config calls in the same tx/rx/multiplier order.
    responses = [self._callback(request, handler='get_config')
                 for request in (tx, rx, multiplier)]
    tx_xml, rx_xml, mult_xml = (pynos.utilities.return_xml(str(resp))
                                for resp in responses)
    merged = pynos.utilities.merge_xml(tx_xml, rx_xml)
    return pynos.utilities.merge_xml(merged, mult_xml)
Get and merge the `bfd` config from global BGP. You should not use this method. You probably want `BGP.bfd`. Args: tx: XML document with the XML to get the transmit interval. rx: XML document with the XML to get the receive interval. multiplier: XML document with the XML to get the interval multiplier. Returns: Merged XML document. Raises: None
def enable_performance_data(self):
    """Enable performance data processing (globally)
    Format of the line that triggers function call::

    ENABLE_PERFORMANCE_DATA

    :return: None
    """
    # Only act (and record the modified attribute) when currently disabled.
    if not self.my_conf.process_performance_data:
        self.my_conf.modified_attributes |= \
            DICT_MODATTR["MODATTR_PERFORMANCE_DATA_ENABLED"].value
        self.my_conf.process_performance_data = True
        # Propagate the changed global setting and announce the new status.
        self.my_conf.explode_global_conf()
        self.daemon.update_program_status()
Enable performance data processing (globally) Format of the line that triggers function call:: ENABLE_PERFORMANCE_DATA :return: None
def wait(self, seconds=None, **kw): 'Wait in a loop and react to events as defined in the namespaces' # Use ping/pong to unblock recv for polling transport self._heartbeat_thread.hurry() # Use timeout to unblock recv for websocket transport self._transport.set_timeout(seconds=1) # Listen warning_screen = self._yield_warning_screen(seconds) for elapsed_time in warning_screen: if self._should_stop_waiting(**kw): break try: try: self._process_packets() except TimeoutError: pass except KeyboardInterrupt: self._close() raise except ConnectionError as e: self._opened = False try: warning = Exception('[connection error] %s' % e) warning_screen.throw(warning) except StopIteration: self._warn(warning) try: namespace = self.get_namespace() namespace._find_packet_callback('disconnect')() except PacketError: pass self._heartbeat_thread.relax() self._transport.set_timeout()
Wait in a loop and react to events as defined in the namespaces
def _align_orthologous_gene_pairwise(self, g_id, gapopen=10, gapextend=0.5, engine='needle', parse=True,
                                     force_rerun=False):
    """Align orthologous strain sequences to representative Protein sequence, save as new pickle.

    :param g_id: gene identifier used to locate the protein pickle
    :param gapopen: gap-open penalty passed to the aligner
    :param gapextend: gap-extend penalty passed to the aligner
    :param engine: alignment engine name (e.g. 'needle')
    :param parse: whether alignment output should be parsed
    :param force_rerun: redo the alignment even if the output pickle exists
    :return: tuple (g_id, path to the aligned-protein pickle)

    NOTE(review): the error branches ``return`` None while the normal path
    returns a tuple -- callers must handle both; confirm this asymmetry
    is intentional.
    """
    protein_seqs_aln_pickle_path = op.join(self.sequences_by_gene_dir,
                                           '{}_protein_withseqs_dis_aln.pckl'.format(g_id))

    # Skip work entirely when the aligned pickle already exists
    # (unless force_rerun is set).
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=protein_seqs_aln_pickle_path):
        protein_seqs_pickle_path = self.gene_protein_pickles[g_id]
        protein_pickle = ssbio.io.load_pickle(protein_seqs_pickle_path)

        if not protein_pickle.representative_sequence:
            log.error('{}: no representative sequence to align to'.format(g_id))
            return

        if len(protein_pickle.sequences) < 1:
            log.error('{}: no other sequences to align to'.format(g_id))
            return

        alignment_dir = op.join(self.sequences_by_gene_dir, g_id)
        ssbio.utils.make_dir(alignment_dir)
        protein_pickle.pairwise_align_sequences_to_representative(gapopen=gapopen, gapextend=gapextend,
                                                                  engine=engine, outdir=alignment_dir,
                                                                  parse=parse, force_rerun=force_rerun)
        # Persist the protein with alignments attached.
        protein_pickle.save_pickle(outfile=protein_seqs_aln_pickle_path)

    return g_id, protein_seqs_aln_pickle_path
Align orthologous strain sequences to representative Protein sequence, save as new pickle
def next_requests(self):
    """Returns a request to be scheduled or none."""
    use_set = self.settings.getbool('REDIS_START_URLS_AS_SET',
                                    defaults.START_URLS_AS_SET)
    pop = self.server.spop if use_set else self.server.lpop
    # XXX: Do we need to use a timeout here?
    made = 0
    # TODO: Use redis pipeline execution.
    while made < self.redis_batch_size:
        raw = pop(self.redis_key)
        if not raw:
            # Queue empty.
            break
        request = self.make_request_from_data(raw)
        if not request:
            self.logger.debug("Request not made from data: %r", raw)
            continue
        yield request
        made += 1

    if made:
        self.logger.debug("Read %s requests from '%s'", made, self.redis_key)
Returns a request to be scheduled or none.
def return_rri(self, begsam, endsam):
    """Return raw, irregularly-timed RRI.

    The data file has a 12-line textual header followed by one sample
    per line, with the RR value as the first tab-separated field.

    Parameters
    ----------
    begsam : int
        Index of the first sample to read (0-based, inclusive).
    endsam : int
        Index one past the last sample to read (exclusive).

    Returns
    -------
    ndarray
        float64 array of length ``endsam - begsam``. If the file ends
        early, trailing entries are left uninitialized (as allocated by
        ``empty``), matching the original behaviour.
    """
    from itertools import islice

    interval = endsam - begsam
    dat = empty(interval)
    with open(self.filename, 'rt') as f:
        # Skip the 12-line header with a plain loop; the original used a
        # side-effect list comprehension that built a throwaway list.
        for _ in range(12):
            next(f)
        # islice yields exactly the [begsam, endsam) data lines, so no
        # manual index/counter bookkeeping is needed.
        for k, datum in enumerate(islice(f, begsam, endsam)):
            # Keep only the first tab-separated field; a line without a
            # tab raises ValueError, same as the original's index().
            dat[k] = float64(datum[:datum.index('\t')])
    return dat
Return raw, irregularly-timed RRI.
def dumps(self, msg, use_bin_type=False):
    '''
    Run the correct dumps serialization format

    :param msg: the payload to serialize with msgpack
    :param use_bin_type: Useful for Python 3 support. Tells msgpack to
                         differentiate between 'str' and 'bytes' types
                         by encoding them differently.

                         Since this changes the wire protocol, this
                         option should not be used outside of IPC.
    '''
    def ext_type_encoder(obj):
        # Fallback encoder invoked by msgpack for types it cannot
        # serialize natively.
        if isinstance(obj, six.integer_types):
            # msgpack can't handle the very long Python longs for jids
            # Convert any very long longs to strings
            return six.text_type(obj)
        elif isinstance(obj, (datetime.datetime, datetime.date)):
            # msgpack doesn't support datetime.datetime and datetime.date datatypes.
            # So here we have converted these types to custom datatype
            # This is msgpack Extended types numbered 78
            return msgpack.ExtType(78, salt.utils.stringutils.to_bytes(
                obj.strftime('%Y%m%dT%H:%M:%S.%f')))
        # The same for immutable types
        elif isinstance(obj, immutabletypes.ImmutableDict):
            return dict(obj)
        elif isinstance(obj, immutabletypes.ImmutableList):
            return list(obj)
        elif isinstance(obj, (set, immutabletypes.ImmutableSet)):
            # msgpack can't handle set so translate it to tuple
            return tuple(obj)
        elif isinstance(obj, CaseInsensitiveDict):
            return dict(obj)
        # Nothing known exceptions found. Let msgpack raise its own.
        return obj

    try:
        if msgpack.version >= (0, 4, 0):
            # msgpack only supports 'use_bin_type' starting in 0.4.0.
            # Due to this, if we don't need it, don't pass it at all so
            # that under Python 2 we can still work with older versions
            # of msgpack.
            return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
                                            use_bin_type=use_bin_type,
                                            _msgpack_module=msgpack)
        else:
            return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
                                            _msgpack_module=msgpack)
    except (OverflowError, msgpack.exceptions.PackValueError):
        # msgpack<=0.4.6 don't call ext encoder on very long integers raising the error instead.
        # Convert any very long longs to strings and call dumps again.
        def verylong_encoder(obj, context):
            # Recursively walk the structure, stringifying any integer
            # that exceeds msgpack's 64-bit range.
            # Make sure we catch recursion here.
            objid = id(obj)
            if objid in context:
                return '<Recursion on {} with id={}>'.format(type(obj).__name__, id(obj))
            context.add(objid)

            if isinstance(obj, dict):
                for key, value in six.iteritems(obj.copy()):
                    obj[key] = verylong_encoder(value, context)
                return dict(obj)
            elif isinstance(obj, (list, tuple)):
                obj = list(obj)
                for idx, entry in enumerate(obj):
                    obj[idx] = verylong_encoder(entry, context)
                return obj
            # A value of an Integer object is limited from -(2^63) upto (2^64)-1 by MessagePack
            # spec. Here we care only of JIDs that are positive integers.
            if isinstance(obj, six.integer_types) and obj >= pow(2, 64):
                return six.text_type(obj)
            else:
                return obj

        msg = verylong_encoder(msg, set())
        if msgpack.version >= (0, 4, 0):
            return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
                                            use_bin_type=use_bin_type,
                                            _msgpack_module=msgpack)
        else:
            return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
                                            _msgpack_module=msgpack)
Run the correct dumps serialization format :param use_bin_type: Useful for Python 3 support. Tells msgpack to differentiate between 'str' and 'bytes' types by encoding them differently. Since this changes the wire protocol, this option should not be used outside of IPC.
def convert_meas_df_thellier_gui(meas_df_in, output):
    """
    Convert the column names of a measurement dataframe between MagIC 2
    and MagIC 3 conventions. Uses treat_step_num --> measurement_number
    when available, falling back to measurement --> measurement_number.

    Parameters
    ----------
    meas_df_in : pandas DataFrame
        input dataframe with measurement data
    output : int
        output to MagIC 2 or MagIC 3
    """
    col_map = get_thellier_gui_meas_mapping(meas_df_in, int(output))
    converted = meas_df_in.rename(columns=col_map)
    # Make sure a 'measurement' column survives the rename; restore it
    # from the input frame when the mapping renamed it away.
    if 'measurement' not in converted.columns:
        converted['measurement'] = meas_df_in['measurement']
    return converted
Take a measurement dataframe and convert column names from MagIC 2 --> 3 or vice versa. Use treat_step_num --> measurement_number if available, otherwise measurement --> measurement_number. Parameters ---------- meas_df_in : pandas DataFrame input dataframe with measurement data output : int output to MagIC 2 or MagIC 3
def _ScheduleTasks(self, storage_writer):
    """Schedules tasks.

    Pops event sources off a size-limited heap, wraps each in a task and
    hands it to the task manager (retry tasks take priority), merging
    completed task storage back into the session store as it goes. Runs
    until both the heap and the task manager's pending set are drained,
    or the operation is aborted.

    Args:
      storage_writer (StorageWriter): storage writer for a session storage.
    """
    logger.debug('Task scheduler started')

    self._status = definitions.STATUS_INDICATOR_RUNNING

    # TODO: make tasks persistent.

    # TODO: protect task scheduler loop by catch all and
    # handle abort path.

    event_source_heap = _EventSourceHeap()

    self._FillEventSourceHeap(
        storage_writer, event_source_heap, start_with_first=True)

    event_source = event_source_heap.PopEventSource()

    task = None
    while event_source or self._task_manager.HasPendingTasks():
        if self._abort:
            break

        try:
            if not task:
                # Prefer retrying a previously failed task before creating
                # a new one for the next event source.
                task = self._task_manager.CreateRetryTask()

            if not task and event_source:
                task = self._task_manager.CreateTask(self._session_identifier)
                task.file_entry_type = event_source.file_entry_type
                task.path_spec = event_source.path_spec
                # Consume the event source so the next loop iteration can
                # pop a fresh one.
                event_source = None

                self._number_of_consumed_sources += 1

                if self._guppy_memory_profiler:
                    self._guppy_memory_profiler.Sample()

            if task:
                if self._ScheduleTask(task):
                    logger.debug(
                        'Scheduled task {0:s} for path specification {1:s}'.format(
                            task.identifier, task.path_spec.comparable))

                    self._task_manager.SampleTaskStatus(task, 'scheduled')

                    task = None

                else:
                    # Keep the task and try to schedule it again on the
                    # next pass.
                    self._task_manager.SampleTaskStatus(task, 'schedule_attempted')

            self._MergeTaskStorage(storage_writer)

            if not event_source_heap.IsFull():
                self._FillEventSourceHeap(storage_writer, event_source_heap)

            if not task and not event_source:
                event_source = event_source_heap.PopEventSource()

        except KeyboardInterrupt:
            self._abort = True

            self._processing_status.aborted = True
            if self._status_update_callback:
                self._status_update_callback(self._processing_status)

    # Record a warning for every task that never completed.
    for task in self._task_manager.GetFailedTasks():
        warning = warnings.ExtractionWarning(
            message='Worker failed to process path specification',
            path_spec=task.path_spec)
        self._storage_writer.AddWarning(warning)
        self._processing_status.error_path_specs.append(task.path_spec)

    self._status = definitions.STATUS_INDICATOR_IDLE

    if self._abort:
        logger.debug('Task scheduler aborted')
    else:
        logger.debug('Task scheduler stopped')
Schedules tasks. Args: storage_writer (StorageWriter): storage writer for a session storage.
def set_order(self, order):
    """
    Configure the sort order. *order* is a list of dicts mirroring the
    arguments of `list.sort`; each dict must provide the keys 'key' and
    'reverse' (a boolean). `set_labels` must have been called first!
    """
    store = gtk.ListStore(bool, str)
    for entry in order:
        store.append((entry['reverse'], entry['key']))
    # TODO fill with __labels missing in order.
    self.set_model(store)
Takes a list of dictionaries. Those correspond to the arguments of `list.sort` and must contain the keys 'key' and 'reverse' (a boolean). You must call `set_labels` before this!
def index(self, x, x_link=None):
    """Make an index of record pairs.

    Use a custom function to make record pairs of one or two dataframes.
    Each function should return a pandas.MultiIndex with record pairs.

    Parameters
    ----------
    x: pandas.DataFrame
        A pandas DataFrame. When `x_link` is None, the algorithm makes
        record pairs within the DataFrame. When `x_link` is not empty,
        the algorithm makes pairs between `x` and `x_link`.
    x_link: pandas.DataFrame, optional
        A second DataFrame to link with the DataFrame x.

    Returns
    -------
    pandas.MultiIndex
        A pandas.MultiIndex with record pairs. Each record pair contains
        the index labels of two records.
    """
    if x is None:  # error
        raise ValueError("provide at least one dataframe")
    elif x_link is not None:  # linking (two arg)
        x = (x, x_link)
    elif isinstance(x, (list, tuple)):  # dedup or linking (single arg)
        x = tuple(x)
    else:  # dedup (single arg)
        x = (x,)

    # From here on, x is always a tuple of one or two DataFrames.
    if self.verify_integrity:
        for df in x:
            # NOTE(review): helper name is spelled "integrety" in this
            # codebase.
            self._verify_integrety(df)

    # linking: one tuple element per DataFrame being linked
    if not self._deduplication(x):
        pairs = self._link_index(*x)
        names = self._make_index_names(x[0].index.name, x[1].index.name)
    # deduplication: pair the single DataFrame with itself
    else:
        pairs = self._dedup_index(*x)
        names = self._make_index_names(x[0].index.name, x[0].index.name)

    pairs.rename(names, inplace=True)
    return pairs
Make an index of record pairs. Use a custom function to make record pairs of one or two dataframes. Each function should return a pandas.MultiIndex with record pairs. Parameters ---------- x: pandas.DataFrame A pandas DataFrame. When `x_link` is None, the algorithm makes record pairs within the DataFrame. When `x_link` is not empty, the algorithm makes pairs between `x` and `x_link`. x_link: pandas.DataFrame, optional A second DataFrame to link with the DataFrame x. Returns ------- pandas.MultiIndex A pandas.MultiIndex with record pairs. Each record pair contains the index labels of two records.
def eval_objfn(self):
    """Return the total objective function value followed by its
    individual components: data fidelity first, then the regularisation
    terms reported by ``obfn_reg``.
    """
    fidelity = self.obfn_dfd()
    regularisation = self.obfn_reg()
    total = fidelity + regularisation[0]
    return (total, fidelity) + regularisation[1:]
Compute components of objective function as well as total contribution to objective function.
def compare_baselines(old_baseline_filename, new_baseline_filename):
    """
    This function enables developers to more easily configure plugin settings,
    by comparing two generated baselines and highlighting their differences.

    For effective use, a few assumptions are made:
        1. Baselines are sorted by (filename, line_number, hash).
           This allows for a deterministic order, when doing a side-by-side
           comparison.

        2. Baselines are generated for the same codebase snapshot.
           This means that we won't have cases where secrets are moved around;
           only added or removed.

    NOTE: We don't want to do a version check, because we want to be able to
    use this functionality across versions (to see how the new version fares
    compared to the old one).

    :param old_baseline_filename: path to the baseline generated by the old
        configuration
    :param new_baseline_filename: path to the baseline generated by the new
        configuration
    :raises RedundantComparisonError: if both arguments name the same file
    """
    if old_baseline_filename == new_baseline_filename:
        raise RedundantComparisonError

    old_baseline = _get_baseline_from_file(old_baseline_filename)
    new_baseline = _get_baseline_from_file(new_baseline_filename)

    _remove_nonexistent_files_from_baseline(old_baseline)
    _remove_nonexistent_files_from_baseline(new_baseline)

    # We aggregate the secrets first, so that we can display a total count.
    secrets_to_compare = _get_secrets_to_compare(old_baseline, new_baseline)
    total_reviews = len(secrets_to_compare)
    current_index = 0

    secret_iterator = BidirectionalIterator(secrets_to_compare)
    for filename, secret, is_removed in secret_iterator:
        _clear_screen()
        current_index += 1

        header = '{} {}'
        if is_removed:
            # Removed secrets are judged against the old configuration's
            # plugins; added secrets against the new one's.
            plugins_used = old_baseline['plugins_used']
            header = header.format(
                colorize('Status:', AnsiColor.BOLD),
                '>> {} <<'.format(
                    colorize('REMOVED', AnsiColor.RED),
                ),
            )
        else:
            plugins_used = new_baseline['plugins_used']
            header = header.format(
                colorize('Status:', AnsiColor.BOLD),
                '>> {} <<'.format(
                    colorize('ADDED', AnsiColor.LIGHT_GREEN),
                ),
            )

        try:
            _print_context(
                filename,
                secret,
                current_index,
                total_reviews,
                plugins_used,
                additional_header_lines=header,
                force=is_removed,
            )
            decision = _get_user_decision(
                can_step_back=secret_iterator.can_step_back(),
                prompt_secret_decision=False,
            )
        except SecretNotFoundOnSpecifiedLineError:
            # The secret's line moved or vanished; still let the user
            # decide how to proceed.
            decision = _get_user_decision(prompt_secret_decision=False)

        if decision == 'q':
            print('Quitting...')
            break

        if decision == 'b':   # pragma: no cover
            # Step back: undo the increment for this item and for the one
            # about to be replayed.
            current_index -= 2
            secret_iterator.step_back_on_next_iteration()
This function enables developers to more easily configure plugin settings, by comparing two generated baselines and highlighting their differences. For effective use, a few assumptions are made: 1. Baselines are sorted by (filename, line_number, hash). This allows for a deterministic order, when doing a side-by-side comparison. 2. Baselines are generated for the same codebase snapshot. This means that we won't have cases where secrets are moved around; only added or removed. NOTE: We don't want to do a version check, because we want to be able to use this functionality across versions (to see how the new version fares compared to the old one).
def get_tablenames(cur):
    """ Convenience: return the names of all tables visible to the given
    SQLite cursor. """
    cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
    return [str(row[0]) for row in cur.fetchall()]
Convenience: return the names of all tables visible to the given SQLite cursor.
def changeLocalUserPassword(self, login, user, password):
    """
    Change a local user's password.

    Thrift-style synchronous client call: sends the request and blocks
    until the matching reply has been received.

    Parameters:
     - login
     - user
     - password
    """
    self.send_changeLocalUserPassword(login, user, password)
    self.recv_changeLocalUserPassword()
Parameters: - login - user - password
def text_fd_to_metric_families(fd):
    """Parse Prometheus text format from a file descriptor.

    This is a laxer parser than the main Go parser, so successful parsing does
    not imply that the parsed text meets the specification.

    Yields Metric's.
    """
    name = None
    allowed_names = []  # sample names that may belong to the current metric
    eof = False
    seen_metrics = set()  # metric names already yielded, to reject duplicates

    def build_metric(name, documentation, typ, unit, samples):
        # Validate the accumulated metadata/samples and wrap them in a
        # Metric; called every time a metric's last sample has been seen.
        if name in seen_metrics:
            raise ValueError("Duplicate metric: " + name)
        seen_metrics.add(name)
        if typ is None:
            typ = 'unknown'
        if documentation is None:
            documentation = ''
        if unit is None:
            unit = ''
        if unit and not name.endswith("_" + unit):
            raise ValueError("Unit does not match metric name: " + name)
        if unit and typ in ['info', 'stateset']:
            raise ValueError("Units not allowed for this metric type: " + name)
        if typ in ['histogram', 'gaugehistogram']:
            _check_histogram(samples, name)
        metric = Metric(name, documentation, typ, unit)
        # TODO: check labelvalues are valid utf8
        metric.samples = samples
        return metric

    for line in fd:
        if line[-1] == '\n':
            line = line[:-1]

        if eof:
            raise ValueError("Received line after # EOF: " + line)

        if line == '# EOF':
            eof = True
        elif line.startswith('#'):
            # Metadata line: "# HELP|TYPE|UNIT <name> <payload>".
            parts = line.split(' ', 3)
            if len(parts) < 4:
                raise ValueError("Invalid line: " + line)
            if parts[2] == name and samples:
                raise ValueError("Received metadata after samples: " + line)
            if parts[2] != name:
                if name is not None:
                    yield build_metric(name, documentation, typ, unit, samples)
                # New metric
                name = parts[2]
                unit = None
                typ = None
                documentation = None
                group = None
                seen_groups = set()
                group_timestamp = None
                group_timestamp_samples = set()
                samples = []
                allowed_names = [parts[2]]

            if parts[1] == 'HELP':
                if documentation is not None:
                    raise ValueError("More than one HELP for metric: " + line)
                if len(parts) == 4:
                    documentation = _unescape_help(parts[3])
                elif len(parts) == 3:
                    raise ValueError("Invalid line: " + line)
            elif parts[1] == 'TYPE':
                if typ is not None:
                    raise ValueError("More than one TYPE for metric: " + line)
                typ = parts[3]
                if typ == 'untyped':
                    raise ValueError("Invalid TYPE for metric: " + line)
                # Each type defines which sample-name suffixes it may emit.
                allowed_names = {
                    'counter': ['_total', '_created'],
                    'summary': ['_count', '_sum', '', '_created'],
                    'histogram': ['_count', '_sum', '_bucket', '_created'],
                    'gaugehistogram': ['_gcount', '_gsum', '_bucket'],
                    'info': ['_info'],
                }.get(typ, [''])
                allowed_names = [name + n for n in allowed_names]
            elif parts[1] == 'UNIT':
                if unit is not None:
                    raise ValueError("More than one UNIT for metric: " + line)
                unit = parts[3]
            else:
                raise ValueError("Invalid line: " + line)
        else:
            # Sample line.
            sample = _parse_sample(line)
            if sample.name not in allowed_names:
                if name is not None:
                    yield build_metric(name, documentation, typ, unit, samples)
                # Start an unknown metric.
                name = sample.name
                documentation = None
                unit = None
                typ = 'unknown'
                samples = []
                group = None
                group_timestamp = None
                group_timestamp_samples = set()
                seen_groups = set()
                allowed_names = [sample.name]

            if typ == 'stateset' and name not in sample.labels:
                raise ValueError("Stateset missing label: " + line)
            if (typ in ['histogram', 'gaugehistogram'] and name + '_bucket' == sample.name
                    and (float(sample.labels.get('le', -1)) < 0
                         or sample.labels['le'] != floatToGoString(sample.labels['le']))):
                raise ValueError("Invalid le label: " + line)
            if (typ == 'summary' and name == sample.name
                    and (not (0 <= float(sample.labels.get('quantile', -1)) <= 1)
                         or sample.labels['quantile'] != floatToGoString(sample.labels['quantile']))):
                raise ValueError("Invalid quantile label: " + line)

            # Samples sharing the same non-suffix labels form a "group";
            # groups must be contiguous and share timestamp semantics.
            g = tuple(sorted(_group_for_sample(sample, name, typ).items()))
            if group is not None and g != group and g in seen_groups:
                raise ValueError("Invalid metric grouping: " + line)
            if group is not None and g == group:
                if (sample.timestamp is None) != (group_timestamp is None):
                    raise ValueError("Mix of timestamp presence within a group: " + line)
                if group_timestamp is not None and group_timestamp > sample.timestamp and typ != 'info':
                    raise ValueError("Timestamps went backwards within a group: " + line)
            else:
                group_timestamp_samples = set()

            series_id = (sample.name, tuple(sorted(sample.labels.items())))
            if sample.timestamp != group_timestamp or series_id not in group_timestamp_samples:
                # Not a duplicate due to timestamp truncation.
                samples.append(sample)
            group_timestamp_samples.add(series_id)

            group = g
            group_timestamp = sample.timestamp
            seen_groups.add(g)

            if typ == 'stateset' and sample.value not in [0, 1]:
                raise ValueError("Stateset samples can only have values zero and one: " + line)
            if typ == 'info' and sample.value != 1:
                raise ValueError("Info samples can only have value one: " + line)
            if typ == 'summary' and name == sample.name and sample.value < 0:
                raise ValueError("Quantile values cannot be negative: " + line)
            if sample.name[len(name):] in ['_total', '_sum', '_count', '_bucket', '_gcount', '_gsum'] and math.isnan(
                    sample.value):
                raise ValueError("Counter-like samples cannot be NaN: " + line)
            if sample.name[len(name):] in ['_total', '_sum', '_count', '_bucket', '_gcount', '_gsum'] and sample.value < 0:
                raise ValueError("Counter-like samples cannot be negative: " + line)
            if sample.exemplar and not (
                    typ in ['histogram', 'gaugehistogram']
                    and sample.name.endswith('_bucket')):
                raise ValueError("Invalid line only histogram/gaugehistogram buckets can have exemplars: " + line)

    if name is not None:
        yield build_metric(name, documentation, typ, unit, samples)

    if not eof:
        raise ValueError("Missing # EOF at end")
Parse Prometheus text format from a file descriptor. This is a laxer parser than the main Go parser, so successful parsing does not imply that the parsed text meets the specification. Yields Metric's.
def schedule_host_check(self, host, check_time):
    """Schedule a check on a host
    Format of the line that triggers function call::

    SCHEDULE_HOST_CHECK;<host_name>;<check_time>

    :param host: host to check
    :type host: alignak.object.host.Host
    :param check_time: time to check (presumably a POSIX timestamp —
        confirm against Alignak external-command docs)
    :return: None
    """
    # Ask the scheduler to run the next check at check_time (force_time)
    # without forcing a check that would otherwise be disabled
    # (force=False).
    host.schedule(self.daemon.hosts, self.daemon.services, self.daemon.timeperiods,
                  self.daemon.macromodulations, self.daemon.checkmodulations,
                  self.daemon.checks,
                  force=False, force_time=check_time)
    # Broadcast the host's updated status as a brok.
    self.send_an_element(host.get_update_status_brok())
Schedule a check on a host Format of the line that triggers function call:: SCHEDULE_HOST_CHECK;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: :return: None
def delete_quick(self, get_count=False):
    """
    Delete the table's rows without cascading and without prompting the
    user. Fails if this table has populated dependent tables.
    """
    sql = 'DELETE FROM ' + self.full_table_name + self.where_clause
    self.connection.query(sql)
    if get_count:
        count = self.connection.query("SELECT ROW_COUNT()").fetchone()[0]
    else:
        count = None
    # Log a truncated copy of the statement for the audit trail.
    self._log(sql[:255])
    return count
Deletes the table without cascading and without user prompt. If this table has populated dependent tables, this will fail.
def convert_coord(coord_from, matrix_file, base_to_aligned=True):
    '''Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate
    to transform it. By default, the 3dAllineate matrix transforms from base to aligned space;
    to get the inverse transform set ``base_to_aligned`` to ``False``

    :param coord_from: sequence of 3 coordinates (x, y, z)
    :param matrix_file: text file containing 12 affine parameters;
        ``#`` comment lines and blank lines are ignored
    :param base_to_aligned: if ``False``, apply the inverse transform
    :return: numpy array of the 3 transformed coordinates, or ``False``
        if the matrix file is malformed
    '''
    with open(matrix_file) as f:
        try:
            # Skip blank lines as well as comments; previously a blank
            # line raised IndexError inside a bare except and was
            # misreported as a value-parsing error.
            values = [float(y) for y in ' '.join(
                [x for x in f.readlines() if x.strip() and x.strip()[0] != '#']
            ).strip().split()]
        except ValueError:
            # Only parsing failures mean a bad file; the old bare
            # ``except:`` also swallowed KeyboardInterrupt/SystemExit.
            nl.notify('Error reading values from matrix file %s' % matrix_file, level=nl.level.error)
            return False
    if len(values) != 12:
        nl.notify('Error: found %d values in matrix file %s (expecting 12)' % (len(values), matrix_file),
                  level=nl.level.error)
        return False
    # Promote the 3x4 affine parameters to a 4x4 homogeneous matrix.
    matrix = np.vstack((np.array(values).reshape((3, -1)), [0, 0, 0, 1]))
    if not base_to_aligned:
        matrix = np.linalg.inv(matrix)
    # Apply to the homogeneous coordinate and drop the trailing 1.
    return np.dot(matrix, list(coord_from) + [1])[:3]
Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate to transform it. By default, the 3dAllineate matrix transforms from base to aligned space; to get the inverse transform set ``base_to_aligned`` to ``False``
def query(self, query_dict: Dict[str, Any]) -> None:
    """Override the URL's query component with *query_dict*.

    (Original docstring was Chinese: "重写 query" — "override query".)
    """
    # cast(Any, ...) is a no-op at runtime; it only silences the type
    # checker, since parse_url.query is declared with a narrower type.
    self.parse_url.query = cast(Any, query_dict)
Override the query component of the URL.
async def list(self, *, filters: Mapping = None) -> List[Mapping]:
    """
    Return a list of services

    Args:
        filters: a dict with a list of filters

        Available filters:
        id=<service id>
        label=<service label>
        mode=["replicated"|"global"]
        name=<service name>

    Returns:
        the decoded JSON response from the daemon: one mapping per
        service.
    """
    # Serialize the filters into the query string and GET /services.
    params = {"filters": clean_filters(filters)}
    response = await self.docker._query_json(
        "services", method="GET", params=params
    )
    return response
Return a list of services Args: filters: a dict with a list of filters Available filters: id=<service id> label=<service label> mode=["replicated"|"global"] name=<service name>
def create_project(type, schema, server, name, output, verbose):
    """Create a new project on an entity matching server.

    See entity matching service documentation for details on mapping
    type and schema.

    Writes the authentication details for the created project as JSON to
    *output*.

    NOTE(review): ``type`` shadows the builtin; ``schema`` appears to be
    an open file handle (json.load reads from it) and ``output`` a
    writable file handle — presumably supplied by the CLI framework.
    """
    if verbose:
        log("Entity Matching Server: {}".format(server))

    if schema is not None:
        schema_json = json.load(schema)
        # Validate the schema before sending it to the server.
        clkhash.schema.validate_schema_dict(schema_json)
    else:
        raise ValueError("Schema must be provided when creating new linkage project")

    name = name if name is not None else ''

    # Creating new project
    try:
        project_creation_reply = project_create(server, schema_json, type, name)
    except ServiceError as e:
        # Report the server error and exit the CLI with a failure.
        log("Unexpected response - {}".format(e.status_code))
        log(e.text)
        raise SystemExit
    else:
        log("Project created")

    json.dump(project_creation_reply, output)
Create a new project on an entity matching server. See entity matching service documentation for details on mapping type and schema Returns authentication details for the created project.
def create_weather(self, **kwargs):
    """
    Create an instance of the Weather Forecast Service, grant the
    current client (if one exists) access to it, and add the service to
    the manifest.

    :param kwargs: forwarded to
        ``predix.admin.weather.WeatherForecast``
    :return: the configured WeatherForecast admin object
    """
    weather = predix.admin.weather.WeatherForecast(**kwargs)
    weather.create()

    client_id = self.get_client_id()
    if client_id:
        # Grant once (the previous version called grant_client twice,
        # which was redundant).
        weather.grant_client(client_id)

    weather.add_to_manifest(self)
    return weather
Creates an instance of the Weather Forecast Service.
def DownloadFile(file_obj, target_path, buffer_size=None):
    """Download an aff4 file to the local filesystem overwriting it if it exists.

    Args:
      file_obj: An aff4 object that supports the file interface (Read, Seek)
      target_path: Full path of file to write to.
      buffer_size: Read in chunks this size. Defaults to the module-level
        BUFFER_SIZE; the None sentinel late-binds the constant so it can
        be overridden at runtime.
    """
    if buffer_size is None:
        buffer_size = BUFFER_SIZE

    logging.info(u"Downloading: %s to: %s", file_obj.urn, target_path)

    # The context manager guarantees the target file is closed (and its
    # buffers flushed) even if a Read() raises part-way through; the
    # previous version leaked the handle on error.
    with open(target_path, "wb") as target_file:
        file_obj.Seek(0)
        count = 0

        data_buffer = file_obj.Read(buffer_size)
        while data_buffer:
            target_file.write(data_buffer)
            data_buffer = file_obj.Read(buffer_size)
            count += 1
            # Log progress every third chunk to avoid flooding the log.
            if not count % 3:
                logging.debug(u"Downloading: %s: %s done", file_obj.urn,
                              utils.FormatNumberAsString(count * buffer_size))
Download an aff4 file to the local filesystem overwriting it if it exists. Args: file_obj: An aff4 object that supports the file interface (Read, Seek) target_path: Full path of file to write to. buffer_size: Read in chunks this size.