def partition(list_, columns=2):
    """
    Break a list into ``columns`` number of columns.
    """
    iter_ = iter(list_)
    columns = int(columns)
    rows = []
    while True:
        row = []
        for column_number in range(1, columns + 1):
            try:
                value = six.next(iter_)
            except StopIteration:
                pass
            else:
                row.append(value)
        if not row:
            return rows
        rows.append(row)
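A quick usage sketch for `partition` (hypothetical values; the function depends on `six`, so it must be importable):

```python
# Hypothetical usage of partition(); requires `six` to be installed,
# since the function calls six.next() internally.
import six  # noqa: F401

print(partition([1, 2, 3, 4, 5, 6, 7], columns=3))
# -> [[1, 2, 3], [4, 5, 6], [7]]
```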
def stream(self, area_code=values.unset, contains=values.unset,
           sms_enabled=values.unset, mms_enabled=values.unset,
           voice_enabled=values.unset,
           exclude_all_address_required=values.unset,
           exclude_local_address_required=values.unset,
           exclude_foreign_address_required=values.unset,
           beta=values.unset, near_number=values.unset,
           near_lat_long=values.unset, distance=values.unset,
           in_postal_code=values.unset, in_region=values.unset,
           in_rate_center=values.unset, in_lata=values.unset,
           in_locality=values.unset, fax_enabled=values.unset,
           limit=None, page_size=None):
    """
    Streams VoipInstance records from the API as a generator stream.
    This operation lazily loads records as efficiently as possible until the
    limit is reached.
    The results are returned as a generator, so this operation is memory
    efficient.

    :param unicode area_code: The area code of the phone numbers to read
    :param unicode contains: The pattern on which to match phone numbers
    :param bool sms_enabled: Whether the phone numbers can receive text messages
    :param bool mms_enabled: Whether the phone numbers can receive MMS messages
    :param bool voice_enabled: Whether the phone numbers can receive calls
    :param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
    :param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
    :param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
    :param bool beta: Whether to read phone numbers new to the Twilio platform
    :param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
    :param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
    :param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
    :param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
    :param unicode in_region: Limit results to a particular region. (US/Canada only)
    :param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
    :param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
    :param unicode in_locality: Limit results to a particular locality
    :param bool fax_enabled: Whether the phone numbers can receive faxes
    :param int limit: Upper limit for the number of records to return. stream()
                      guarantees to never return more than limit. Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records. If no page_size is defined
                          but a limit is defined, stream() will attempt to read the
                          limit with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.api.v2010.account.available_phone_number.voip.VoipInstance]
    """
    limits = self._version.read_limits(limit, page_size)

    page = self.page(
        area_code=area_code,
        contains=contains,
        sms_enabled=sms_enabled,
        mms_enabled=mms_enabled,
        voice_enabled=voice_enabled,
        exclude_all_address_required=exclude_all_address_required,
        exclude_local_address_required=exclude_local_address_required,
        exclude_foreign_address_required=exclude_foreign_address_required,
        beta=beta,
        near_number=near_number,
        near_lat_long=near_lat_long,
        distance=distance,
        in_postal_code=in_postal_code,
        in_region=in_region,
        in_rate_center=in_rate_center,
        in_lata=in_lata,
        in_locality=in_locality,
        fax_enabled=fax_enabled,
        page_size=limits['page_size'],
    )

    return self._version.stream(page, limits['limit'], limits['page_limit'])
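A hedged usage sketch for `stream()`; the client construction follows the usual Twilio Python helper pattern, and the SID/token are placeholders, not real credentials:

```python
# Hedged sketch; the account SID and auth token below are placeholders.
from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
for number in client.available_phone_numbers("US").voip.stream(
        area_code="415", limit=20):
    print(number.phone_number)
```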
def _fmt_float(cls, x, **kw):
    """Float formatting class method.

    - ``x`` is converted to float via ``cls._to_float``.
    - Precision is taken from the ``n`` keyword argument.
    """
    n = kw.get('n')
    return '%.*f' % (n, cls._to_float(x))
def _api_get(self, url, **kwargs):
    """
    A convenience wrapper for _get. Adds headers, auth and base url by
    default.
    """
    kwargs['url'] = self.url + url
    kwargs['auth'] = self.auth
    headers = deepcopy(self.headers)
    headers.update(kwargs.get('headers', {}))
    kwargs['headers'] = headers
    return self._get(**kwargs)
def remove_aspera_coordinator(self, transfer_coordinator):
    '''Remove an entry from the waiting queue, or remove an item from the
    processing queue and add it to the processed queue, then notify the
    background thread as it may be able to process waiting requests.
    '''
    # usually called on processing completion - but can be called for a cancel
    if self._in_waiting_queue(transfer_coordinator):
        logger.info("Remove from waiting queue count=%d" % self.waiting_coordinator_count())
        with self._lockw:
            self._waiting_transfer_coordinators.remove(transfer_coordinator)
    else:
        logger.info("Remove from processing queue count=%d" % self.tracked_coordinator_count())
        try:
            self.remove_transfer_coordinator(transfer_coordinator)
            self.append_processed_queue(transfer_coordinator)
        except Exception:
            pass
    self._wakeup_processing_thread()
def train_associations_SingleSNP(X, Y, U, S, C, numintervals, ldeltamin, ldeltamax):
    """
    train_associations_SingleSNP(MatrixXd const & X, MatrixXd const & Y,
    MatrixXd const & U, MatrixXd const & S, MatrixXd const & C,
    int numintervals, double ldeltamin, double ldeltamax)

    Parameters
    ----------
    X: MatrixXd const &
    Y: MatrixXd const &
    U: MatrixXd const &
    S: MatrixXd const &
    C: MatrixXd const &
    numintervals: int
    ldeltamin: double
    ldeltamax: double
    """
    return _core.train_associations_SingleSNP(X, Y, U, S, C, numintervals,
                                              ldeltamin, ldeltamax)
def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers):
    """Add sinusoids of different frequencies as layer (vertical) timing signal.

    Args:
      x: a Tensor with shape [batch, length, channels]
      layer: layer num
      num_layers: total number of layers

    Returns:
      a Tensor the same shape as x.
    """
    channels = common_layers.shape_list(x)[-1]
    signal = get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers)
    return x + signal
def ws010c(self, value=None):
    """Corresponds to IDD Field `ws010c`

    Wind speed corresponding to 1.0% cumulative frequency of occurrence
    for coldest month;

    Args:
        value (float): value for IDD Field `ws010c`
            Unit: m/s
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float '
                             'for field `ws010c`'.format(value))
    self._ws010c = value
def is_valid_int_param(param):
    """Check whether the parameter is a valid integer value.

    :param param: Value to be validated.
    :return: True if the parameter holds a valid integer value,
        False otherwise.
    """
    if param is None:
        return False
    try:
        param = int(param)
        if param < 0:
            return False
    except (TypeError, ValueError):
        return False
    return True
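A few illustrative checks; note that negative integers are treated as invalid:

```python
# Behaviour sketch for is_valid_int_param().
assert is_valid_int_param("42") is True    # numeric strings are accepted
assert is_valid_int_param(-1) is False     # negatives are rejected
assert is_valid_int_param(None) is False
assert is_valid_int_param("abc") is False
```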
def _init_bins(self, binset):
    """Calculate binned wavelength centers, edges, and flux.

    By contrast, the native waveset and flux should be considered
    samples of a continuous function. Thus, it makes sense to
    interpolate ``self.waveset`` and ``self(self.waveset)``, but not
    `binset` and `binflux`.
    """
    if binset is None:
        if self.bandpass.waveset is not None:
            self._binset = self.bandpass.waveset
        elif self.spectrum.waveset is not None:
            self._binset = self.spectrum.waveset
            log.info('Bandpass waveset is undefined; '
                     'Using source spectrum waveset instead.')
        else:
            raise exceptions.UndefinedBinset(
                'Both source spectrum and bandpass have undefined '
                'waveset; Provide binset manually.')
    else:
        self._binset = self._validate_wavelengths(binset)

    # binset must be in ascending order for calcbinflux()
    # to work properly.
    if self._binset[0] > self._binset[-1]:
        self._binset = self._binset[::-1]

    self._bin_edges = binning.calculate_bin_edges(self._binset)

    # Merge bin edges and centers in with the natural waveset
    spwave = utils.merge_wavelengths(
        self._bin_edges.value, self._binset.value)
    if self.waveset is not None:
        spwave = utils.merge_wavelengths(spwave, self.waveset.value)

    # Throw out invalid wavelengths after merging.
    spwave = spwave[spwave > 0]

    # Compute indices associated to each endpoint.
    indices = np.searchsorted(spwave, self._bin_edges.value)
    i_beg = indices[:-1]
    i_end = indices[1:]

    # Prepare integration variables.
    flux = self(spwave)
    avflux = (flux.value[1:] + flux.value[:-1]) * 0.5
    deltaw = spwave[1:] - spwave[:-1]

    # Sum over each bin.
    binflux, intwave = binning.calcbinflux(
        self._binset.size, i_beg, i_end, avflux, deltaw)

    self._binflux = binflux * flux.unit
def p_ComplianceModules(self, p):
    """ComplianceModules : ComplianceModules ComplianceModule
                         | ComplianceModule"""
    n = len(p)
    if n == 3:
        p[0] = ('ComplianceModules', p[1][1] + [p[2]])
    elif n == 2:
        p[0] = ('ComplianceModules', [p[1]])
def peek(self, n):
    """Peek an n bit integer from the stream without updating the pointer.

    It is not an error to read beyond the end of the stream.

    >>> olleke.data[:2]==b'\x1b\x2e' and 0x2e1b==11803
    True
    >>> olleke.peek(15)
    11803
    >>> hex(olleke.peek(32))
    '0x2e1b'
    """
    # read bytes that contain the data: self.data[self.pos>>3:self.pos+n+7>>3]
    # convert to int: int.from_bytes(..., 'little')
    # shift out the bits from the first byte: >>(self.pos&7)
    # mask unwanted bits: & (1<<n)-1
    return int.from_bytes(
        self.data[self.pos>>3:self.pos+n+7>>3],
        'little')>>(self.pos&7) & (1<<n)-1
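To see the bit arithmetic outside the class, here is a standalone sketch of the same extraction over the bytes from the doctest (`data` and `pos` stand in for the stream state):

```python
# Standalone sketch of peek()'s arithmetic on the doctest bytes.
data, pos, n = b'\x1b\x2e', 0, 15
raw = int.from_bytes(data[pos >> 3:(pos + n + 7) >> 3], 'little')  # 0x2e1b
print(raw >> (pos & 7) & (1 << n) - 1)  # -> 11803
```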
def websocket_url_for_server_url(url):
    ''' Convert an ``http(s)`` URL for a Bokeh server websocket endpoint into
    the appropriate ``ws(s)`` URL

    Args:
        url (str): An ``http(s)`` URL

    Returns:
        str: The corresponding ``ws(s)`` URL ending in ``/ws``

    Raises:
        ValueError: If the input URL is not of the proper form.
    '''
    if url.startswith("http:"):
        reprotocoled = "ws" + url[4:]
    elif url.startswith("https:"):
        reprotocoled = "wss" + url[5:]
    else:
        raise ValueError("URL has unknown protocol " + url)
    if reprotocoled.endswith("/"):
        return reprotocoled + "ws"
    else:
        return reprotocoled + "/ws"
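Two example conversions (the hostnames are illustrative only):

```python
print(websocket_url_for_server_url("http://localhost:5006"))
# -> ws://localhost:5006/ws
print(websocket_url_for_server_url("https://demo.example.com/app/"))
# -> wss://demo.example.com/app/ws
```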
def create_pool(self, name, raid_groups, description=None, **kwargs):
    """Create pool based on RaidGroupParameter.

    :param name: pool name
    :param raid_groups: a list of *RaidGroupParameter*
    :param description: pool description
    :param alert_threshold: Threshold at which the system will generate
        alerts about the free space in the pool, specified as a percentage.
    :param is_harvest_enabled:
        True - Enable pool harvesting for the pool.
        False - Disable pool harvesting for the pool.
    :param is_snap_harvest_enabled:
        True - Enable snapshot harvesting for the pool.
        False - Disable snapshot harvesting for the pool.
    :param pool_harvest_high_threshold: Pool used space high threshold at
        which the system automatically starts to delete snapshots in the pool.
    :param pool_harvest_low_threshold: Pool used space low threshold under
        which the system automatically stops deletion of snapshots in the pool.
    :param snap_harvest_high_threshold: Snapshot used space high threshold at
        which the system automatically starts to delete snapshots in the pool.
    :param snap_harvest_low_threshold: Snapshot used space low threshold below
        which the system automatically stops deleting snapshots in the pool.
    :param is_fast_cache_enabled:
        True - FAST Cache will be enabled for this pool.
        False - FAST Cache will be disabled for this pool.
    :param is_fastvp_enabled:
        True - Enable scheduled data relocations for the pool.
        False - Disable scheduled data relocations for the pool.
    :param pool_type:
        StoragePoolTypeEnum.TRADITIONAL - Create traditional pool.
        StoragePoolTypeEnum.DYNAMIC - Create dynamic pool. (default)
    """
    return UnityPool.create(self._cli, name=name, description=description,
                            raid_groups=raid_groups, **kwargs)
def get_foreign_keys_in_altered_table(self, diff):
    """
    :param diff: The table diff
    :type diff: eloquent.dbal.table_diff.TableDiff

    :rtype: list
    """
    foreign_keys = diff.from_table.get_foreign_keys()
    column_names = self.get_column_names_in_altered_table(diff)

    # Iterate over a copy so that entries can be deleted while looping
    # (deleting from a dict during iteration raises RuntimeError in Python 3).
    for key, constraint in list(foreign_keys.items()):
        changed = False
        local_columns = []
        for column_name in constraint.get_local_columns():
            normalized_column_name = column_name.lower()
            if normalized_column_name not in column_names:
                del foreign_keys[key]
                break
            else:
                local_columns.append(column_names[normalized_column_name])
                if column_name != column_names[normalized_column_name]:
                    changed = True

        if changed:
            pass

    return foreign_keys
def startproject(project_name):
    """Build a full project skeleton: app package, blueprints, admin site,
    templates and static files."""
    # the destination path
    dst_path = os.path.join(os.getcwd(), project_name)
    start_init_info(dst_path)
    # create dst path
    _mkdir_p(dst_path)
    # create project tree
    os.chdir(dst_path)
    # create files
    init_code('manage.py', _manage_admin_code)
    init_code('requirement.txt', _requirement_admin_code)
    init_code('config.py', _config_sql_code)
    # create app/
    app_path = os.path.join(dst_path, 'app')
    _mkdir_p(app_path)
    # create files
    os.chdir(app_path)
    init_code('models.py', _models_admin_code)
    init_code('__init__.py', _init_admin_code)
    # create templates and static
    css_path, templates_path = create_templates_static_files(app_path)
    # create css files
    os.chdir(css_path)
    init_code('sign.css', _auth_login_css_code)
    # create main blueprint
    create_blueprint(
        app_path, 'main',
        _views_blueprint_code % ('main', 'main'),
        _forms_basic_code,
        templates_path
    )
    # create auth blueprint
    auth_templates_path = create_blueprint(
        app_path, 'auth',
        _auth_views_code,
        _auth_forms_code,
        templates_path
    )
    # create auth templates files
    os.chdir(auth_templates_path)
    init_code('login.html', _auth_login_html_code)
    # create admin site
    admin_path = os.path.join(app_path, 'admin')
    _mkdir_p(admin_path)
    # create admin files
    os.chdir(admin_path)
    init_code('__init__.py', '')
    init_code('views.py', _admin_views_code)
    # create admin templates
    os.chdir(templates_path)
    admin_templates_path = os.path.join(templates_path, 'admin')
    _mkdir_p(admin_templates_path)
    # create admin templates files
    os.chdir(admin_templates_path)
    init_code('index.html', _admin_index_html_code)
    init_code('logout.html', _admin_logout_html_code)
    init_done_info()
def res_obs_v_sim(pst, logger=None, filename=None, **kwargs):
    """timeseries plot helper...in progress"""
    if logger is None:
        logger = Logger('Default_Loggger.log', echo=False)
    logger.log("plot res_obs_v_sim")

    if "ensemble" in kwargs:
        try:
            res = pst_utils.res_from_en(pst, kwargs['ensemble'])
        except:
            logger.statement("res_1to1: could not find ensemble file {0}".format(kwargs['ensemble']))
    else:
        try:
            res = pst.res
        except:
            logger.lraise("res_phi_pie: pst.res is None, couldn't find residuals file")

    obs = pst.observation_data

    if "grouper" in kwargs:
        raise NotImplementedError()
    else:
        grouper = obs.groupby(obs.obgnme).groups

    fig = plt.figure(figsize=figsize)
    if "fig_title" in kwargs:
        plt.figtext(0.5, 0.5, kwargs["fig_title"])
    else:
        plt.figtext(0.5, 0.5,
                    "pyemu.Pst.plot(kind='obs_v_sim')\nfrom pest control file '{0}'\n at {1}"
                    .format(pst.filename, str(datetime.now())), ha="center")

    figs = []
    ax_count = 0
    axes = None
    for g, names in grouper.items():
        logger.log("plotting obs_v_sim for {0}".format(g))
        obs_g = obs.loc[names, :]
        obs_g.loc[:, "sim"] = res.loc[names, "modelled"]
        if "include_zero" not in kwargs or kwargs["include_zero"] is False:
            obs_g = obs_g.loc[obs_g.weight > 0, :]
        if obs_g.shape[0] == 0:
            logger.statement("no non-zero obs for group '{0}'".format(g))
            logger.log("plotting obs_v_sim for {0}".format(g))
            continue

        # parse datetimes
        # suffix in decreasing magnitude (yearmonthday)
        try:
            obs_g.loc[:, "datetime_str"] = obs_g.obsnme.apply(lambda x: x.split('_')[-1])
        except:
            logger.statement("res_obs_v_sim error forming datetime_str")
            continue

        # Default datetime
        try:
            obs_g.loc[:, "datetime"] = pd.to_datetime(obs_g.datetime_str, format="%Y%m%d")
        except:
            logger.statement("res_obs_v_sim error casting datetime using default {0}".format(g))

        # abbreviated date format: ALWAYS a 2 digit year, maybe followed by a
        # 2 digit month, maybe followed by a 2 digit day
        obs_g['datetime'] = 0
        try:
            obs_g.loc[obs_g['datetime_str'].str.len() == 2, 'datetime'] = \
                pd.to_datetime(obs_g['datetime_str'] + '1231', format='%y%m%d')
        except:
            logger.statement("res_obs_v_sim error casting datetime using %y {0}".format(g))
        try:
            # last day of the month: advance one month, then step back one day
            # (datetime.timedelta has no 'months' argument, so pd.DateOffset
            # is used here instead)
            obs_g.loc[obs_g['datetime_str'].str.len() == 4, 'datetime'] = \
                pd.to_datetime(obs_g['datetime_str'].astype(str) + '01', format='%y%m%d') + \
                pd.DateOffset(months=1) - pd.DateOffset(days=1)
        except:
            logger.statement("res_obs_v_sim error casting datetime using %y%m {0}".format(g))
        try:
            obs_g.loc[obs_g['datetime_str'].str.len() == 6, 'datetime'] = \
                pd.to_datetime(obs_g['datetime_str'].astype(str), format='%y%m%d')
        except:
            logger.statement("res_obs_v_sim error casting datetime using %y%m%d {0}".format(g))

        if ax_count % (nr * nc) == 0:
            plt.tight_layout()
            #pdf.savefig()
            #plt.close(fig)
            figs.append(fig)
            fig = plt.figure(figsize=figsize)
            axes = get_page_axes()
            ax_count = 0

        ax = axes[ax_count]
        obs_g.loc[:, "site"] = obs_g.obsnme.apply(lambda x: x.split('_')[0])
        for site in obs_g.site.unique():
            obs_s = obs_g.loc[obs_g.site == site, :]
            obs_s.sort_values(by="datetime")
            ax.plot(obs_s.datetime, obs_s.obsval, ls='-', marker='.', ms=10, color='b')
            ax.plot(obs_s.datetime, obs_s.sim, ls='-', marker='.', ms=10, color='0.5')
        ax.set_xlim(obs_g.datetime.min(), obs_g.datetime.max())
        ax.grid()
        ax.set_xlabel("datetime", labelpad=0.1)
        ax.set_title("{0}) group:{1}, {2} observations".format(
            abet[ax_count], g, names.shape[0]), loc="left")
        ax_count += 1
        logger.log("plotting obs_v_sim for {0}".format(g))

    if axes is None:
        return
    for a in range(ax_count, nr * nc):
        axes[a].set_axis_off()
        axes[a].set_yticks([])
        axes[a].set_xticks([])

    plt.tight_layout()
    #pdf.savefig()
    #plt.close(fig)
    figs.append(fig)

    if filename is not None:
        with PdfPages(pst.filename.replace(".pst", ".obs_v_sim.pdf")) as pdf:
            for fig in figs:
                pdf.savefig(fig)
                plt.close(fig)
        logger.log("plot res_obs_v_sim")
    else:
        logger.log("plot res_obs_v_sim")
        return figs
def display_for_value(value, request=None):
    """
    Converts value to a humanized form, examples:
        boolean True/False ==> Yes/No
        objects ==> object display name with link if current user has
            permissions to see the object
        datetime ==> in localized format
    """
    from is_core.utils.compatibility import admin_display_for_value

    if request and isinstance(value, Model):
        return render_model_object_with_link(request, value)
    else:
        return (
            (value and ugettext('Yes') or ugettext('No')) if isinstance(value, bool)
            else admin_display_for_value(value)
        )
def min_value(self):
    """The minimum pixel value of the ``data`` within the source segment."""
    if self._is_completely_masked:
        return np.nan * self._data_unit
    else:
        return np.min(self.values)
def dshield_ip_check(ip):
    """Checks dshield for info on an IP address"""
    if not is_IPv4Address(ip):
        return None

    headers = {'User-Agent': useragent}
    url = 'https://isc.sans.edu/api/ip/'
    response = requests.get('{0}{1}?json'.format(url, ip), headers=headers)
    return response.json()
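A hedged usage sketch; `192.0.2.1` is a documentation-reserved example address, and the payload layout (the record nested under an `"ip"` key) is an assumption about the DShield API response:

```python
# Hedged sketch; 192.0.2.1 is a reserved documentation address.
info = dshield_ip_check("192.0.2.1")
if info is not None:
    print(info.get("ip"))  # assumed: DShield nests the record under "ip"
```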
def remove_binding(site, hostheader='', ipaddress='*', port=80):
    '''
    Remove an IIS binding.

    Args:
        site (str): The IIS site name.
        hostheader (str): The host header of the binding.
        ipaddress (str): The IP address of the binding.
        port (int): The TCP port of the binding.

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.remove_binding site='site0' hostheader='example.com' ipaddress='*' port='80'
    '''
    name = _get_binding_info(hostheader, ipaddress, port)
    current_bindings = list_bindings(site)

    if name not in current_bindings:
        log.debug('Binding already absent: %s', name)
        return True

    ps_cmd = ['Remove-WebBinding',
              '-HostHeader', "'{0}'".format(hostheader),
              '-IpAddress', "'{0}'".format(ipaddress),
              '-Port', "'{0}'".format(port)]

    cmd_ret = _srvmgr(ps_cmd)

    if cmd_ret['retcode'] != 0:
        msg = 'Unable to remove binding: {0}\nError: {1}' \
              ''.format(site, cmd_ret['stderr'])
        raise CommandExecutionError(msg)

    if name not in list_bindings(site):
        log.debug('Binding removed successfully: %s', site)
        return True

    log.error('Unable to remove binding: %s', site)
    return False
def leaveEvent(self, event):
    """Hides the tracker item when the cursor leaves the widget."""
    item = self.trackerItem()
    if item:
        item.setVisible(False)
def _authenticate_cram_md5(credentials, sock_info):
    """Authenticate using CRAM-MD5 (RFC 2195)
    """
    source = credentials.source
    username = credentials.username
    password = credentials.password
    # The password used as the mac key is the
    # same as what we use for MONGODB-CR
    passwd = _password_digest(username, password)
    cmd = SON([('saslStart', 1),
               ('mechanism', 'CRAM-MD5'),
               ('payload', Binary(b'')),
               ('autoAuthorize', 1)])
    response = sock_info.command(source, cmd)
    # MD5 as implicit default digest for digestmod is deprecated
    # in python 3.4
    mac = hmac.HMAC(key=passwd.encode('utf-8'), digestmod=md5)
    mac.update(response['payload'])
    challenge = username.encode('utf-8') + b' ' + b(mac.hexdigest())
    cmd = SON([('saslContinue', 1),
               ('conversationId', response['conversationId']),
               ('payload', Binary(challenge))])
    sock_info.command(source, cmd)
def _grouper(iterable, n_args, fillvalue=None):
    """
    Collect data from `iterable` into fixed-length chunks of `n_args`
    items; the last chunk is padded with `fillvalue`.
    """
    args = [iter(iterable)] * n_args
    return zip_longest(*args, fillvalue=fillvalue)
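An illustration of the chunking behaviour (this is the classic `grouper` recipe from the itertools docs):

```python
# Chunk a sequence into triples, padding the last chunk.
print(list(_grouper("ABCDEFG", 3, fillvalue="x")))
# -> [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
```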
def get(self, query, sort, page, size):
    """Get a list of all the communities.

    .. http:get:: /communities/(string:id)

        Returns a JSON list with all the communities.

        **Request**:

        .. sourcecode:: http

            GET /communities HTTP/1.1
            Accept: application/json
            Content-Type: application/json
            Host: localhost:5000

        :reqheader Content-Type: application/json

        **Response**:

        .. sourcecode:: http

            HTTP/1.0 200 OK
            Content-Length: 334
            Content-Type: application/json

            [
                {
                    "id": "comm1"
                },
                {
                    "id": "comm2"
                }
            ]

        :resheader Content-Type: application/json
        :statuscode 200: no error
    """
    urlkwargs = {
        'q': query,
        'sort': sort,
        'size': size,
    }

    communities = Community.filter_communities(query, sort)
    page = communities.paginate(page, size)
    links = default_links_pagination_factory(page, urlkwargs)

    links_headers = map(lambda key: ('link', 'ref="{0}" href="{1}"'.format(
        key, links[key])), links)

    return self.make_response(
        page,
        headers=links_headers,
        links_item_factory=default_links_item_factory,
        page=page,
        urlkwargs=urlkwargs,
        links_pagination_factory=default_links_pagination_factory,
    )
async def async_enqueue_sync(self, func, *func_args):
    '''
    Enqueue an arbitrary synchronous function.
    '''
    worker = self.pick_sticky(0)  # just pick first always
    args = (func,) + func_args
    await worker.enqueue(enums.Task.FUNC, args)
def _list_templates(settings):
    """
    List templates from settings.
    """
    for idx, option in enumerate(settings.config.get("project_templates"), start=1):
        puts("  {0!s:5} {1!s:36}".format(
            colored.yellow("[{0}]".format(idx)),
            colored.cyan(option.get("name"))
        ))
        if option.get("url"):
            puts("  {0}\n".format(option.get("url")))
def output_selector_schema(config_cls):
    '''
    A decorator for annotating a function that can take the selected
    properties of a ``config_value`` and an instance of a custom type
    and materialize it.

    Args:
        config_cls (Selector):
    '''
    config_type = resolve_config_cls_arg(config_cls)
    check.param_invariant(config_type.is_selector, 'config_cls')

    def _wrap(func):
        def _selector(context, config_value, runtime_value):
            selector_key, selector_value = single_item(config_value)
            return func(context, selector_key, selector_value, runtime_value)

        return _create_output_schema(config_type, _selector)

    return _wrap
def _join_all_filenames_and_text(self):
    """
    *join all file names, directory names and text content together*
    """
    self.log.info('starting the ``_join_all_filenames_and_text`` method')

    contentString = u""
    for i in self.directoryContents:
        contentString += u"%(i)s\n" % locals()
        if os.path.isfile(os.path.join(i)):
            if i[-4:] in [".png", ".jpg", ".gif"]:
                continue
            readFile = codecs.open(i, encoding='ISO-8859-1', mode='r')
            if ".DS_Store" in i:
                continue
            data = readFile.read()
            contentString += u"%(data)s\n" % locals()
            readFile.close()

    self.contentString = contentString

    self.log.info('completed the ``_join_all_filenames_and_text`` method')
    return None
def predict_encoding(file_path, n_lines=20):
    '''Get file encoding of a text file'''
    import chardet

    # Open the file as binary data
    with open(file_path, 'rb') as f:
        # Join binary lines for specified number of lines
        rawdata = b''.join([f.readline() for _ in range(n_lines)])

    return chardet.detect(rawdata)['encoding']
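A hypothetical usage sketch; `notes.txt` is a placeholder path:

```python
# Detect the encoding first, then reopen the file in text mode with it.
enc = predict_encoding('notes.txt', n_lines=50)
with open('notes.txt', encoding=enc) as f:
    text = f.read()
```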
def pdf_rotate(
    input: str,
    counter_clockwise: bool = False,
    pages: [str] = None,
    output: str = None,
):
    """
    Rotate the given PDF file clockwise or counter clockwise.

    :param input: the PDF file
    :param counter_clockwise: rotate counter clockwise if true, else clockwise
    :param pages: list of page numbers to rotate; if None, all pages will be
        rotated
    :param output: path of the output file; if None, the rotated pages are
        written back to the input file
    """
    infile = open(input, "rb")
    reader = PdfFileReader(infile)
    writer = PdfFileWriter()

    # get pages from source depending on pages parameter
    if pages is None:
        source_pages = reader.pages
    else:
        pages = parse_rangearg(pages, len(reader.pages))
        source_pages = [reader.getPage(i) for i in pages]

    # rotate pages and add to writer
    for i, page in enumerate(source_pages):
        if pages is None or i in pages:
            if counter_clockwise:
                writer.addPage(page.rotateCounterClockwise(90))
            else:
                writer.addPage(page.rotateClockwise(90))
        else:
            writer.addPage(page)

    # Open output file or temporary file for writing
    if output is None:
        outfile = NamedTemporaryFile(delete=False)
    else:
        if not os.path.isfile(output) or overwrite_dlg(output):
            outfile = open(output, "wb")
        else:
            return

    # Write to file
    writer.write(outfile)
    infile.close()
    outfile.close()

    # If no output defined move temporary file to input
    if output is None:
        if overwrite_dlg(input):
            os.remove(input)
            move(outfile.name, input)
        else:
            os.remove(outfile.name)
def _fmt_auto(cls, x, **kw):
    """Auto formatting class method."""
    f = cls._to_float(x)
    if abs(f) > 1e8:
        fn = cls._fmt_exp
    else:
        if f - round(f) == 0:
            fn = cls._fmt_int
        else:
            fn = cls._fmt_float
    return fn(x, **kw)
def run_pipelines(pipeline_id_pattern, root_dir,
                  use_cache=True,
                  dirty=False,
                  force=False,
                  concurrency=1,
                  verbose_logs=True,
                  progress_cb=None,
                  slave=False):
    """Run a pipeline by pipeline-id.
    pipeline-id supports the '%' wildcard for any-suffix matching.
    Use 'all' or '%' for running all pipelines"""
    with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency,
                                               thread_name_prefix='T') as executor:
        try:
            results = []
            pending_futures = set()
            done_futures = set()
            finished_futures = []
            progress_thread = None
            progress_queue = None
            status_manager = status_mgr(root_dir)

            if progress_cb is not None:
                progress_queue = Queue()
                progress_thread = threading.Thread(target=progress_report_handler,
                                                   args=(progress_cb, progress_queue))
                progress_thread.start()

            all_specs = specs_to_execute(pipeline_id_pattern, root_dir,
                                         status_manager, force, dirty, results)

            while True:
                done = None
                if len(done_futures) > 0:
                    done = done_futures.pop()
                    finished_futures.append(done)
                    done = done.result()[0]

                try:
                    spec = all_specs.send(done)
                except StopIteration:
                    spec = None

                if spec is None:
                    # Wait for all runners to idle...
                    if len(done_futures) == 0:
                        if len(pending_futures) > 0:
                            done_futures, pending_futures = \
                                concurrent.futures.wait(
                                    pending_futures,
                                    return_when=concurrent.futures.FIRST_COMPLETED)
                            continue
                        else:
                            break
                    else:
                        continue

                if len(spec.validation_errors) > 0:
                    results.append(
                        ExecutionResult(spec.pipeline_id,
                                        False, {},
                                        ['init'] + list(map(str, spec.validation_errors)))
                    )
                    continue

                if slave:
                    ps = status_manager.get(spec.pipeline_id)
                    ps.init(spec.pipeline_details,
                            spec.source_details,
                            spec.validation_errors,
                            spec.cache_hash)
                    eid = gen_execution_id()
                    if ps.queue_execution(eid, 'manual'):
                        success, stats, errors = \
                            execute_pipeline(spec, eid, use_cache=use_cache)
                        results.append(ExecutionResult(
                            spec.pipeline_id, success, stats, errors
                        ))
                    else:
                        results.append(
                            ExecutionResult(spec.pipeline_id, False, None,
                                            ['Already Running'])
                        )
                else:
                    f = executor.submit(remote_execute_pipeline,
                                        spec, root_dir, use_cache,
                                        verbose_logs, progress_queue)
                    pending_futures.add(f)

            for f in finished_futures:
                ret = f.result()
                results.append(ExecutionResult(*ret))

        except KeyboardInterrupt:
            pass
        finally:
            if slave:
                finalize()
            if progress_thread is not None:
                progress_queue.put(None)
                progress_thread.join()

    return results
def from_url(cls, db_url=ALL_SETS_ZIP_URL):
    """Load card data from a URL.

    Uses :func:`requests.get` to fetch card data. Also handles zipfiles.

    :param db_url: URL to fetch.
    :return: A new :class:`~mtgjson.CardDb` instance.
    """
    r = requests.get(db_url)
    r.raise_for_status()

    if r.headers['content-type'] == 'application/json':
        return cls(json.loads(r.text))

    if r.headers['content-type'] == 'application/zip':
        with zipfile.ZipFile(six.BytesIO(r.content), 'r') as zf:
            names = zf.namelist()
            assert len(names) == 1, 'One datafile in ZIP'
            return cls.from_file(io.TextIOWrapper(
                zf.open(names[0]),
                encoding='utf8'))
def detach_framebuffer(self, screen_id, id_p):
    """Removes the graphics updates target for a screen.

    in screen_id of type int

    in id_p of type str
    """
    if not isinstance(screen_id, baseinteger):
        raise TypeError("screen_id can only be an instance of type baseinteger")
    if not isinstance(id_p, basestring):
        raise TypeError("id_p can only be an instance of type basestring")
    self._call("detachFramebuffer",
               in_p=[screen_id, id_p])
def compute_evolution_by_frequency(
        df,
        id_cols: List[str],
        date_col: Union[str, Dict[str, str]],
        value_col: str,
        freq=1,
        method: str = 'abs',
        format: str = 'column',
        offseted_suffix: str = '_offseted',
        evolution_col_name: str = 'evolution_computed',
        missing_date_as_zero: bool = False,
        raise_duplicate_error: bool = True
):
    """
    This function answers the question: how has a value changed on a
    weekly, monthly, yearly basis?

    ---

    ### Parameters

    *mandatory:*
    - `id_cols` (*list*): name of the columns used to create each group.
    - `date_col` (*str or dict*): either directly the name of the column
      containing the date or a dictionary with:
      - `selector` (*str*): the name of the column
      - `format` (*str*): the format of the date (see [pandas doc](
        https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
    - `value_col` (*str*): name of the column containing the value to compare.

    *optional:*
    - `freq` (*int/pd.DateOffset/pd.Series/dict*): the frequency at which we
      calculate evolutions
    - `method` (*str*): either `"abs"` for absolute values or `"pct"` for the
      evolution in percentage of previous value.
    - `offseted_suffix` (*str*): suffix of the offseted column.
      By default, `"_offseted"`.
    - `evolution_col_name` (*str*): name given to the evolution column.
      By default, `"evolution_computed"`.
    - `missing_date_as_zero` (*boolean*): add missing date with zero value.
    - `raise_duplicate_error` (*boolean*): raise an error when the dataset
      has duplicated values with the given `id_cols`.
    - `format` (*str*): `'df'`  # Do not change it !!!

    ---

    ### Example

    **Input**

    | id_cols | value_col | date_col |
    |:-------:|:---------:|:--------:|
    |    A    |    20     |   2010   |
    |         |     7     |   2011   |
    |    B    |    200    |   2010   |
    |         |    220    |   2011   |
    |    C    |    100    |   2011   |

    ```cson
    compute_evolution_by_frequency:
      id_cols: "id_cols"
      date_col: "date_col"
      value_col: "value_col"
    ```

    **Output**

    | id_cols | value_col | date_col | evolution |
    |:-------:|:---------:|:--------:|:---------:|
    |    A    |    20     |   2010   |   null    |
    |         |     7     |   2011   |    -13    |
    |    B    |    200    |   2010   |   null    |
    |         |    220    |   2011   |    20     |
    |    C    |    100    |   2011   |   null    |
    """
    if missing_date_as_zero:
        how = 'outer'
        fillna = 0
    else:
        how = 'left'
        fillna = None

    return __compute_evolution(
        df=df,
        id_cols=id_cols,
        value_col=value_col,
        date_col=date_col,
        freq=freq,
        method=method,
        format=format,
        offseted_suffix=offseted_suffix,
        evolution_col_name=evolution_col_name,
        how=how,
        fillna=fillna,
        raise_duplicate_error=raise_duplicate_error
    )
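A minimal sketch reproducing the docstring example; the column names are the generic ones from the docstring, not from a real dataset:

```python
import pandas as pd

df = pd.DataFrame({
    'id_cols': ['A', 'A', 'B', 'B', 'C'],
    'date_col': [2010, 2011, 2010, 2011, 2011],
    'value_col': [20, 7, 200, 220, 100],
})
result = compute_evolution_by_frequency(
    df, id_cols=['id_cols'], date_col='date_col', value_col='value_col')
```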
def controller(self, *paths, **query_kwargs):
    """create a new url object using the controller path as a base

    if you have a controller `foo.BarController` then this would create a new
    Url instance with `host/foo/bar` as the base path, so any *paths will be
    appended to `/foo/bar`

    :example:
        # controller foo.BarController
        print url # http://host.com/foo/bar/some_random_path
        print url.controller() # http://host.com/foo/bar
        print url.controller("che", boom="bam") # http://host/foo/bar/che?boom=bam

    :param *paths: list, the paths to append to the controller path
    :param **query_kwargs: dict, any query string params to add
    """
    kwargs = self._normalize_params(*paths, **query_kwargs)
    if self.controller_path:
        if "path" in kwargs:
            paths = self.normalize_paths(self.controller_path, kwargs["path"])
            kwargs["path"] = "/".join(paths)
        else:
            kwargs["path"] = self.controller_path
    return self.create(self.root, **kwargs)
def table(self, name=DEFAULT_TABLE, **options):
    """
    Get access to a specific table.

    Creates a new table, if it hasn't been created before, otherwise it
    returns the cached :class:`~tinydb.Table` object.

    :param name: The name of the table.
    :type name: str
    :param cache_size: How many query results to cache.
    :param table_class: Which table class to use.
    """
    if name in self._table_cache:
        return self._table_cache[name]

    table_class = options.pop('table_class', self._cls_table)
    table = table_class(self._cls_storage_proxy(self._storage, name), name, **options)
    self._table_cache[name] = table

    return table
def tileAddress(self, zoom, point):
    """Returns a tile address based on a zoom level and
    a point in the tile"""
    [x, y] = point
    assert x <= self.MAXX and x >= self.MINX
    assert y <= self.MAXY and y >= self.MINY
    assert zoom in range(0, len(self.RESOLUTIONS))

    tileS = self.tileSize(zoom)
    offsetX = abs(x - self.MINX)
    if self.originCorner == 'bottom-left':
        offsetY = abs(y - self.MINY)
    elif self.originCorner == 'top-left':
        offsetY = abs(self.MAXY - y)
    col = offsetX / tileS
    row = offsetY / tileS
    # We are exactly on the edge of a tile and the extent
    if x in (self.MINX, self.MAXX) and col.is_integer():
        col = max(0, col - 1)
    if y in (self.MINY, self.MAXY) and row.is_integer():
        row = max(0, row - 1)
    return [
        int(math.floor(col)),
        int(math.floor(row))
    ]
def return_hdr(self):
    """
    Returns
    -------
    subj_id : str
        subject identification code
    start_time : datetime
        start time of the dataset
    s_freq : float
        sampling frequency
    chan_name : list of str
        list of all the channels
    n_samples : int
        number of samples in the dataset
    orig : dict
        additional information taken directly from the header
    """
    self.fdtfile = None
    try:
        self.EEG = loadmat(str(self.filename), struct_as_record=False,
                           squeeze_me=True)['EEG']
        self.hdf5 = False
    except NotImplementedError:
        self.hdf5 = True

    if not self.hdf5:
        self.s_freq = self.EEG.srate
        chan_name = [chan.labels for chan in self.EEG.chanlocs]
        n_samples = self.EEG.pnts
        if isinstance(self.EEG.subject, str):
            subj_id = self.EEG.subject
        else:
            subj_id = ''
        try:
            start_time = datetime(*self.EEG.etc.T0)
        except AttributeError:
            start_time = DEFAULT_DATETIME
        if isinstance(self.EEG.datfile, str):
            self.fdtfile = self.EEG.datfile
        else:
            self.data = self.EEG.data
    else:
        with File(self.filename) as f:
            EEG = f['EEG']
            self.s_freq = EEG['srate'].value.item()
            chan_name = read_hdf5_chan_name(f, EEG['chanlocs']['labels'])
            n_samples = int(EEG['pnts'].value.item())
            subj_id = read_hdf5_str(EEG['subject'])
            try:
                start_time = datetime(*EEG['etc']['T0'])
            except ValueError:
                start_time = DEFAULT_DATETIME
            datfile = read_hdf5_str(EEG['datfile'])
            if datfile == '':
                # for some reason, you need to transpose this
                self.data = EEG['data'].value.T
            else:
                self.fdtfile = datfile

    if self.fdtfile is not None:
        memshape = (len(chan_name), int(n_samples))
        memmap_file = self.filename.parent / self.fdtfile
        if not memmap_file.exists():
            renamed_memmap_file = self.filename.with_suffix('.fdt')
            if not renamed_memmap_file.exists():
                raise FileNotFoundError(f'No file {memmap_file} or {renamed_memmap_file}')
            else:
                memmap_file = renamed_memmap_file
        self.data = memmap(str(memmap_file), 'float32', mode='c',
                           shape=memshape, order='F')

    return subj_id, start_time, self.s_freq, chan_name, n_samples, {}
def accel_zoom_in(self, *args):
    """Callback to zoom in."""
    for term in self.get_notebook().iter_terminals():
        term.increase_font_size()
    return True
def _se_all(self):
    """Standard errors (SE) for all parameters, including the intercept."""
    err = np.expand_dims(self._ms_err, axis=1)
    t1 = np.diagonal(
        np.linalg.inv(np.matmul(self.xwins.swapaxes(1, 2), self.xwins)),
        axis1=1, axis2=2,
    )
    return np.squeeze(np.sqrt(t1 * err))
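For reference, the quantity computed per window is the usual OLS standard error; in LaTeX, under the assumption that `self._ms_err` holds the mean squared error of the residuals:

```latex
\mathrm{SE}(\hat{\beta}_j)
  = \sqrt{\hat{\sigma}^2 \,\bigl[(X^\top X)^{-1}\bigr]_{jj}},
\qquad \hat{\sigma}^2 = \mathrm{MS}_{\mathrm{err}}
```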
def _post_run_hook(self, runtime):
    ''' generates a report showing nine slices, three per axis, of an
    arbitrary volume of `in_files`, with the resulting segmentation
    overlaid '''
    outputs = self.aggregate_outputs(runtime=runtime)
    self._anat_file = os.path.join(outputs.subjects_dir,
                                   outputs.subject_id,
                                   'mri', 'brain.mgz')
    self._contour = os.path.join(outputs.subjects_dir,
                                 outputs.subject_id,
                                 'mri', 'ribbon.mgz')
    self._masked = False

    NIWORKFLOWS_LOG.info('Generating report for ReconAll (subject %s)',
                         outputs.subject_id)

    return super(ReconAllRPT, self)._post_run_hook(runtime)
def _get_stddevs(self, C, stddev_types, num_sites):
    """
    Return total standard deviation as described in paragraph 5.2,
    page 200.
    """
    # interevent stddev
    sigma_inter = C['tau'] + np.zeros(num_sites)
    # intraevent std
    sigma_intra = C['sigma'] + np.zeros(num_sites)
    std = []

    for stddev_type in stddev_types:
        if stddev_type == const.StdDev.TOTAL:
            # equation in section 5.2 page 200
            std += [np.sqrt(sigma_intra**2 + sigma_inter**2)]
        elif stddev_type == const.StdDev.INTRA_EVENT:
            std.append(sigma_intra)
        elif stddev_type == const.StdDev.INTER_EVENT:
            std.append(sigma_inter)

    return std
def docs(ctx, clean=False, browse=False, watch=False):
    """Build the docs."""
    if clean:
        clean_docs(ctx)
    if watch:
        watch_docs(ctx, browse=browse)
    else:
        build_docs(ctx, browse=browse)
def elastic_install(self):
    """
    elasticsearch install

    :return:
    """
    with cd('/tmp'):
        if not exists('elastic.deb'):
            sudo('wget {0} -O elastic.deb'.format(
                bigdata_conf.elastic_download_url
            ))
        sudo('dpkg -i elastic.deb')
        sudo('apt-get install -y')
def match(self, metadata, user=None):
    """Does the specified metadata match this template?
    Returns (success, metadata, parameters)."""
    assert isinstance(metadata, self.formatclass)
    return self.generate(metadata, user)
def configure_cache(app):
    """ Sets up an attribute to cache data in the app context """
    log = logging.getLogger('ara.webapp.configure_cache')
    log.debug('Configuring cache')
    if not getattr(app, '_cache', None):
        app._cache = {}
def alias_action(self, *args, **kwargs):
    """
    Alias one or more actions into another one.

        self.alias_action('create', 'read', 'update', 'delete', to='crud')
    """
    to = kwargs.pop('to', None)
    if not to:
        return

    error_message = ("You can't specify target ({}) as alias "
                     "because it is a real action name".format(to))

    if to in list(itertools.chain(*self.aliased_actions.values())):
        raise Exception(error_message)

    self.aliased_actions.setdefault(to, []).extend(args)
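A sketch mirroring the docstring example; `acl` stands for a hypothetical instance of the owning class:

```python
# Hypothetical instance `acl`; 'crud' becomes an alias for four actions.
acl.alias_action('create', 'read', 'update', 'delete', to='crud')
# acl.aliased_actions -> {'crud': ['create', 'read', 'update', 'delete']}
```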
def set_memory_params(self, ksm_interval=None, no_swap=None):
    """Set memory related parameters.

    :param int ksm_interval: Kernel Samepage Merging frequency option,
        that can reduce memory usage. Accepts a number of requests (or
        master process cycles) to run page scanner after.

        .. note:: Linux only.

        * http://uwsgi.readthedocs.io/en/latest/KSM.html

    :param bool no_swap: Lock all memory pages avoiding swapping.
    """
    self._set('ksm', ksm_interval)
    self._set('never_swap', no_swap, cast=bool)

    return self._section
def create_folder(name, location='\\'):
    r'''
    Create a folder in which to create tasks.

    :param str name: The name of the folder. This will be displayed in the
        task scheduler.

    :param str location: A string value representing the location in which to
        create the folder. Default is '\\' which is the root for the task
        scheduler (C:\Windows\System32\tasks).

    :return: True if successful, False if unsuccessful
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' task.create_folder <folder_name>
    '''
    # Check for existing folder
    if name in list_folders(location):
        # Connect to an existing task definition
        return '{0} already exists'.format(name)

    # Create the task service object
    with salt.utils.winapi.Com():
        task_service = win32com.client.Dispatch("Schedule.Service")
        task_service.Connect()

        # Get the folder to list folders from
        task_folder = task_service.GetFolder(location)
        task_folder.CreateFolder(name)

        # Verify creation
        if name in list_folders(location):
            return True
        else:
            return False
def locate_ranges(self, starts, stops, strict=True):
    """Locate items within the given ranges.

    Parameters
    ----------
    starts : array_like, int
        Range start values.
    stops : array_like, int
        Range stop values.
    strict : bool, optional
        If True, raise KeyError if any ranges contain no entries.

    Returns
    -------
    loc : ndarray, bool
        Boolean array with location of entries found.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35],
    ...                    [100, 120]])
    >>> starts = ranges[:, 0]
    >>> stops = ranges[:, 1]
    >>> loc = idx.locate_ranges(starts, stops, strict=False)
    >>> loc
    array([False,  True,  True, False,  True])
    >>> idx[loc]
    <SortedIndex shape=(3,) dtype=int64>
    [6, 11, 35]
    """
    loc, found = self.locate_intersection_ranges(starts, stops)

    if strict and np.any(~found):
        raise KeyError(starts[~found], stops[~found])

    return loc
def forwards(apps, schema_editor):
    """
    Re-save all the Works because something earlier didn't create their
    slugs.
    """
    Work = apps.get_model('spectator_events', 'Work')

    for work in Work.objects.all():
        if not work.slug:
            work.slug = generate_slug(work.pk)
            work.save()
def _GetTable(self):
    """Returns table, with column headers and separators.

    Returns:
      The whole table including headers as a string. Each row is
      joined by a newline and each entry by self.separator.
    """
    result = []
    # Avoid the global lookup cost on each iteration.
    lstr = str
    for row in self._table:
        result.append(
            '%s\n' % self.separator.join(lstr(v) for v in row))

    return ''.join(result)
def connection_made(self, transport):
    '''
    override _SiriDBProtocol
    '''
    self.transport = transport
    self.remote_ip, self.port = transport.get_extra_info('peername')[:2]

    logging.debug(
        'Connection made (address: {} port: {})'
        .format(self.remote_ip, self.port))

    self.future = self.send_package(
        protomap.CPROTO_REQ_INFO,
        data=None,
        timeout=10)
def compound_powerspec(data, tbin, Df=None, pointProcess=False):
    """
    Calculate the power spectrum of the compound/sum signal.
    data is first summed across units, then the power spectrum is calculated.
    If pointProcess=True, power spectra are normalized by the length T of the
    time series.

    Parameters
    ----------
    data : numpy.ndarray
        1st axis unit, 2nd axis time
    tbin : float
        binsize in ms
    Df : float/None
        window width of sliding rectangular filter (smoothing),
        None -> no smoothing
    pointProcess : bool
        if set to True, powerspectrum is normalized to signal length T

    Returns
    -------
    freq : tuple
        numpy.ndarray of frequencies
    POW : tuple
        1 dim numpy.ndarray, frequency series

    Examples
    --------
    >>> compound_powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df)
    Out[1]: (freq, POW)

    >>> POW.shape
    Out[2]: (len(analog_sig1),)
    """
    return powerspec([np.sum(data, axis=0)], tbin, Df=Df, units=True,
                     pointProcess=pointProcess)
Calculate the power spectrum of the compound/sum signal. data is first summed across units, then the power spectrum is calculated. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray, 1st axis unit, 2nd axis time tbin : float, binsize in ms Df : float/None, window width of sliding rectangular filter (smoothing), None -> no smoothing pointProcess : bool, if set to True, powerspectrum is normalized to signal length T Returns ------- freq : numpy.ndarray 1 dim numpy.ndarray of frequencies POW : numpy.ndarray 1 dim numpy.ndarray, power at each frequency Examples -------- >>> compound_powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq, POW) >>> POW.shape Out[2]: (len(analog_sig1),)
def l2traceroute_result_output_l2_hop_results_l2_hop_ingress_interface_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") l2traceroute_result = ET.Element("l2traceroute_result") config = l2traceroute_result output = ET.SubElement(l2traceroute_result, "output") l2_hop_results = ET.SubElement(output, "l2-hop-results") l2_hop = ET.SubElement(l2_hop_results, "l2-hop") ingress = ET.SubElement(l2_hop, "ingress") interface_name = ET.SubElement(ingress, "interface-name") interface_name.text = kwargs.pop('interface_name') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
async def writelines(self, lines, eof = False, buffering = True): """ Write lines to current output stream """ for l in lines: await self.write(l, False, buffering) if eof: await self.write(b'', eof, buffering)
Write lines to current output stream
def __collapseLineOrCol(self, line, d): """ Merge tiles in a line or column according to a direction and return a tuple with the new line and the score for the move on this line """ if (d == Board.LEFT or d == Board.UP): inc = 1 rg = xrange(0, self.__size-1, inc) else: inc = -1 rg = xrange(self.__size-1, 0, inc) pts = 0 for i in rg: if line[i] == 0: continue if line[i] == line[i+inc]: v = line[i]*2 if v == self.__goal: self.__won = True line[i] = v line[i+inc] = 0 pts += v return (line, pts)
Merge tiles in a line or column according to a direction and return a tuple with the new line and the score for the move on this line
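To see the merge rule in isolation, a minimal Python 3 sketch of a leftward collapse on a plain list (the method above additionally tracks the goal tile and uses the board size):

def collapse_left(line):
    # merge equal neighbours toward index 0; return (new_line, points)
    pts = 0
    for i in range(len(line) - 1):
        if line[i] != 0 and line[i] == line[i + 1]:
            line[i] *= 2       # merged tile doubles in value
            line[i + 1] = 0    # right neighbour is consumed
            pts += line[i]
    return line, pts

print(collapse_left([2, 2, 4, 4]))  # ([4, 0, 8, 0], 12)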
def render_in_page(request, template):
    """return rendered template in standalone mode or ``False``
    """

    from leonardo.module.web.models import Page
    page = request.leonardo_page if hasattr(
        request, 'leonardo_page') else Page.objects.filter(parent=None).first()

    if page:
        try:
            slug = request.path_info.split("/")[-2:-1][0]
        except IndexError:
            slug = None

        try:
            body = render_to_string(template, RequestContext(request, {
                'request_path': request.path,
                'feincms_page': page,
                'slug': slug,
                'standalone': True}))
            response = http.HttpResponseNotFound(
                body, content_type=CONTENT_TYPE)
        except TemplateDoesNotExist:
            response = False
        return response
    return False
return rendered template in standalone mode or ``False``
def _get_rules_from_aws(self):
        """
        Load the EC2 security rules off AWS into a list of dict.
        Returns: list
        """
        list_of_rules = list()

        if self.profile:
            boto3.setup_default_session(profile_name=self.profile)
        if self.region:
            ec2 = boto3.client('ec2', region_name=self.region)
        else:
            ec2 = boto3.client('ec2')

        security_groups = ec2.describe_security_groups(Filters=self.filters)
        for group in security_groups['SecurityGroups']:
            group_dict = dict()
            group_dict['id'] = group['GroupId']
            group_dict['name'] = group['GroupName']
            group_dict['description'] = group.get('Description', None)
            if (group.get('IpPermissions', None) or
                    group.get('IpPermissionsEgress', None)):
                group_dict['rules'] = list()
                # default to [] so a group with rules in only one direction
                # does not raise TypeError when iterating the other
                for rule in group.get('IpPermissions', []):
                    rule_dict = self._build_rule(rule)
                    rule_dict['direction'] = "INGRESS"
                    group_dict['rules'].append(rule_dict)
                for rule in group.get('IpPermissionsEgress', []):
                    rule_dict = self._build_rule(rule)
                    rule_dict['direction'] = "EGRESS"
                    group_dict['rules'].append(rule_dict)
            list_of_rules.append(group_dict)
        return list_of_rules
Load the EC2 security rules off AWS into a list of dict. Returns: list
def force_constants(self, force_constants):
        """Set force constants

        Parameters
        ----------
        force_constants : array_like
            Force constants matrix. If this is given as a contiguous
            ndarray with order='C' and dtype='double', the internal copy
            of the data is avoided, so some computational resources are
            saved.
            shape=(atoms in supercell, atoms in supercell, 3, 3),
            dtype='double'

        """

        if type(force_constants) is np.ndarray:
            fc_shape = force_constants.shape
            if fc_shape[0] != fc_shape[1]:
                if self._primitive.get_number_of_atoms() != fc_shape[0]:
                    msg = ("Force constants shape disagrees with crystal "
                           "structure setting. This may be due to "
                           "PRIMITIVE_AXIS.")
                    raise RuntimeError(msg)

        self._force_constants = force_constants

        if self._primitive.get_masses() is not None:
            self._set_dynamical_matrix()
Set force constants Parameters ---------- force_constants : array_like Force constants matrix. If this is given as a contiguous ndarray with order='C' and dtype='double', the internal copy of the data is avoided, so some computational resources are saved. shape=(atoms in supercell, atoms in supercell, 3, 3), dtype='double'
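The copy avoidance described above depends on memory layout; a minimal sketch of building an array the setter can adopt directly (the supercell size is a made-up example):

import numpy as np

natom = 8  # hypothetical number of atoms in the supercell
# order='C' plus dtype='double' lets the setter keep a reference
# instead of making an internal copy
fc = np.zeros((natom, natom, 3, 3), dtype='double', order='C')
assert fc.flags['C_CONTIGUOUS']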
def scan(host, port=80, url=None, https=False, timeout=1, max_size=65535):
    """
    Scan a network port

    Parameters
    ----------
    host : str
        Host or ip address to scan

    port : int, optional
        Port to scan, default=80

    url : str, optional
        URL to perform get request to on the host and port specified

    https : bool, optional
        Perform ssl connection on the socket, default=False

    timeout : float
        Timeout for network operations, default=1

    max_size : int, optional
        Maximum number of bytes to read from the response to a get
        request, default=65535

    Returns
    -------
    dict
        Result dictionary that contains the following keys:
            host - The host or IP address that was scanned
            port - The port number that was scanned
            state - The state of the port, will be either "open" or "closed"
            durations - An ordered dictionary with floating point value of the
            time elapsed for each connection operation

    Raises
    ------
    ScanFailed - The scan operation failed
    """
    starts = OrderedDict()
    ends = OrderedDict()
    port = int(port)
    result = dict(
        host=host, port=port, state='closed', durations=OrderedDict()
    )
    if url:
        timeout = 1
        result['code'] = None

    starts['all'] = starts['dns'] = datetime.datetime.now()

    # DNS Lookup
    try:
        hostip = socket.gethostbyname(host)
        result['ip'] = hostip
        ends['dns'] = datetime.datetime.now()
    except socket.gaierror:
        raise ScanFailed('DNS Lookup failed', result=result)

    # TCP Connect
    starts['connect'] = datetime.datetime.now()
    network_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    network_socket.settimeout(timeout)
    result_connection = network_socket.connect_ex((hostip, port))
    ends['connect'] = datetime.datetime.now()

    # SSL
    if https:
        starts['ssl'] = datetime.datetime.now()
        try:
            network_socket = ssl.wrap_socket(network_socket)
        except socket.timeout:
            raise ScanFailed('SSL socket timeout', result=result)
        ends['ssl'] = datetime.datetime.now()

    # Get request
    if result_connection == 0 and url:
        starts['request'] = datetime.datetime.now()
        network_socket.send(
            "GET {0} HTTP/1.0\r\nHost: {1}\r\n\r\n".format(
                url, host
            ).encode('ascii'))
        if max_size:
            data = network_socket.recv(max_size)
        else:
            data = network_socket.recv()
        result['length'] = len(data)
        data = data.decode('ascii', errors='ignore')
        result['response'] = data
        try:
            result['code'] = int(data.split('\n')[0].split()[1])
        except IndexError:
            pass
        ends['request'] = datetime.datetime.now()
    network_socket.close()

    # Calculate durations
    ends['all'] = datetime.datetime.now()
    for duration in starts.keys():
        if duration in ends.keys():
            result['durations'][duration] = ends[duration] - starts[duration]
    if result_connection == 0:
        result['state'] = 'open'

    return result
Scan a network port Parameters ---------- host : str Host or ip address to scan port : int, optional Port to scan, default=80 url : str, optional URL to perform get request to on the host and port specified https : bool, optional Perform ssl connection on the socket, default=False timeout : float Timeout for network operations, default=1 max_size : int, optional Maximum number of bytes to read from the response to a get request, default=65535 Returns ------- dict Result dictionary that contains the following keys: host - The host or IP address that was scanned port - The port number that was scanned state - The state of the port, will be either "open" or "closed" durations - An ordered dictionary with floating point value of the time elapsed for each connection operation Raises ------ ScanFailed - The scan operation failed
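A hedged usage sketch; the host below is a placeholder:

try:
    result = scan('example.com', port=443, url='/', https=True)
    print(result['state'], result.get('code'))  # e.g. open 200
    for name, delta in result['durations'].items():
        print(name, delta.total_seconds())
except ScanFailed as err:
    print('scan failed:', err)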
def disableGroup(self): """Disables all radio buttons in the group""" radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group] for radioButton in radioButtonListInGroup: radioButton.disable()
Disables all radio buttons in the group
def reqHeadTimeStamp( self, contract: Contract, whatToShow: str, useRTH: bool, formatDate: int = 1) -> datetime.datetime: """ Get the datetime of earliest available historical data for the contract. Args: contract: Contract of interest. useRTH: If True then only show data from within Regular Trading Hours, if False then show all data. formatDate: If set to 2 then the result is returned as a timezone-aware datetime.datetime with UTC timezone. """ return self._run( self.reqHeadTimeStampAsync( contract, whatToShow, useRTH, formatDate))
Get the datetime of earliest available historical data for the contract. Args: contract: Contract of interest. useRTH: If True then only show data from within Regular Trading Hours, if False then show all data. formatDate: If set to 2 then the result is returned as a timezone-aware datetime.datetime with UTC timezone.
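A usage sketch against a locally running TWS or IB Gateway; the connection parameters and contract are assumptions:

from ib_insync import IB, Stock

ib = IB()
ib.connect('127.0.0.1', 7497, clientId=1)  # assumed paper-trading port
contract = Stock('AAPL', 'SMART', 'USD')
earliest = ib.reqHeadTimeStamp(contract, whatToShow='TRADES', useRTH=True)
print(earliest)  # datetime of the earliest available bar
ib.disconnect()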
def dump_np_vars(self, store_format='csv', delimiter=','): """ Dump the TDS simulation data to files by calling subroutines `write_lst` and `write_np_dat`. Parameters ----------- store_format : str dump format in `('csv', 'txt', 'hdf5')` delimiter : str delimiter for the `csv` and `txt` format Returns ------- bool: success flag """ ret = False if self.system.files.no_output is True: logger.debug('no_output is True, thus no TDS dump saved ') return True if self.write_lst() and self.write_np_dat(store_format=store_format, delimiter=delimiter): ret = True return ret
Dump the TDS simulation data to files by calling subroutines `write_lst` and `write_np_dat`. Parameters ----------- store_format : str dump format in `('csv', 'txt', 'hdf5')` delimiter : str delimiter for the `csv` and `txt` format Returns ------- bool: success flag
def _concat_translations(translations: List[Translation], stop_ids: Set[int], length_penalty: LengthPenalty, brevity_penalty: Optional[BrevityPenalty] = None) -> Translation: """ Combines translations through concatenation. :param translations: A list of translations (sequence starting with BOS symbol, attention_matrix), score and length. :param stop_ids: The EOS symbols. :param length_penalty: Instance of the LengthPenalty class initialized with alpha and beta. :param brevity_penalty: Optional Instance of the BrevityPenalty class initialized with a brevity weight. :return: A concatenation of the translations with a score. """ # Concatenation of all target ids without BOS and EOS target_ids = [] attention_matrices = [] beam_histories = [] # type: List[BeamHistory] estimated_reference_length = None # type: float for idx, translation in enumerate(translations): if idx == len(translations) - 1: target_ids.extend(translation.target_ids) attention_matrices.append(translation.attention_matrix) else: if translation.target_ids[-1] in stop_ids: target_ids.extend(translation.target_ids[:-1]) attention_matrices.append(translation.attention_matrix[:-1, :]) else: target_ids.extend(translation.target_ids) attention_matrices.append(translation.attention_matrix) beam_histories.extend(translation.beam_histories) if translation.estimated_reference_length is not None: if estimated_reference_length is None: estimated_reference_length = translation.estimated_reference_length else: estimated_reference_length += translation.estimated_reference_length # Combine attention matrices: attention_shapes = [attention_matrix.shape for attention_matrix in attention_matrices] attention_matrix_combined = np.zeros(np.sum(np.asarray(attention_shapes), axis=0)) pos_t, pos_s = 0, 0 for attention_matrix, (len_t, len_s) in zip(attention_matrices, attention_shapes): attention_matrix_combined[pos_t:pos_t + len_t, pos_s:pos_s + len_s] = attention_matrix pos_t += len_t pos_s += len_s def _brevity_penalty(hypothesis_length, reference_length): return 0.0 if brevity_penalty is None else brevity_penalty.get(hypothesis_length, reference_length) # Unnormalize + sum and renormalize the score: score = sum((translation.score + _brevity_penalty(len(translation.target_ids), translation.estimated_reference_length)) \ * length_penalty.get(len(translation.target_ids)) for translation in translations) score = score / length_penalty.get(len(target_ids)) - _brevity_penalty(len(target_ids), estimated_reference_length) return Translation(target_ids, attention_matrix_combined, score, beam_histories, estimated_reference_length=estimated_reference_length)
Combines translations through concatenation. :param translations: A list of translations (sequence starting with BOS symbol, attention_matrix), score and length. :param stop_ids: The EOS symbols. :param length_penalty: Instance of the LengthPenalty class initialized with alpha and beta. :param brevity_penalty: Optional Instance of the BrevityPenalty class initialized with a brevity weight. :return: A concatenation of the translations with a score.
def load_map(path, value): ''' Loads the map at the specified path, and returns the specified value from that map. CLI Example: .. code-block:: bash # Assuming the map is loaded in your formula SLS as follows: # # {% from "myformula/map.jinja" import myformula with context %} # # the following syntax can be used to load the map and check the # results: salt myminion jinja.load_map myformula/map.jinja myformula ''' tmplstr = textwrap.dedent('''\ {{% from "{path}" import {value} with context %}} {{{{ {value} | tojson }}}} '''.format(path=path, value=value)) return salt.template.compile_template_str( tmplstr, salt.loader.render(__opts__, __salt__), __opts__['renderer'], __opts__['renderer_blacklist'], __opts__['renderer_whitelist'])
Loads the map at the specified path, and returns the specified value from that map. CLI Example: .. code-block:: bash # Assuming the map is loaded in your formula SLS as follows: # # {% from "myformula/map.jinja" import myformula with context %} # # the following syntax can be used to load the map and check the # results: salt myminion jinja.load_map myformula/map.jinja myformula
def adaptive_model_average(X, penalization, method): """Run ModelAverage in default mode (QuicGraphicalLassoCV) to obtain proportion matrix. NOTE: Only method = 'binary' really makes sense in this case. """ n_trials = 100 print("Adaptive ModelAverage with:") print(" estimator: QuicGraphicalLasso (default)") print(" n_trials: {}".format(n_trials)) print(" penalization: {}".format(penalization)) print(" adaptive-method: {}".format(method)) # if penalization is random, first find a decent scalar lam_ to build # random perturbation matrix around. lam doesn't matter for fully-random. lam = 0.5 if penalization == "random": cv_model = QuicGraphicalLassoCV( cv=2, n_refinements=6, n_jobs=1, init_method="cov", score_metric=metric ) cv_model.fit(X) lam = cv_model.lam_ print(" lam: {}".format(lam)) model = AdaptiveGraphicalLasso( estimator=ModelAverage( n_trials=n_trials, penalization=penalization, lam=lam, n_jobs=1 ), method=method, ) model.fit(X) lam_norm_ = np.linalg.norm(model.estimator_.lam_) print(" ||lam_||_2: {}".format(lam_norm_)) return model.estimator_.covariance_, model.estimator_.precision_, lam_norm_
Run ModelAverage in default mode (QuicGraphicalLassoCV) to obtain proportion matrix. NOTE: Only method = 'binary' really makes sense in this case.
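A hedged invocation on synthetic data. Note that `metric` in the function body is a module-level name in the original script; choosing the 'fully-random' penalization mentioned in the function's own comment avoids that code path:

import numpy as np

X = np.random.randn(200, 10)  # 200 samples of a hypothetical 10-variable signal
cov, prec, lam_norm = adaptive_model_average(
    X, penalization='fully-random', method='binary')
print(prec.shape)  # (10, 10)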
def plot_calibrated_diode(dio_cross,chan_per_coarse=8,feedtype='l',**kwargs): ''' Plots the corrected noise diode spectrum for a given noise diode measurement after application of the inverse Mueller matrix for the electronics chain. ''' #Get full stokes data for the ND observation obs = Waterfall(dio_cross,max_load=150) freqs = obs.populate_freqs() tsamp = obs.header['tsamp'] data = obs.data obs = None I,Q,U,V = get_stokes(data,feedtype) data = None #Calculate Mueller Matrix variables for each coarse channel psis = phase_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs) G = gain_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs) #Apply the Mueller matrix to original noise diode data and refold I,Q,U,V = apply_Mueller(I,Q,U,V,G,psis,chan_per_coarse,feedtype) I_OFF,I_ON = foldcal(I,tsamp,**kwargs) Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs) U_OFF,U_ON = foldcal(U,tsamp,**kwargs) V_OFF,V_ON = foldcal(V,tsamp,**kwargs) #Delete data arrays for space I = None Q = None U = None V = None #Plot new ON-OFF spectra plt.plot(freqs,I_ON-I_OFF,'k-',label='I') plt.plot(freqs,Q_ON-Q_OFF,'r-',label='Q') plt.plot(freqs,U_ON-U_OFF,'g-',label='U') plt.plot(freqs,V_ON-V_OFF,'m-',label='V') plt.legend() plt.xlabel('Frequency (MHz)') plt.title('Calibrated Full Stokes Noise Diode Spectrum') plt.ylabel('Power (Counts)')
Plots the corrected noise diode spectrum for a given noise diode measurement after application of the inverse Mueller matrix for the electronics chain.
def _grab_raw_image(self): """ :returns: the current 3-channel image """ m = self.ale.getScreenRGB() return m.reshape((self.height, self.width, 3))
:returns: the current 3-channel image
def start(self, blocking=True):
        """Start the producer.

        This will eventually fire the ``server_start`` and ``running``
        events in sequence, which signify that the incoming TCP request
        socket is running and the workers have been forked, respectively.

        If ``blocking`` is False, control returns to the caller as soon
        as the server has been spawned."""
        self.setup_zmq()
        if blocking:
            self.serve()
        else:
            eventlet.spawn(self.serve)
            # ensure that self.serve runs now, as calling code will
            # expect start() to have started the server even in
            # non-blocking mode
            eventlet.sleep(0)
Start the producer. This will eventually fire the ``server_start`` and ``running`` events in sequence, which signify that the incoming TCP request socket is running and the workers have been forked, respectively. If ``blocking`` is False, control returns to the caller as soon as the server has been spawned.
def is_module_function(obj, prop): """ Checking and setting type to MODULE_FUNCTION Args: obj: ModuleType prop: FunctionType Return: Boolean Raise: prop_type_error: When the type of prop is not valid prop_in_obj_error: When prop is not in the obj(module/class) prop_is_func_error: When prop is not a callable stuff """ python_version = sys.version_info[0] if python_version == 3: unicode = str if prop and (isinstance(prop, str) or isinstance(prop, unicode)): #property if prop in dir(obj): if ( isinstance(getattr(obj, prop), FunctionType) or isinstance(getattr(obj, prop), BuiltinFunctionType) or inspect.ismethod(getattr(obj, prop)) ): #inspect.ismethod for python2.7 #isinstance(...) for python3.x return True else: ErrorHandler.prop_is_func_error(obj, prop) else: ErrorHandler.prop_in_obj_error(obj, prop) elif prop: ErrorHandler.prop_type_error(prop) return False
Checking and setting type to MODULE_FUNCTION Args: obj: ModuleType prop: FunctionType Return: Boolean Raise: prop_type_error: When the type of prop is not valid prop_in_obj_error: When prop is not in the obj(module/class) prop_is_func_error: When prop is not a callable stuff
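Two quick checks, assuming the module's error handlers raise on failure:

import math

print(is_module_function(math, 'sqrt'))  # True: a built-in function
# is_module_function(math, 'pi') would hand off to
# ErrorHandler.prop_is_func_error, since math.pi is not callable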
def getargspec(func):
    """
    Used because getargspec for python 2.7 does not accept functools.partial
    which is the type for pytest fixtures.

    getargspec excerpted from:

    sphinx.util.inspect
    ~~~~~~~~~~~~~~~~~~~
    Helpers for inspecting Python modules.
    :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.

    Like inspect.getargspec but supports functools.partial as well.
    """
    # type: (Any) -> Any
    if inspect.ismethod(func):
        func = func.__func__
    parts = 0, ()  # type: Tuple[int, Tuple[unicode, ...]]
    if type(func) is partial:
        keywords = func.keywords
        if keywords is None:
            keywords = {}
        parts = len(func.args), keywords.keys()
        func = func.func
    if not inspect.isfunction(func):
        raise TypeError('%r is not a Python function' % func)
    args, varargs, varkw = inspect.getargs(func.__code__)
    func_defaults = func.__defaults__
    if func_defaults is None:
        func_defaults = []
    else:
        func_defaults = list(func_defaults)
    if parts[0]:
        args = args[parts[0]:]
    if parts[1]:
        for arg in parts[1]:
            i = args.index(arg) - len(args)  # type: ignore
            del args[i]
            try:
                del func_defaults[i]
            except IndexError:
                pass
    return inspect.ArgSpec(args, varargs, varkw, func_defaults)
Used because getargspec for python 2.7 does not accept functools.partial which is the type for pytest fixtures. getargspec excerpted from: sphinx.util.inspect ~~~~~~~~~~~~~~~~~~~ Helpers for inspecting Python modules. :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. Like inspect.getargspec but supports functools.partial as well.
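A short sketch of the partial handling, on an interpreter where inspect.ArgSpec still exists; the fixture-like function is invented for illustration:

from functools import partial

def fixture(request, scope, autouse=False):
    pass

spec = getargspec(partial(fixture, scope='session'))
print(spec.args)  # ['request', 'autouse'] -- the bound argument is removed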
def from_events(self, instance, ev_args, ctx): """ Collect the events and convert them to a single XML subtree, which then gets appended to the list at `instance`. `ev_args` must be the arguments of the ``"start"`` event of the new child. This method is suspendable. """ # goal: collect all elements starting with the element for which we got # the start-ev_args in a lxml.etree.Element. def make_from_args(ev_args, parent): el = etree.SubElement(parent, tag_to_str((ev_args[0], ev_args[1]))) for key, value in ev_args[2].items(): el.set(tag_to_str(key), value) return el root_el = make_from_args(ev_args, self.__get__(instance, type(instance))) # create an element stack stack = [root_el] while stack: # we get send all sax-ish events until we return. we return when # the stack is empty, i.e. when our top element ended. ev_type, *ev_args = yield if ev_type == "start": # new element started, create and push to stack stack.append(make_from_args(ev_args, stack[-1])) elif ev_type == "text": # text for current element curr = stack[-1] if curr.text is not None: curr.text += ev_args[0] else: curr.text = ev_args[0] elif ev_type == "end": # element ended, remove from stack (it is already appended to # the current element) stack.pop() else: # not in coverage -- this is more like an assertion raise ValueError(ev_type)
Collect the events and convert them to a single XML subtree, which then gets appended to the list at `instance`. `ev_args` must be the arguments of the ``"start"`` event of the new child. This method is suspendable.
def as_python(self, infile, include_original_shex: bool=False): """ Return the python representation of the document """ self._context.resolve_circular_references() # add forwards for any circular entries body = '' for k in self._context.ordered_elements(): v = self._context.grammarelts[k] if isinstance(v, (JSGLexerRuleBlock, JSGObjectExpr)): body += v.as_python(k) if isinstance(v, JSGObjectExpr) and not self._context.has_typeid: self._context.directives.append(f'_CONTEXT.TYPE_EXCEPTIONS.append("{k}")') elif isinstance(v, JSGForwardRef): pass elif isinstance(v, (JSGValueType, JSGArrayExpr)): body += f"\n\n\n{k} = {v.signature_type()}" else: raise NotImplementedError("Unknown grammar elt for {}".format(k)) self._context.forward_refs.pop(k, None) body = '\n' + '\n'.join(self._context.directives) + body return _jsg_python_template.format(infile=infile, original_shex='# ' + self.text if include_original_shex else "", version=__version__, gendate=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), body=body)
Return the python representation of the document
def return_xyz(self, labels=None):
        """Returns the xyz location for some channels.

        Parameters
        ----------
        labels : list of str, optional
            the names of the channels.

        Returns
        -------
        numpy.ndarray
            an (n, 3) array with the x, y, z position of each channel.
        """
        all_labels = self.return_label()

        if labels is None:
            labels = all_labels

        xyz = []
        for one_label in labels:
            idx = all_labels.index(one_label)
            xyz.append(self.chan[idx].xyz)

        return asarray(xyz)
Returns the xyz location for some channels. Parameters ---------- labels : list of str, optional the names of the channels. Returns ------- numpy.ndarray an (n, 3) array with the x, y, z position of each channel.
def poa_horizontal_ratio(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth): """ Calculates the ratio of the beam components of the plane of array irradiance and the horizontal irradiance. Input all angles in degrees. Parameters ---------- surface_tilt : numeric Panel tilt from horizontal. surface_azimuth : numeric Panel azimuth from north. solar_zenith : numeric Solar zenith angle. solar_azimuth : numeric Solar azimuth angle. Returns ------- ratio : numeric Ratio of the plane of array irradiance to the horizontal plane irradiance """ cos_poa_zen = aoi_projection(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth) cos_solar_zenith = tools.cosd(solar_zenith) # ratio of tilted and horizontal beam irradiance ratio = cos_poa_zen / cos_solar_zenith try: ratio.name = 'poa_ratio' except AttributeError: pass return ratio
Calculates the ratio of the beam components of the plane of array irradiance and the horizontal irradiance. Input all angles in degrees. Parameters ---------- surface_tilt : numeric Panel tilt from horizontal. surface_azimuth : numeric Panel azimuth from north. solar_zenith : numeric Solar zenith angle. solar_azimuth : numeric Solar azimuth angle. Returns ------- ratio : numeric Ratio of the plane of array irradiance to the horizontal plane irradiance
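A scalar sanity check: when the surface and solar azimuths coincide, the projection reduces to cos(zenith - tilt). The numbers below are rounded:

import numpy as np

surface_tilt, solar_zenith = 30.0, 45.0  # degrees, equal azimuths
cos_poa_zen = np.cos(np.radians(solar_zenith - surface_tilt))
ratio = cos_poa_zen / np.cos(np.radians(solar_zenith))
print(round(ratio, 3))  # 1.366 -- the tilted plane sees more beam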
def mark(self, partition, offset): """ Set the high-water mark in the current context. In order to know the current partition, it is helpful to initialize the consumer to provide partition info via: .. code:: python consumer.provide_partition_info() """ max_offset = max(offset + 1, self.high_water_mark.get(partition, 0)) self.logger.debug("Setting high-water mark to: %s", {partition: max_offset}) self.high_water_mark[partition] = max_offset
Set the high-water mark in the current context. In order to know the current partition, it is helpful to initialize the consumer to provide partition info via: .. code:: python consumer.provide_partition_info()
def char_sets(): """Return a list of the IANA Character Sets, or an empty list if the IANA website is unreachable. Store it as a function attribute so that we only build the list once. """ if not hasattr(char_sets, 'setlist'): clist = [] try: data = requests.get('http://www.iana.org/assignments/character-' 'sets/character-sets-1.csv') except requests.exceptions.RequestException: return [] for line in data.iter_lines(): if line: line = line.decode("utf-8") if line.count(',') > 0: vals = line.split(',') if vals[0]: clist.append(vals[0]) else: clist.append(vals[1]) char_sets.setlist = clist return char_sets.setlist
Return a list of the IANA Character Sets, or an empty list if the IANA website is unreachable. Store it as a function attribute so that we only build the list once.
def parse_translation(f, lineno):
    """Read a single translation entry from the file f and return a
    tuple with the comments, msgid and msgstr.  The comments are returned
    as a list of lines which do not end in new-lines.  The msgid and
    msgstr are strings, possibly with embedded newlines"""
    line = f.readline()

    def get_line(f, line, need_keys, lineno, default='""'):
        line = line.rstrip()
        if not line:
            return lineno, need_keys[0], default, line
        key, value = line.split(' ', 1)
        # Parse msgid
        if key not in need_keys:
            print 'Error Line, need %r: %d, line=%s' % (need_keys, lineno, line)
            raise RuntimeError("parse error")
        v = value
        while 1:
            line = f.readline()
            line = line.rstrip()
            lineno += 1
            if not line or line[0] != '"':
                break
            v += '\n' + line[:]
        return lineno, key, v, line

    # Parse comments
    comments = []
    while 1:
        if not line:
            return lineno, None, None, None
        if line.strip() == '':
            return lineno, comments, None, None
        elif line[0] == '#':
            comments.append(line[:-1])
        else:
            break
        line = f.readline()
        lineno += 1

    lineno, key, msgid, line = get_line(f, line, ['msgid'], lineno)
    lineno, key, value, line = get_line(f, line, ['msgid_plural', 'msgstr'], lineno)
    if key == 'msgid_plural':
        msgid = (msgid, value)
        lineno, key, v1, line = get_line(f, line, ['msgstr[0]'], lineno)
        lineno, key, v2, line = get_line(f, line, ['msgstr[1]'], lineno)
        msgstr = (v1, v2)
    else:
        msgstr = value

    if line != '':
        print 'File: %s Error Line: %s' % (f.name, line)
        raise RuntimeError("parse error")

    return lineno, comments, msgid, msgstr
Read a single translation entry from the file f and return a tuple with the comments, msgid and msgstr. The comments are returned as a list of lines which do not end in new-lines. The msgid and msgstr are strings, possibly with embedded newlines
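A hedged round trip under the same Python 2 interpreter the function targets; the catalogue entry is invented:

import io

sample = io.StringIO(u'#: src/example.c:42\n'
                     u'msgid "%d file"\n'
                     u'msgid_plural "%d files"\n'
                     u'msgstr[0] "%d Datei"\n'
                     u'msgstr[1] "%d Dateien"\n')
lineno, comments, msgid, msgstr = parse_translation(sample, 1)
# msgid is a (singular, plural) tuple and msgstr holds both forms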
def _render_reward(self, r: np.float32) -> None: '''Prints reward `r`.''' print("reward = {:.4f}".format(float(r))) print()
Prints reward `r`.
def get_flexports_output_flexport_list_port_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_flexports = ET.Element("get_flexports") config = get_flexports output = ET.SubElement(get_flexports, "output") flexport_list = ET.SubElement(output, "flexport-list") port_id = ET.SubElement(flexport_list, "port-id") port_id.text = kwargs.pop('port_id') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def associate_keys(user_dict, client):
    """
    This whole function is black magic; it is needed, however, because
    of the way we keep the key-machine association
    """
    added_keys = user_dict['keypairs']
    print ">>>Updating Keys-Machines association"
    for key in added_keys:
        machines = added_keys[key]['machines']
        if machines:
            try:
                for machine in machines:
                    cloud_id = machine[0]
                    machine_id = machine[1]
                    ssh_user = machine[3]
                    ssh_port = machine[-1]
                    key = client.keys[key]
                    cloud = cloud_from_id(client, cloud_id)
                    cloud.update_machines()
                    mach = machine_from_id(cloud, machine_id)
                    public_ips = mach.info.get('public_ips', None)
                    if public_ips:
                        host = public_ips[0]
                    else:
                        host = ""
                    key.associate_to_machine(cloud_id=cloud_id,
                                             machine_id=machine_id,
                                             host=host,
                                             ssh_port=ssh_port,
                                             ssh_user=ssh_user)
                    print "associated machine %s" % machine_id
            except Exception as e:
                pass
    client.update_keys()
    print
This whole function is black magic; it is needed, however, because of the way we keep the key-machine association
def gc(cn, ns=None, lo=None, iq=None, ico=None, pl=None): """ This function is a wrapper for :meth:`~pywbem.WBEMConnection.GetClass`. Retrieve a class. Parameters: cn (:term:`string` or :class:`~pywbem.CIMClassName`): Name of the class to be retrieved (case independent). If specified as a `CIMClassName` object, its `host` attribute will be ignored. ns (:term:`string`): Name of the CIM namespace to be used (case independent). If `None`, defaults to the namespace of the `cn` parameter if specified as a `CIMClassName`, or to the default namespace of the connection. lo (:class:`py:bool`): LocalOnly flag: Exclude inherited properties. `None` will cause the server default of `True` to be used. iq (:class:`py:bool`): IncludeQualifiers flag: Include qualifiers. `None` will cause the server default of `True` to be used. ico (:class:`py:bool`): IncludeClassOrigin flag: Include class origin information for properties and methods in the retrieved class. `None` will cause the server default of `False` to be used. pl (:term:`string` or :term:`py:iterable` of :term:`string`): PropertyList: Names of properties to be included (if not otherwise excluded). An empty iterable indicates to include no properties. If `None`, all properties will be included. Returns: :class:`~pywbem.CIMClass`: The retrieved class. """ return CONN.GetClass(cn, ns, LocalOnly=lo, IncludeQualifiers=iq, IncludeClassOrigin=ico, PropertyList=pl)
This function is a wrapper for :meth:`~pywbem.WBEMConnection.GetClass`. Retrieve a class. Parameters: cn (:term:`string` or :class:`~pywbem.CIMClassName`): Name of the class to be retrieved (case independent). If specified as a `CIMClassName` object, its `host` attribute will be ignored. ns (:term:`string`): Name of the CIM namespace to be used (case independent). If `None`, defaults to the namespace of the `cn` parameter if specified as a `CIMClassName`, or to the default namespace of the connection. lo (:class:`py:bool`): LocalOnly flag: Exclude inherited properties. `None` will cause the server default of `True` to be used. iq (:class:`py:bool`): IncludeQualifiers flag: Include qualifiers. `None` will cause the server default of `True` to be used. ico (:class:`py:bool`): IncludeClassOrigin flag: Include class origin information for properties and methods in the retrieved class. `None` will cause the server default of `False` to be used. pl (:term:`string` or :term:`py:iterable` of :term:`string`): PropertyList: Names of properties to be included (if not otherwise excluded). An empty iterable indicates to include no properties. If `None`, all properties will be included. Returns: :class:`~pywbem.CIMClass`: The retrieved class.
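Assuming CONN is the module-level connection these wrappers share, a typical call looks like this (the namespace is an assumption):

cls = gc('CIM_ManagedElement', ns='root/cimv2', lo=False, iq=True)
print(cls.classname)  # the retrieved CIMClass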
def get_res(ds, t_srs=None, square=False): """Get GDAL Dataset raster resolution """ gt = ds.GetGeoTransform() ds_srs = get_ds_srs(ds) #This is Xres, Yres res = [gt[1], np.abs(gt[5])] if square: res = [np.mean(res), np.mean(res)] if t_srs is not None and not ds_srs.IsSame(t_srs): if True: #This diagonal approach is similar to the approach in gdaltransformer.cpp #Bad news for large extents near the poles #ullr = get_ullr(ds, t_srs) #diag = np.sqrt((ullr[0]-ullr[2])**2 + (ullr[1]-ullr[3])**2) extent = ds_extent(ds, t_srs) diag = np.sqrt((extent[2]-extent[0])**2 + (extent[3]-extent[1])**2) res = diag / np.sqrt(ds.RasterXSize**2 + ds.RasterYSize**2) res = [res, res] else: #Compute from center pixel ct = osr.CoordinateTransformation(ds_srs, t_srs) pt = get_center(ds) #Transform center coordinates pt_ct = ct.TransformPoint(*pt) #Transform center + single pixel offset coordinates pt_ct_plus = ct.TransformPoint(pt[0] + gt[1], pt[1] + gt[5]) #Compute resolution in new units res = [pt_ct_plus[0] - pt_ct[0], np.abs(pt_ct_plus[1] - pt_ct[1])] return res
Get GDAL Dataset raster resolution
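A hedged sketch; 'dem.tif' is a placeholder raster and EPSG:32610 an arbitrary target projection:

from osgeo import gdal, osr

ds = gdal.Open('dem.tif')  # hypothetical input raster
print(get_res(ds))         # native [xres, yres]

t_srs = osr.SpatialReference()
t_srs.ImportFromEPSG(32610)
print(get_res(ds, t_srs=t_srs, square=True))  # squared-off res in target units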
def fit(self, X, y):
        """Learn vocabulary from training set.

        Args:
            X : iterable. An iterable which yields either str, unicode or file objects.
            y : iterable. An iterable which yields label sequences.

        Returns:
            self : IndexTransformer.
        """
        self._word_vocab.add_documents(X)
        self._label_vocab.add_documents(y)
        if self._use_char:
            for doc in X:
                self._char_vocab.add_documents(doc)

        self._word_vocab.build()
        self._char_vocab.build()
        self._label_vocab.build()

        return self
Learn vocabulary from training set. Args: X : iterable. An iterable which yields either str, unicode or file objects. y : iterable. An iterable which yields label sequences. Returns: self : IndexTransformer.
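A toy fit on two tokenized sentences with IOB tags; the data and the use_char constructor flag are illustrative assumptions:

X = [['EU', 'rejects', 'German', 'call'],
     ['Peter', 'Blackburn']]
y = [['B-ORG', 'O', 'B-MISC', 'O'],
     ['B-PER', 'I-PER']]

p = IndexTransformer(use_char=True)  # assumed constructor signature
p.fit(X, y)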
def commit_sell(self, account_id, sell_id, **params): """https://developers.coinbase.com/api/v2#commit-a-sell""" response = self._post( 'v2', 'accounts', account_id, 'sells', sell_id, 'commit', data=params) return self._make_api_object(response, Sell)
https://developers.coinbase.com/api/v2#commit-a-sell
def to_dict(self): """ Return the node as a dictionary. Returns ------- dict: All the attributes of this node as a dictionary (minus the left and right). """ out = {} for key in self.__dict__.keys(): if key not in ['left', 'right', 'missing', 'parent']: out[key] = self.__dict__[key] return out
Return the node as a dictionary. Returns ------- dict: All the attributes of this node as a dictionary (minus the left and right).
def copy_directory(src, dest, force=False): ''' Copy an entire directory recursively ''' if os.path.exists(dest) and force is True: shutil.rmtree(dest) try: shutil.copytree(src, dest) except OSError as e: # If the error was caused because the source wasn't a directory if e.errno == errno.ENOTDIR: shutil.copy(src, dest) else: bot.error('Directory not copied. Error: %s' % e) sys.exit(1)
Copy an entire directory recursively
def info(self, msg, indent=0, **kwargs):
        """invoke ``self.logger.info``"""
        return self.logger.info(self._indent(msg, indent), **kwargs)
invoke ``self.logger.info``
def _collapse_outgroup(tree, taxdicts):
    """ collapse outgroup in ete Tree for easier viewing """
    ## check that all tests have the same outgroup
    outg = taxdicts[0]["p4"]
    if not all([i["p4"] == outg for i in taxdicts]):
        raise Exception("all tests must share the same outgroup (p4)")

    ## prune tree, keep only one sample from outgroup
    tre = ete.Tree(tree.write(format=1)) #tree.copy(method="deepcopy")
    alltax = [i for i in tre.get_leaf_names() if i not in outg]
    alltax += [outg[0]]
    tre.prune(alltax)
    tre.search_nodes(name=outg[0])[0].name = "outgroup"
    tre.ladderize()

    ## remove other outgroups from taxdicts
    taxd = copy.deepcopy(taxdicts)
    newtaxdicts = []
    for test in taxd:
        #test["p4"] = [outg[0]]
        test["p4"] = ["outgroup"]
        newtaxdicts.append(test)

    return tre, newtaxdicts
collapse outgroup in ete Tree for easier viewing
def request_fetch(self, user, repo, request, pull=False, force=False): #pragma: no cover
        '''Fetches given request as a branch, and switch if pull is true

        :param user: namespace of the repository
        :param repo: name of the repository to fetch the request from
        :param request: identifier of the request to fetch
        :param pull: when True, switch to the fetched branch

        Meant to be implemented by subclasses
        '''
        raise NotImplementedError
Fetches given request as a branch, and switch if pull is true :param user: namespace of the repository :param repo: name of the repository to fetch the request from :param request: identifier of the request to fetch :param pull: when True, switch to the fetched branch Meant to be implemented by subclasses
def enable_llama_ha(self, new_llama_host_id, zk_service_name=None, new_llama_role_name=None): """ Enable high availability for an Impala Llama ApplicationMaster. This command only applies to CDH 5.1+ Impala services. @param new_llama_host_id: id of the host where the second Llama role will be added. @param zk_service_name: Name of the ZooKeeper service to use for auto-failover. If Impala's ZooKeeper dependency is already set, then that ZooKeeper service will be used for auto-failover, and this parameter may be omitted. @param new_llama_role_name: Name of the new Llama role. If omitted, a name will be generated automatically. @return: Reference to the submitted command. @since: API v8 """ args = dict( newLlamaHostId = new_llama_host_id, zkServiceName = zk_service_name, newLlamaRoleName = new_llama_role_name ) return self._cmd('impalaEnableLlamaHa', data=args, api_version=8)
Enable high availability for an Impala Llama ApplicationMaster. This command only applies to CDH 5.1+ Impala services. @param new_llama_host_id: id of the host where the second Llama role will be added. @param zk_service_name: Name of the ZooKeeper service to use for auto-failover. If Impala's ZooKeeper dependency is already set, then that ZooKeeper service will be used for auto-failover, and this parameter may be omitted. @param new_llama_role_name: Name of the new Llama role. If omitted, a name will be generated automatically. @return: Reference to the submitted command. @since: API v8
def start(vm_name, call=None):
    '''
    Call GCE 'start' on the instance.

    .. versionadded:: 2017.7.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start myinstance
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    conn = get_conn()

    __utils__['cloud.fire_event'](
        'event',
        'start instance',
        'salt/cloud/{0}/starting'.format(vm_name),
        args={'name': vm_name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    result = conn.ex_start_node(
        conn.ex_get_node(vm_name)
    )

    __utils__['cloud.fire_event'](
        'event',
        'start instance',
        'salt/cloud/{0}/started'.format(vm_name),
        args={'name': vm_name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return result
Call GCE 'start' on the instance. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt-cloud -a start myinstance
def get_command_arg_list(self, command_name: str, to_parse: Union[Statement, str], preserve_quotes: bool) -> Tuple[Statement, List[str]]: """ Called by the argument_list and argparse wrappers to retrieve just the arguments being passed to their do_* methods as a list. :param command_name: name of the command being run :param to_parse: what is being passed to the do_* method. It can be one of two types: 1. An already parsed Statement 2. An argument string in cases where a do_* method is explicitly called e.g.: Calling do_help('alias create') would cause to_parse to be 'alias create' In this case, the string will be converted to a Statement and returned along with the argument list. :param preserve_quotes: if True, then quotes will not be stripped from the arguments :return: A tuple containing: The Statement used to retrieve the arguments The argument list """ # Check if to_parse needs to be converted to a Statement if not isinstance(to_parse, Statement): to_parse = self.parse(command_name + ' ' + to_parse, expand=False) if preserve_quotes: return to_parse, to_parse.arg_list else: return to_parse, to_parse.argv[1:]
Called by the argument_list and argparse wrappers to retrieve just the arguments being passed to their do_* methods as a list. :param command_name: name of the command being run :param to_parse: what is being passed to the do_* method. It can be one of two types: 1. An already parsed Statement 2. An argument string in cases where a do_* method is explicitly called e.g.: Calling do_help('alias create') would cause to_parse to be 'alias create' In this case, the string will be converted to a Statement and returned along with the argument list. :param preserve_quotes: if True, then quotes will not be stripped from the arguments :return: A tuple containing: The Statement used to retrieve the arguments The argument list
def _HasId(self, schedule, entity_id): """Check if the schedule has an entity with the given id. Args: schedule: The transitfeed.Schedule instance to look in. entity_id: The id of the entity. Returns: True if the schedule has an entity with the id or False if not. """ try: self._GetById(schedule, entity_id) has = True except KeyError: has = False return has
Check if the schedule has an entity with the given id. Args: schedule: The transitfeed.Schedule instance to look in. entity_id: The id of the entity. Returns: True if the schedule has an entity with the id or False if not.
def find_segment(self, ea):
        """ do a linear search for the given address in the segment list;
        returns None when no segment contains the address """
        for seg in self.seglist:
            if seg.startea <= ea < seg.endea:
                return seg
        return None
do a linear search for the given address in the segment list; returns None when no segment contains the address
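If seglist is kept sorted by startea (an assumption the snippet above does not require), a bisect variant avoids the linear scan; in practice the starts list would be cached rather than rebuilt per call:

import bisect

def find_segment_fast(self, ea):
    # binary-search variant; assumes self.seglist is sorted by startea
    starts = [seg.startea for seg in self.seglist]
    i = bisect.bisect_right(starts, ea) - 1
    if i >= 0 and ea < self.seglist[i].endea:
        return self.seglist[i]
    return None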
def edit_by_id( self, id_equip_acesso, id_tipo_acesso, fqdn, user, password, enable_pass): """Edit access type, fqdn, user, password and enable_pass of the relationship of equipment and access type. :param id_tipo_acesso: Access type identifier. :param id_equip_acesso: Equipment identifier. :param fqdn: Equipment FQDN. :param user: User. :param password: Password. :param enable_pass: Enable access. :return: None :raise InvalidParameterError: The parameters fqdn, user, password or access type id are invalid or none. :raise EquipamentoAcessoNaoExisteError: Equipment access type relationship doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ if not is_valid_int_param(id_tipo_acesso): raise InvalidParameterError( u'Access type id is invalid or not informed.') equipamento_acesso_map = dict() equipamento_acesso_map['fqdn'] = fqdn equipamento_acesso_map['user'] = user equipamento_acesso_map['pass'] = password equipamento_acesso_map['enable_pass'] = enable_pass equipamento_acesso_map['id_tipo_acesso'] = id_tipo_acesso equipamento_acesso_map['id_equip_acesso'] = id_equip_acesso url = 'equipamentoacesso/edit/' code, xml = self.submit( {'equipamento_acesso': equipamento_acesso_map}, 'POST', url) return self.response(code, xml)
Edit access type, fqdn, user, password and enable_pass of the relationship of equipment and access type. :param id_tipo_acesso: Access type identifier. :param id_equip_acesso: Equipment identifier. :param fqdn: Equipment FQDN. :param user: User. :param password: Password. :param enable_pass: Enable access. :return: None :raise InvalidParameterError: The parameters fqdn, user, password or access type id are invalid or none. :raise EquipamentoAcessoNaoExisteError: Equipment access type relationship doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def find_visible_elements(self, selector, by=By.CSS_SELECTOR, limit=0): """ Returns a list of matching WebElements that are visible. If "limit" is set and > 0, will only return that many elements. """ self.wait_for_ready_state_complete() if page_utils.is_xpath_selector(selector): by = By.XPATH if page_utils.is_link_text_selector(selector): selector = page_utils.get_link_text_from_selector(selector) by = By.LINK_TEXT v_elems = page_actions.find_visible_elements(self.driver, selector, by) if limit and limit > 0 and len(v_elems) > limit: v_elems = v_elems[:limit] return v_elems
Returns a list of matching WebElements that are visible. If "limit" is set and > 0, will only return that many elements.