code: string (lengths 75 to 104k)
docstring: string (lengths 1 to 46.9k)
def fit_size(min_length: int = 0, max_length: int = None, message=None) -> Filter_T:
    """
    Validate any sized object to ensure the size/length is in a given range
    [min_length, max_length].
    """

    def validate(value):
        length = len(value) if value is not None else 0
        if length < min_length or \
                (max_length is not None and length > max_length):
            _raise_failure(message)
        return value

    return validate
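A minimal stand-alone sketch of the range check performed by fit_size above; the real module reports errors via _raise_failure(message) and returns a Filter_T, so the plain ValueError here is only for illustration.

# Hypothetical stand-alone demo of fit_size's length check.
def check_length(value, min_length=0, max_length=None):
    length = len(value) if value is not None else 0
    if length < min_length or (max_length is not None and length > max_length):
        raise ValueError("length out of range")
    return value

check_length("abc", min_length=1, max_length=5)   # returns "abc"
# check_length("", min_length=1)                  # would raise ValueError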
def get_owner_asset_ids(self, address):
    """
    Get the list of assets owned by an address owner.

    :param address: ethereum account address, hex str
    :return: list of asset DIDs
    """
    block_filter = self._get_event_filter(owner=address)
    log_items = block_filter.get_all_entries(max_tries=5)
    did_list = []
    for log_i in log_items:
        did_list.append(id_to_did(log_i.args['_did']))

    return did_list
def cancel_spot_requests(self, requests):
    """Cancel one or more EC2 spot instance requests.

    :param requests: List of EC2 spot instance request IDs.
    :type requests: list
    """
    ec2_requests = self.retry_on_ec2_error(self.ec2.get_all_spot_instance_requests,
                                           request_ids=requests)
    for req in ec2_requests:
        req.cancel()
def get(self, key, default=None, remote=False):
    """
    Overrides dictionary get behavior to retrieve database objects with
    support for returning a default.  If remote=True then a remote request
    is made to retrieve the database from the remote server, otherwise the
    client's locally cached database object is returned.

    :param str key: Database name used to retrieve the database object.
    :param str default: Default database name.  Defaults to None.
    :param bool remote: Dictates whether the locally cached database is
        returned or a remote request is made to retrieve the database from
        the server.  Defaults to False.

    :returns: Database object
    """
    if not remote:
        return super(CouchDB, self).get(key, default)
    db = self._DATABASE_CLASS(self, key)
    if db.exists():
        super(CouchDB, self).__setitem__(key, db)
        return db
    return default
def setup_debug_logging():
    """set up debug logging"""
    logger = logging.getLogger("xbahn")
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(logging.Formatter("%(name)s: %(message)s"))
    logger.addHandler(ch)
def check_job_collection_name(self, cloud_service_id, job_collection_id):
    '''
    The Check Name Availability operation checks if a new job collection
    with the given name may be created, or if it is unavailable. The result
    of the operation is a Boolean true or false.

    cloud_service_id:
        The cloud service id.
    job_collection_id:
        The name of the job collection.
    '''
    _validate_not_none('cloud_service_id', cloud_service_id)
    _validate_not_none('job_collection_id', job_collection_id)

    path = self._get_cloud_services_path(
        cloud_service_id, "scheduler", "jobCollections")
    path += "?op=checknameavailability&resourceName=" + job_collection_id

    return self._perform_post(path, None, AvailabilityResponse)
def openid_form(parser, token):
    """
    Render OpenID form. Allows presetting the provider::

        {% openid_form "https://www.google.com/accounts/o8/id" %}

    Also creates custom button URLs by concatenating all arguments after the
    provider's URL::

        {% openid_form "https://www.google.com/accounts/o8/id" STATIC_URL "image/for/google.jpg" %}
    """
    bits = get_bits(token)
    if len(bits) > 1:
        return FormNode(bits[0], bits[1:])
    if len(bits) == 1:
        return FormNode(bits[0])
    return FormNode(None)
def complement(self, other):
    """
    Calculate the complement of `self` and `other`.

    :param other: Another SimVariableSet instance.
    :return: The complement result.
    """
    s = SimVariableSet()
    s.register_variables = self.register_variables - other.register_variables
    s.register_variable_offsets = self.register_variable_offsets - other.register_variable_offsets
    s.memory_variables = self.memory_variables - other.memory_variables
    s.memory_variable_addresses = self.memory_variable_addresses - other.memory_variable_addresses

    return s
def get_meta(self, name, meta_key=None):
    '''Get the ``content`` attribute of a meta tag ``name``. For example::

        head.get_meta('description')

    returns the ``content`` attribute of the meta tag with attribute
    ``name`` equal to ``description`` or ``None``.

    If a different meta key needs to be matched, it can be specified via
    the ``meta_key`` parameter::

        head.get_meta('og:title', meta_key='property')
    '''
    meta_key = meta_key or 'name'
    for child in self.meta._children:
        if isinstance(child, Html) and child.attr(meta_key) == name:
            return child.attr('content')
def date_time_between_dates(
        self, datetime_start=None, datetime_end=None, tzinfo=None):
    """
    Takes two DateTime objects and returns a random datetime between the two
    given datetimes.  Accepts DateTime objects.

    :param datetime_start: DateTime
    :param datetime_end: DateTime
    :param tzinfo: timezone, instance of datetime.tzinfo subclass
    :example DateTime('1999-02-02 11:42:52')
    :return DateTime
    """
    if datetime_start is None:
        datetime_start = datetime.now(tzinfo)

    if datetime_end is None:
        datetime_end = datetime.now(tzinfo)

    timestamp = self.generator.random.randint(
        datetime_to_timestamp(datetime_start),
        datetime_to_timestamp(datetime_end),
    )
    try:
        if tzinfo is None:
            pick = datetime.fromtimestamp(timestamp, tzlocal())
            pick = pick.astimezone(tzutc()).replace(tzinfo=None)
        else:
            pick = datetime.fromtimestamp(timestamp, tzinfo)
    except OverflowError:
        raise OverflowError(
            "You specified an end date with a timestamp bigger than the maximum allowed on this"
            " system. Please specify an earlier date.",
        )
    return pick
def p_expr_exit(p):
    '''expr : EXIT
            | EXIT LPAREN RPAREN
            | EXIT LPAREN expr RPAREN'''
    if len(p) == 5:
        p[0] = ast.Exit(p[3], lineno=p.lineno(1))
    else:
        p[0] = ast.Exit(None, lineno=p.lineno(1))
def _list_dict(l: Iterator[str], case_insensitive: bool = False):
    """
    Return a dictionary with all items of l being the keys of the dictionary.

    If case_insensitive is true, a case-insensitive dict would be used for
    the keys; this is currently not implemented.
    """
    if case_insensitive:
        # Case-insensitive keys are not supported yet; the assignment below
        # is unreachable until this is implemented.
        raise NotImplementedError()
        d = tldap.dict.CaseInsensitiveDict()
    else:
        d = {}
    for i in l:
        d[i] = None
    return d
def get_attribute(self, obj, attribute):
    """ Returns single object attribute.

    :param obj: requested object.
    :param attribute: requested attribute to query.
    :returns: returned value.
    :rtype: str
    """
    raw_return = self.send_command_return(obj, attribute, '?')
    if len(raw_return) > 2 and raw_return[0] == '"' and raw_return[-1] == '"':
        return raw_return[1:-1]
    return raw_return
def trailing_stop_loss(self, accountID, **kwargs):
    """
    Shortcut to create a Trailing Stop Loss Order in an Account

    Args:
        accountID : The ID of the Account
        kwargs : The arguments to create a TrailingStopLossOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    return self.create(
        accountID,
        order=TrailingStopLossOrderRequest(**kwargs)
    )
def population_counts(
    self,
    population_size,
    weighted=True,
    include_missing=False,
    include_transforms_for_dims=None,
    prune=False,
):
    """Return counts scaled in proportion to overall population.

    The return value is a numpy.ndarray object. Count values are scaled
    proportionally to approximate their value if the entire population had
    been sampled. This calculation is based on the estimated size of the
    population provided as *population_size*. The remaining arguments have
    the same meaning as they do for the `.proportions()` method.

    Example::

        >>> cube = CrunchCube(fixt_cat_x_cat)
        >>> cube.as_array()
        np.array([
            [5, 2],
            [5, 3],
        ])
        >>> cube.population_counts(9000)
        np.array([
            [3000, 1200],
            [3000, 1800],
        ])
    """
    population_counts = [
        slice_.population_counts(
            population_size,
            weighted=weighted,
            include_missing=include_missing,
            include_transforms_for_dims=include_transforms_for_dims,
            prune=prune,
        )
        for slice_ in self.slices
    ]
    if len(population_counts) > 1:
        return np.array(population_counts)
    return population_counts[0]
def __flush_buffer(self):
    """Flush the buffer contents out to a chunk."""
    self.__flush_data(self._buffer.getvalue())
    self._buffer.close()
    self._buffer = StringIO()
def vertex_to_entity_path(vertex_path, graph, entities, vertices=None):
    """
    Convert a path of vertex indices to a path of entity indices.

    Parameters
    ----------
    vertex_path : (n,) int
        Ordered list of vertex indices representing a path
    graph : nx.Graph
        Vertex connectivity
    entities : (m,) list
        Entity objects
    vertices : (p, dimension) float
        Vertex points in space

    Returns
    ----------
    entity_path : (q,) int
        Entity indices which make up vertex_path
    """
    def edge_direction(a, b):
        """
        Given two edges, figure out if the first needs to be
        reversed to keep the progression forward.

        [1,0] [1,2] -1  1
        [1,0] [2,1] -1 -1
        [0,1] [1,2]  1  1
        [0,1] [2,1]  1 -1

        Parameters
        ------------
        a : (2,) int
        b : (2,) int

        Returns
        ------------
        a_direction : int
        b_direction : int
        """
        if a[0] == b[0]:
            return -1, 1
        elif a[0] == b[1]:
            return -1, -1
        elif a[1] == b[0]:
            return 1, 1
        elif a[1] == b[1]:
            return 1, -1
        else:
            msg = 'edges not connected!'
            msg += '\nvertex_path: {}'.format(vertex_path)
            msg += '\nentity_path: {}'.format(entity_path)
            msg += '\nentity[a]: {}'.format(entities[ea].points)
            msg += '\nentity[b]: {}'.format(entities[eb].points)
            constants.log.warning(msg)
            return None, None

    if vertices is None or vertices.shape[1] != 2:
        ccw_direction = 1
    else:
        ccw_check = is_ccw(vertices[np.append(vertex_path, vertex_path[0])])
        ccw_direction = (ccw_check * 2) - 1

    # make sure vertex path is correct type
    vertex_path = np.asanyarray(vertex_path, dtype=np.int64)
    # we will be saving entity indexes
    entity_path = []
    # loop through pairs of vertices
    for i in np.arange(len(vertex_path) + 1):
        # get two wrapped vertex positions
        vertex_path_pos = np.mod(np.arange(2) + i, len(vertex_path))
        vertex_index = vertex_path[vertex_path_pos]
        entity_index = graph.get_edge_data(*vertex_index)['entity_index']
        entity_path.append(entity_index)
    # remove duplicate entities and order CCW
    entity_path = grouping.unique_ordered(entity_path)[::ccw_direction]
    # check to make sure there is more than one entity
    if len(entity_path) == 1:
        # apply CCW reverse in place if necessary
        if ccw_direction < 0:
            index = entity_path[0]
            entities[index].reverse()
        return entity_path
    # traverse the entity path and reverse entities in place to
    # align with this path ordering
    round_trip = np.append(entity_path, entity_path[0])
    round_trip = zip(round_trip[:-1], round_trip[1:])
    for ea, eb in round_trip:
        da, db = edge_direction(entities[ea].end_points,
                                entities[eb].end_points)
        if da is not None:
            entities[ea].reverse(direction=da)
            entities[eb].reverse(direction=db)

    entity_path = np.array(entity_path)

    return entity_path
def identity(self):
    """Return this partition's information as a PartitionIdentity."""
    if self.dataset is None:
        # The relationship will be null until the object is committed
        s = object_session(self)
        ds = s.query(Dataset).filter(Dataset.id_ == self.d_id).one()
    else:
        ds = self.dataset

    d = {
        'id': self.id,
        'vid': self.vid,
        'name': self.name,
        'vname': self.vname,
        'ref': self.ref,
        'space': self.space,
        'time': self.time,
        'table': self.table_name,
        'grain': self.grain,
        'variant': self.variant,
        'segment': self.segment,
        'format': self.format if self.format else 'db'
    }

    return PartitionIdentity.from_dict(dict(list(ds.dict.items()) + list(d.items())))
def GET(self, courseid):  # pylint: disable=arguments-differ
    """ GET request """
    course = self.get_course(courseid)
    return self.show_page(course)
def clean_old_jobs():
    '''
    Called in the master's event loop every loop_interval.
    Archives and/or deletes the events and job details from the database.

    :return:
    '''
    if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0:
        try:
            with _get_serv() as cur:
                sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs'])
                cur.execute(sql)
                rows = cur.fetchall()
                stamp = rows[0][0]

            if __opts__.get('archive_jobs', False):
                _archive_jobs(stamp)
            else:
                _purge_jobs(stamp)
        except MySQLdb.Error as e:
            log.error('Mysql returner was unable to get timestamp for purge/archive of jobs')
            log.error(six.text_type(e))
            raise salt.exceptions.SaltRunnerError(six.text_type(e))
def expand(self, url):
    """Base expand method. Only visits the link and returns the response url."""
    url = self.clean_url(url)
    response = self._get(url)
    if response.ok:
        return response.url
    raise ExpandingErrorException
def start(config, bugnumber=""):
    """Create a new topic branch."""
    repo = config.repo

    if bugnumber:
        summary, bugnumber, url = get_summary(config, bugnumber)
    else:
        url = None
        summary = None

    if summary:
        summary = input('Summary ["{}"]: '.format(summary)).strip() or summary
    else:
        summary = input("Summary: ").strip()

    branch_name = ""
    if bugnumber:
        if is_github({"bugnumber": bugnumber, "url": url}):
            branch_name = "{}-".format(bugnumber)
        else:
            branch_name = "{}-".format(bugnumber)

    def clean_branch_name(string):
        string = re.sub(r"\s+", " ", string)
        string = string.replace(" ", "-")
        string = string.replace("->", "-").replace("=>", "-")
        for each in "@%^&:'\"/(),[]{}!.?`$<>#*;=":
            string = string.replace(each, "")
        string = re.sub("-+", "-", string)
        string = string.strip("-")
        return string.lower().strip()

    branch_name += clean_branch_name(summary)
    if not branch_name:
        error_out("Must provide a branch name")

    # Check that the branch doesn't already exist
    found = list(find(repo, branch_name, exact=True))
    if found:
        error_out("There is already a branch called {!r}".format(found[0].name))

    new_branch = repo.create_head(branch_name)
    new_branch.checkout()
    if config.verbose:
        click.echo("Checkout out new branch: {}".format(branch_name))

    save(config.configfile, summary, branch_name, bugnumber=bugnumber, url=url)
def _set_Buffer(self, v, load=False):
    """
    Setter method for Buffer, mapped from YANG variable /rbridge_id/threshold_monitor/Buffer (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_Buffer is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_Buffer() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=Buffer.Buffer, is_container='container', presence=False, yang_name="Buffer", rest_name="Buffer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure settings for component:Buffer', u'cli-compact-syntax': None, u'callpoint': u'BufferMonitor', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """Buffer must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=Buffer.Buffer, is_container='container', presence=False, yang_name="Buffer", rest_name="Buffer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure settings for component:Buffer', u'cli-compact-syntax': None, u'callpoint': u'BufferMonitor', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)""",
        })

    self.__Buffer = t
    if hasattr(self, '_set'):
        self._set()
def selectrowindex(self, window_name, object_name, row_index):
    """
    Select row index

    @param window_name: Window name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string
    @param row_index: Row index to select
    @type row_index: integer

    @return: 1 on success.
    @rtype: integer
    """
    object_handle = self._get_object_handle(window_name, object_name)
    if not object_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    count = len(object_handle.AXRows)
    # valid indices are 0 .. count - 1
    if row_index < 0 or row_index >= count:
        raise LdtpServerException('Row index out of range: %d' % row_index)
    cell = object_handle.AXRows[row_index]
    if not cell.AXSelected:
        object_handle.activate()
        cell.AXSelected = True
    else:
        # Already selected
        pass
    return 1
def set_units(self, unit):
    """Set the unit for this data point

    Unit, as with data_type, are actually associated with the stream and
    not the individual data point.  As such, changing this within a stream
    is not encouraged.  Setting the unit on the data point is useful when
    the stream might be created with the write of a data point.
    """
    self._units = validate_type(unit, type(None), *six.string_types)
def multi_index_insert_row(df, index_row, values_row):
    """ Return a new dataframe with a row inserted for a multi-index dataframe.
        This will sort the rows according to the ordered multi-index levels.
    """
    row_index = pd.MultiIndex(levels=[[i] for i in index_row],
                              labels=[[0] for i in index_row])
    row = pd.DataFrame(values_row, index=row_index, columns=df.columns)
    df = pd.concat((df, row))
    if df.index.lexsort_depth == len(index_row) and df.index[-2] < df.index[-1]:
        # We've just appended a row to an already-sorted dataframe
        return df
    # The df wasn't sorted or the row has to be put in the middle somewhere
    return df.sort_index()
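A small self-contained sketch of what multi_index_insert_row does, using the modern pandas API (note the function above relies on the legacy pd.MultiIndex(labels=...) constructor from pandas < 0.24; newer versions call that argument codes). The ticker and values are invented for illustration.

import pandas as pd

df = pd.DataFrame(
    {"price": [1.0, 2.0]},
    index=pd.MultiIndex.from_tuples([("AAPL", 1), ("AAPL", 3)],
                                    names=["symbol", "day"]),
)
row = pd.DataFrame({"price": [1.5]},
                   index=pd.MultiIndex.from_tuples([("AAPL", 2)],
                                                   names=["symbol", "day"]))
out = pd.concat((df, row)).sort_index()  # the new row lands between day 1 and day 3
print(out)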
def _resample(self, arrays, ji_windows):
    """Resample all arrays with potentially different resolutions to a
    common resolution."""
    # get a destination array template
    win_dst = ji_windows[self.dst_res]
    aff_dst = self._layer_meta[self._res_indices[self.dst_res][0]]["transform"]
    arrays_dst = list()
    for i, array in enumerate(arrays):
        arr_dst = np.zeros((int(win_dst.height), int(win_dst.width)))
        if self._layer_resolution[i] > self.dst_res:
            resampling = getattr(Resampling, self.upsampler)
        elif self._layer_resolution[i] < self.dst_res:
            resampling = getattr(Resampling, self.downsampler)
        else:
            arrays_dst.append(array.copy())
            continue
        reproject(array, arr_dst,  # arr_dst[0, :, :, i],
                  src_transform=self._layer_meta[i]["transform"],
                  dst_transform=aff_dst,
                  src_crs=self._layer_meta[0]["crs"],
                  dst_crs=self._layer_meta[0]["crs"],
                  resampling=resampling)
        arrays_dst.append(arr_dst.copy())
    arrays_dst = np.stack(arrays_dst, axis=2)  # n_images x n x m x 10 would be the synergise format
    return arrays_dst
def setup_versioneer():
    """
    Generate (temporarily) versioneer.py file in project root directory
    :return:
    """
    try:
        # assume versioneer.py was generated using "versioneer install" command
        import versioneer
        versioneer.get_version()
    except ImportError:
        # it looks like versioneer.py is missing
        # let's assume that the versioneer package is installed
        # and the versioneer binary is present in $PATH
        import subprocess
        try:
            # call versioneer install to generate versioneer.py
            subprocess.check_output(["versioneer", "install"])
        except OSError:
            # it looks like versioneer is missing from $PATH
            # probably versioneer is installed in some user directory

            # query pip for the list of files in the versioneer package
            # the line below is equivalent to putting the result of the
            # "pip show -f versioneer" command into the string `output`
            output = pip_command_output(["show", "-f", "versioneer"])

            # now we parse the results
            import os
            # find the absolute path where the *versioneer package* was
            # installed and store it in main_path
            main_path = [x[len("Location: "):] for x in output.splitlines()
                         if x.startswith("Location")][0]
            # find the path relative to main_path where the
            # *versioneer binary* was installed
            bin_path = [x[len(" "):] for x in output.splitlines()
                        if x.endswith(os.path.sep + "versioneer")][0]
            # exe_path is the absolute path to the *versioneer binary*
            exe_path = os.path.join(main_path, bin_path)
            # call versioneer install to generate versioneer.py
            # the line below is equivalent to running
            # "python versioneer install" in a terminal
            subprocess.check_output(["python", exe_path, "install"])
def matches_querytime(instance, querytime):
    """
    Checks whether the given instance satisfies the given QueryTime object.

    :param instance: an instance of Versionable
    :param querytime: QueryTime value to check against
    """
    if not querytime.active:
        return True

    if not querytime.time:
        return instance.version_end_date is None

    return (instance.version_start_date <= querytime.time and
            (instance.version_end_date is None or
             instance.version_end_date > querytime.time))
def _set_icmp(self, v, load=False):
    """
    Setter method for icmp, mapped from YANG variable /rbridge_id/interface/ve/ip/icmp (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_icmp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_icmp() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Control Message Protocol(ICMP)', u'sort-priority': u'118', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'callpoint': u'IcmpVeIntfConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-icmp', defining_module='brocade-icmp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """icmp must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Control Message Protocol(ICMP)', u'sort-priority': u'118', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'callpoint': u'IcmpVeIntfConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-icmp', defining_module='brocade-icmp', yang_type='container', is_config=True)""",
        })

    self.__icmp = t
    if hasattr(self, '_set'):
        self._set()
def get_grouped_psf_model(template_psf_model, star_group, pars_to_set):
    """
    Construct a joint PSF model which consists of a sum of PSF's templated on
    a specific model, but whose parameters are given by a table of objects.

    Parameters
    ----------
    template_psf_model : `astropy.modeling.Fittable2DModel` instance
        The model to use for *individual* objects.  Must have parameters named
        ``x_0``, ``y_0``, and ``flux``.

    star_group : `~astropy.table.Table`
        Table of stars for which the compound PSF will be constructed.  It
        must have columns named ``x_0``, ``y_0``, and ``flux_0``.

    pars_to_set : dict
        Mapping of star-table column names to model parameter names.

    Returns
    -------
    group_psf
        An `astropy.modeling` ``CompoundModel`` instance which is a sum of the
        given PSF models.
    """
    group_psf = None

    for star in star_group:
        psf_to_add = template_psf_model.copy()
        for param_tab_name, param_name in pars_to_set.items():
            setattr(psf_to_add, param_name, star[param_tab_name])
        if group_psf is None:
            # this is the first one only
            group_psf = psf_to_add
        else:
            group_psf += psf_to_add

    return group_psf
def subtree(events):
    """selects sub-tree events"""
    stack = 0
    for obj in events:
        if obj['type'] == ENTER:
            stack += 1
        elif obj['type'] == EXIT:
            if stack == 0:
                break
            stack -= 1
        yield obj
def _generate_replacement(interface_number, segment_number):
    """
    This will generate replacement strings for
    {port0} => {port9}
    {segment0} => {segment9}
    """
    replacements = {}
    # range upper bound is exclusive, so 10 covers port0 through port9
    for i in range(0, 10):
        replacements["port" + str(i)] = interface_number + i
        replacements["segment" + str(i)] = segment_number + i
    return replacements
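A hypothetical stand-alone copy of the helper above (the original is module-private), showing the mapping produced for an adapter whose first interface is 4 and first segment is 0; the numbers are made up for illustration.

def generate_replacement(interface_number, segment_number):
    replacements = {}
    for i in range(0, 10):
        replacements["port" + str(i)] = interface_number + i
        replacements["segment" + str(i)] = segment_number + i
    return replacements

print(generate_replacement(4, 0)["port0"])   # 4
print(generate_replacement(4, 0)["port9"])   # 13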
def count(a, axis=None):
    """
    Count the non-masked elements of the array along the given axis.

    .. note:: Currently limited to operating on a single axis.

    :param axis: Axis or axes along which the operation is performed.
                 The default (axis=None) is to perform the operation
                 over all the dimensions of the input array.
                 The axis may be negative, in which case it counts from
                 the last to the first axis.
                 If axis is a tuple of ints, the operation is performed
                 over multiple axes.
    :type axis: None, or int, or iterable of ints.
    :return: The Array representing the requested count.
    :rtype: Array
    """
    axes = _normalise_axis(axis, a)
    if axes is None or len(axes) != 1:
        msg = "This operation is currently limited to a single axis"
        raise AxisSupportError(msg)
    return _Aggregation(a, axes[0],
                        _CountStreamsHandler, _CountMaskedStreamsHandler,
                        np.dtype('i'), {})
def special_type(self):
    """
    [str] Special treatment status. 'Normal' - normally listed, 'ST' - under
    special treatment, 'StarST' - *ST, meaning the stock is under a delisting
    warning, 'PT' - the stock has had negative revenue for three consecutive
    years and trading will be suspended, 'Other' - other (stocks only)
    """
    try:
        return self.__dict__["special_type"]
    except (KeyError, ValueError):
        raise AttributeError(
            "Instrument(order_book_id={}) has no attribute 'special_type' ".format(self.order_book_id)
        )
def create_from_fits(cls, fitsfile, norm_type='flux'):
    """Build a TSCube object from a fits file created by gttscube

    Parameters
    ----------
    fitsfile : str
        Path to the tscube FITS file.

    norm_type : str
        String specifying the quantity used for the normalization
    """
    tsmap = WcsNDMap.read(fitsfile)
    tab_e = Table.read(fitsfile, 'EBOUNDS')
    tab_s = Table.read(fitsfile, 'SCANDATA')
    tab_f = Table.read(fitsfile, 'FITDATA')

    tab_e = convert_sed_cols(tab_e)
    tab_s = convert_sed_cols(tab_s)
    tab_f = convert_sed_cols(tab_f)

    emin = np.array(tab_e['e_min'])
    emax = np.array(tab_e['e_max'])
    try:
        if str(tab_e['e_min'].unit) == 'keV':
            emin /= 1000.
    except:
        pass
    try:
        if str(tab_e['e_max'].unit) == 'keV':
            emax /= 1000.
    except:
        pass

    nebins = len(tab_e)
    npred = tab_e['ref_npred']

    ndim = len(tsmap.data.shape)
    if ndim == 2:
        cube_shape = (tsmap.data.shape[0], tsmap.data.shape[1], nebins)
    elif ndim == 1:
        cube_shape = (tsmap.data.shape[0], nebins)
    else:
        raise RuntimeError("Counts map has dimension %i" % (ndim))

    refSpec = ReferenceSpec.create_from_table(tab_e)

    nll_vals = -np.array(tab_s["dloglike_scan"])
    norm_vals = np.array(tab_s["norm_scan"])

    axis = MapAxis.from_edges(np.concatenate((emin, emax[-1:])),
                              interp='log')
    geom_3d = tsmap.geom.to_cube([axis])

    tscube = WcsNDMap(geom_3d,
                      np.rollaxis(tab_s["ts"].reshape(cube_shape), 2, 0))
    ncube = WcsNDMap(geom_3d,
                     np.rollaxis(tab_s["norm"].reshape(cube_shape), 2, 0))
    nmap = WcsNDMap(tsmap.geom,
                    tab_f['fit_norm'].reshape(tsmap.data.shape))

    ref_colname = 'ref_%s' % norm_type
    norm_vals *= tab_e[ref_colname][np.newaxis, :, np.newaxis]

    return cls(tsmap, nmap, tscube, ncube,
               norm_vals, nll_vals, refSpec,
               norm_type)
def triplet_loss(anchor, positive, negative, margin, extra=False, scope="triplet_loss"):
    r"""Loss for Triplet networks as described in the paper:
    `FaceNet: A Unified Embedding for Face Recognition and Clustering
    <https://arxiv.org/abs/1503.03832>`_
    by Schroff et al.

    Learn embeddings from an anchor point and a similar input (positive) as
    well as a not-similar input (negative).
    Intuitively, a matching pair (anchor, positive) should have a smaller
    relative distance than a non-matching pair (anchor, negative).

    .. math::
        \max(0, m + \Vert a-p\Vert^2 - \Vert a-n\Vert^2)

    Args:
        anchor (tf.Tensor): anchor feature vectors of shape [Batch, N].
        positive (tf.Tensor): features of positive match of the same shape.
        negative (tf.Tensor): features of negative match of the same shape.
        margin (float): horizon for negative examples
        extra (bool): also return distances for pos and neg.

    Returns:
        tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist,
        average_neg_dist)
    """
    with tf.name_scope(scope):
        d_pos = tf.reduce_sum(tf.square(anchor - positive), 1)
        d_neg = tf.reduce_sum(tf.square(anchor - negative), 1)

        loss = tf.reduce_mean(tf.maximum(0., margin + d_pos - d_neg))

        if extra:
            pos_dist = tf.reduce_mean(tf.sqrt(d_pos + 1e-10), name='pos-dist')
            neg_dist = tf.reduce_mean(tf.sqrt(d_neg + 1e-10), name='neg-dist')
            return loss, pos_dist, neg_dist
        else:
            return loss
def healthy(self, url):
    '''determine if a resource is healthy based on an accepted response (200)
    after following any redirects

    Parameters
    ==========
    url: the URL to check status for, based on the status_code of a GET request
    '''
    response = requests.get(url)
    status_code = response.status_code
    if status_code != 200:
        bot.error('%s, response status code %s.' % (url, status_code))
        return False
    return True
def date(self, date):
    """Set File Occurrence date."""
    self._occurrence_data['date'] = self._utils.format_datetime(
        date, date_format='%Y-%m-%dT%H:%M:%SZ'
    )
def pump_reader(self):
    """
    Synchronously reads one message from the watch, blocking until a message
    is available. All events caused by the message read will be processed
    before this method returns.

    .. note::
       You usually don't need to invoke this method manually; instead, see
       :meth:`run_sync` and :meth:`run_async`.
    """
    origin, message = self.transport.read_packet()
    if isinstance(origin, MessageTargetWatch):
        self._handle_watch_message(message)
    else:
        self._broadcast_transport_message(origin, message)
def fast_comp(seq1, seq2, transpositions=False):
    """Compute the distance between the two sequences `seq1` and `seq2` up to
    a maximum of 2 included, and return it. If the edit distance between the
    two sequences is higher than that, -1 is returned.

    If `transpositions` is `True`, transpositions will be taken into account
    for the computation of the distance. This can make a difference, e.g.:

        >>> fast_comp("abc", "bac", transpositions=False)
        2
        >>> fast_comp("abc", "bac", transpositions=True)
        1

    This is faster than `levenshtein` by an order of magnitude, but on the
    other hand is of limited use.

    The algorithm comes from `http://writingarchives.sakura.ne.jp/fastcomp`.
    I've added transpositions support to the original code.
    """
    replace, insert, delete = "r", "i", "d"

    L1, L2 = len(seq1), len(seq2)
    if L1 < L2:
        L1, L2 = L2, L1
        seq1, seq2 = seq2, seq1

    ldiff = L1 - L2
    if ldiff == 0:
        models = (insert + delete, delete + insert, replace + replace)
    elif ldiff == 1:
        models = (delete + replace, replace + delete)
    elif ldiff == 2:
        models = (delete + delete,)
    else:
        return -1

    res = 3
    for model in models:
        i = j = c = 0
        while (i < L1) and (j < L2):
            if seq1[i] != seq2[j]:
                c = c + 1
                if 2 < c:
                    break

                if transpositions and ldiff != 2 \
                        and i < L1 - 1 and j < L2 - 1 \
                        and seq1[i + 1] == seq2[j] and seq1[i] == seq2[j + 1]:
                    i, j = i + 2, j + 2
                else:
                    cmd = model[c - 1]
                    if cmd == delete:
                        i = i + 1
                    elif cmd == insert:
                        j = j + 1
                    else:
                        assert cmd == replace
                        i, j = i + 1, j + 1
            else:
                i, j = i + 1, j + 1

        if 2 < c:
            continue
        elif i < L1:
            if L1 - i <= model[c:].count(delete):
                c = c + (L1 - i)
            else:
                continue
        elif j < L2:
            if L2 - j <= model[c:].count(insert):
                c = c + (L2 - j)
            else:
                continue

        if c < res:
            res = c

    if res == 3:
        res = -1
    return res
def clean_cell(self, cell, cell_type):
    """
    Uses the type of field (from the mapping) to determine
    how to clean and format the cell.
    """
    try:
        # Get rid of non-ASCII characters
        cell = cell.encode('ascii', 'ignore').decode()

        if cell_type == 'D':
            cell = datetime.strptime(cell, '%Y%m%d')
        elif cell_type == 'I':
            cell = int(cell)
        elif cell_type == 'N':
            cell = Decimal(cell)
        else:
            cell = cell.upper()
            if len(cell) > 50:
                cell = cell[0:50]
            if not cell or cell in NULL_TERMS:
                cell = None
    except:
        cell = None
    return cell
def send_calibrate_barometer(self):
    """Request barometer calibration."""
    calibration_command = self.message_factory.command_long_encode(
        self._handler.target_system, 0,  # target_system, target_component
        mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION,  # command
        0,  # confirmation
        0,  # param 1, 1: gyro calibration, 3: gyro temperature calibration
        0,  # param 2, 1: magnetometer calibration
        1,  # param 3, 1: ground pressure calibration
        0,  # param 4, 1: radio RC calibration, 2: RC trim calibration
        0,  # param 5, 1: accelerometer calibration, 2: board level calibration, 3: accelerometer temperature calibration, 4: simple accelerometer calibration
        0,  # param 6, 2: airspeed calibration
        0,  # param 7, 1: ESC calibration, 3: barometer temperature calibration
    )
    self.send_mavlink(calibration_command)
def as_dict(self):
    """
    Json-serializable dict representation of Dos.
    """
    return {"@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "efermi": self.efermi,
            "energies": list(self.energies),
            "densities": {str(spin): list(dens)
                          for spin, dens in self.densities.items()}}
def parse_workflow_declaration(self, wf_declaration_subAST):
    '''
    Parses a WDL declaration AST subtree into a string and a python
    dictionary containing its 'type' and 'value'.

    For example:
    var_name = refIndex
    var_map = {'type': File, 'value': bamIndex}

    :param wf_declaration_subAST: An AST subtree of a workflow declaration.
    :return: var_name, which is the name of the declared variable
    :return: var_map, a dictionary with keys for type and value.
             e.g. {'type': File, 'value': bamIndex}
    '''
    var_map = OrderedDict()
    var_name = self.parse_declaration_name(wf_declaration_subAST.attr("name"))
    var_type = self.parse_declaration_type(wf_declaration_subAST.attr("type"))
    var_expressn = self.parse_declaration_expressn(wf_declaration_subAST.attr("expression"), es='')

    var_map['name'] = var_name
    var_map['type'] = var_type
    var_map['value'] = var_expressn

    return var_name, var_map
# requires `from contextlib import contextmanager` at module level; the
# decorator is needed so the generator below can be used in a `with` block
@contextmanager
def temp_db(db, name=None):
    """
    A context manager that creates a temporary database.
    Useful for automated tests.

    Parameters
    ----------
    db: object
        a preconfigured DB object
    name: str, optional
        name of the database to be created. (default: globally unique name)
    """
    if name is None:
        name = temp_name()
    db.create(name)
    if not db.exists(name):
        raise DatabaseError('failed to create database %s!' % name)
    try:
        yield name
    finally:
        db.drop(name)
        if db.exists(name):
            raise DatabaseError('failed to drop database %s!' % name)
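A usage sketch with a fake in-memory "DB" object, just to show the create-on-enter / drop-on-exit behaviour; the real `db` is a preconfigured database handle, FakeDB is invented for illustration, and the example assumes the @contextmanager decorator above.

class FakeDB:
    def __init__(self):
        self.names = set()
    def create(self, name):
        self.names.add(name)
    def exists(self, name):
        return name in self.names
    def drop(self, name):
        self.names.discard(name)

db = FakeDB()
with temp_db(db, name="unittest_tmp") as name:
    assert db.exists(name)          # created on entry
assert not db.exists("unittest_tmp")  # dropped on exit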
def app_update_state(app_id, state):
    """ update app state """
    try:
        create_at = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        conn = get_conn()
        c = conn.cursor()
        c.execute("UPDATE app SET state='{0}',change_at='{1}' WHERE id='{2}'".format(state, create_at, app_id))
        conn.commit()
        conn.close()
        print 'UPDATE app %s state to %s succeed!' % (app_id, state)
    except Exception, e:
        raise RuntimeError(
            'update app %s state to %s failed! %s' % (app_id, state, e)
        )
def pkg_resources_env(self, platform_str):
    """Returns a dict that can be used in place of packaging.default_environment."""
    os_name = ''
    platform_machine = ''
    platform_release = ''
    platform_system = ''
    platform_version = ''
    sys_platform = ''
    if 'win' in platform_str:
        os_name = 'nt'
        platform_machine = 'AMD64' if '64' in platform_str else 'x86'
        platform_system = 'Windows'
        sys_platform = 'win32'
    elif 'linux' in platform_str:
        os_name = 'posix'
        platform_machine = 'x86_64' if '64' in platform_str else 'i686'
        platform_system = 'Linux'
        sys_platform = 'linux2' if self._version[0] == 2 else 'linux'
    elif 'macosx' in platform_str:
        os_name = 'posix'
        platform_str = platform_str.replace('.', '_')
        platform_machine = platform_str.split('_', 3)[-1]
        # Darwin versions are macOS version + 4
        platform_release = '{}.0.0'.format(int(platform_str.split('_')[2]) + 4)
        platform_system = 'Darwin'
        platform_version = 'Darwin Kernel Version {}'.format(platform_release)
        sys_platform = 'darwin'
    return {
        'implementation_name': self.interpreter.lower(),
        'implementation_version': self.version_str,
        'os_name': os_name,
        'platform_machine': platform_machine,
        'platform_release': platform_release,
        'platform_system': platform_system,
        'platform_version': platform_version,
        'python_full_version': self.version_str,
        'platform_python_implementation': self.interpreter,
        'python_version': self.version_str[:3],
        'sys_platform': sys_platform,
    }
def get_data():
    """Retrieve static data from the game."""
    run_config = run_configs.get()
    with run_config.start(want_rgb=False) as controller:
        m = maps.get("Sequencer")  # Arbitrary ladder map.
        create = sc_pb.RequestCreateGame(local_map=sc_pb.LocalMap(
            map_path=m.path, map_data=m.data(run_config)))
        create.player_setup.add(type=sc_pb.Participant)
        create.player_setup.add(type=sc_pb.Computer, race=sc_common.Random,
                                difficulty=sc_pb.VeryEasy)
        join = sc_pb.RequestJoinGame(race=sc_common.Random,
                                     options=sc_pb.InterfaceOptions(raw=True))

        controller.create_game(create)
        controller.join_game(join)
        return controller.data()
def _cfactory(attr, func, argtypes, restype, errcheck=None):
    # type: (Any, str, List[Any], Any, Optional[Callable]) -> None
    """Factory to create a ctypes function and automatically manage errors."""
    meth = getattr(attr, func)
    meth.argtypes = argtypes
    meth.restype = restype
    if errcheck:
        meth.errcheck = errcheck
def traverse(self, root="ROOT", indent="", transform=None, stream=sys.stdout):
    '''
    Traverses the C{View} tree and prints its nodes.

    The nodes are printed converting them to string but other transformations
    can be specified by providing a method name as the C{transform} parameter.

    @type root: L{View}
    @param root: the root node from where the traverse starts
    @type indent: str
    @param indent: the indentation string to use to print the nodes
    @type transform: method
    @param transform: a method to use to transform the node before is printed
    '''
    if transform is None:
        # this cannot be a default value, otherwise
        # TypeError: 'staticmethod' object is not callable
        # is raised
        transform = ViewClient.TRAVERSE_CIT

    if type(root) == types.StringType and root == "ROOT":
        root = self.root

    return ViewClient.__traverse(root, indent, transform, stream)
def coderelpath(coderoot, relpath):
    """Returns the absolute path of the 'relpath' relative to the specified
    code directory."""
    from os import chdir, getcwd, path
    cd = getcwd()
    chdir(coderoot)
    result = path.abspath(relpath)
    chdir(cd)
    return result
def run_kernel(self, func, gpu_args, instance):
    """ Run a compiled kernel instance on a device """
    logging.debug('run_kernel %s', instance.name)
    logging.debug('thread block dims (%d, %d, %d)', *instance.threads)
    logging.debug('grid dims (%d, %d, %d)', *instance.grid)

    try:
        self.dev.run_kernel(func, gpu_args, instance.threads, instance.grid)
    except Exception as e:
        if "too many resources requested for launch" in str(e) or "OUT_OF_RESOURCES" in str(e):
            logging.debug('ignoring runtime failure due to too many resources required')
            return False
        else:
            logging.debug('encountered unexpected runtime failure: ' + str(e))
            raise e
    return True
def sync_close(self):
    """Synchronously close all connections in the pool."""
    if self._closed:
        return

    while self._free:
        conn = self._free.popleft()
        if not conn.closed:  # pragma: no cover
            conn.sync_close()

    for conn in self._used:
        if not conn.closed:  # pragma: no cover
            conn.sync_close()
        self._terminated.add(conn)
    self._used.clear()

    self._closed = True
def bin_dense(M, subsampling_factor=3):
    """Sum over each block of given subsampling factor, returns a matrix whose
    dimensions are this much as small (e.g. a 27x27 matrix binned with a
    subsampling factor equal to 3 will return a 9x9 matrix whose each
    component is the sum of the corresponding 3x3 block in the original
    matrix). Remaining columns and rows are summed likewise and added to the
    end of the new matrix.

    :note: Will not work for numpy versions below 1.7
    """
    m = min(M.shape)
    n = (m // subsampling_factor) * subsampling_factor

    if n == 0:
        return np.array([M.sum()])

    N = np.array(M[:n, :n], dtype=np.float64)
    N = N.reshape(n // subsampling_factor, subsampling_factor,
                  n // subsampling_factor, subsampling_factor).sum(axis=(1, 3))
    if m > n:
        remaining_row = M[n:, :n]
        remaining_col = M[:n, n:]
        remaining_square = M[n:m, n:m]
        R = remaining_row.reshape(m % subsampling_factor,
                                  m // subsampling_factor,
                                  subsampling_factor).sum(axis=(0, 2))
        C = remaining_col.T.reshape(m % subsampling_factor,
                                    m // subsampling_factor,
                                    subsampling_factor).sum(axis=(0, 2)).T
        S = remaining_square.sum()
        N = np.append(N, [R], axis=0)
        result = np.append(N, np.array([list(C) + [S]]).T, axis=1)
    else:
        result = N
    return result
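A quick check of the block-sum behaviour on a tiny matrix (assuming bin_dense above is in scope and numpy >= 1.7 as the docstring notes): a 4x4 matrix of ones binned with factor 2 collapses to a 2x2 matrix of fours.

import numpy as np

M = np.ones((4, 4))
print(bin_dense(M, subsampling_factor=2))
# [[4. 4.]
#  [4. 4.]]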
def _compute_counts(event, time, order=None):
    """Count right censored and uncensored samples at each unique time point.

    Parameters
    ----------
    event : array
        Boolean event indicator.

    time : array
        Survival time or time of censoring.

    order : array or None
        Indices to order time in ascending order.
        If None, order will be computed.

    Returns
    -------
    times : array
        Unique time points.

    n_events : array
        Number of events at each time point.

    n_at_risk : array
        Number of samples that are censored or have an event at each time point.
    """
    n_samples = event.shape[0]

    if order is None:
        order = numpy.argsort(time, kind="mergesort")

    uniq_times = numpy.empty(n_samples, dtype=time.dtype)
    uniq_events = numpy.empty(n_samples, dtype=numpy.int_)
    uniq_counts = numpy.empty(n_samples, dtype=numpy.int_)

    i = 0
    prev_val = time[order[0]]
    j = 0
    while True:
        count_event = 0
        count = 0
        while i < n_samples and prev_val == time[order[i]]:
            if event[order[i]]:
                count_event += 1

            count += 1
            i += 1

        uniq_times[j] = prev_val
        uniq_events[j] = count_event
        uniq_counts[j] = count
        j += 1

        if i == n_samples:
            break

        prev_val = time[order[i]]

    times = numpy.resize(uniq_times, j)
    n_events = numpy.resize(uniq_events, j)
    total_count = numpy.resize(uniq_counts, j)

    # offset cumulative sum by one
    total_count = numpy.concatenate(([0], total_count))
    n_at_risk = n_samples - numpy.cumsum(total_count)

    return times, n_events, n_at_risk[:-1]
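A tiny worked example, assuming _compute_counts and its numpy import are in scope: three samples, two distinct times, one censored observation at t=5.

import numpy as np

event = np.array([True, False, True])
time = np.array([5.0, 5.0, 8.0])
# times -> [5., 8.], n_events -> [1, 1], n_at_risk -> [3, 1]
print(_compute_counts(event, time))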
def calc_effective_diffusivity(self, inlets=None, outlets=None,
                               domain_area=None, domain_length=None):
    r"""
    This calculates the effective diffusivity in this linear transport
    algorithm.

    Parameters
    ----------
    inlets : array_like
        The pores where the inlet composition boundary conditions were
        applied.  If not given an attempt is made to infer them from the
        algorithm.

    outlets : array_like
        The pores where the outlet composition boundary conditions were
        applied.  If not given an attempt is made to infer them from the
        algorithm.

    domain_area : scalar, optional
        The area of the inlet (and outlet) boundary faces.  If not given
        then an attempt is made to estimate it, but it is usually
        underestimated.

    domain_length : scalar, optional
        The length of the domain between the inlet and outlet boundary
        faces.  If not given then an attempt is made to estimate it, but it
        is usually underestimated.

    Notes
    -----
    The area and length of the domain are found using the bounding box
    around the inlet and outlet pores which do not necessarily lie on the
    edge of the domain, resulting in underestimation of sizes.
    """
    return self._calc_eff_prop(inlets=inlets, outlets=outlets,
                               domain_area=domain_area,
                               domain_length=domain_length)
def __set_quantity(self, value):
    '''
    Sets the quantity
    @param value: str
    '''
    try:
        if value < 0:
            raise ValueError()
        self.__quantity = Decimal(str(value))
    except ValueError:
        raise ValueError("Quantity must be a positive number")
def remove_lvm_physical_volume(block_device):
    '''
    Remove LVM PV signatures from a given block device.

    :param block_device: str: Full path of block device to scrub.
    '''
    p = Popen(['pvremove', '-ff', block_device],
              stdin=PIPE)
    p.communicate(input='y\n')
def _switch_tz_offset_sql(self, field_name, tzname):
    """
    Returns the SQL that will convert field_name to UTC from tzname.
    """
    field_name = self.quote_name(field_name)
    if settings.USE_TZ:
        if pytz is None:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured("This query requires pytz, "
                                       "but it isn't installed.")
        tz = pytz.timezone(tzname)
        td = tz.utcoffset(datetime.datetime(2000, 1, 1))

        def total_seconds(td):
            if hasattr(td, 'total_seconds'):
                return td.total_seconds()
            else:
                return td.days * 24 * 60 * 60 + td.seconds

        total_minutes = total_seconds(td) // 60
        hours, minutes = divmod(total_minutes, 60)
        tzoffset = "%+03d:%02d" % (hours, minutes)
        field_name = "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
    return field_name
def __regions_russian(self, word):
    """
    Return the regions RV and R2 which are used by the Russian stemmer.

    In any word, RV is the region after the first vowel,
    or the end of the word if it contains no vowel.

    R2 is the region after the first non-vowel following
    a vowel in R1, or the end of the word if there is no such non-vowel.

    R1 is the region after the first non-vowel following a vowel,
    or the end of the word if there is no such non-vowel.

    :param word: The Russian word whose regions RV and R2 are determined.
    :type word: str or unicode
    :return: the regions RV and R2 for the respective Russian word.
    :rtype: tuple
    :note: This helper method is invoked by the stem method of the subclass
           RussianStemmer. It is not to be invoked directly!
    """
    r1 = ""
    r2 = ""
    rv = ""

    vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y")
    word = (word.replace("i^a", "A")
                .replace("i^u", "U")
                .replace("e`", "E"))

    for i in range(1, len(word)):
        if word[i] not in vowels and word[i - 1] in vowels:
            r1 = word[i + 1:]
            break

    for i in range(1, len(r1)):
        if r1[i] not in vowels and r1[i - 1] in vowels:
            r2 = r1[i + 1:]
            break

    for i in range(len(word)):
        if word[i] in vowels:
            rv = word[i + 1:]
            break

    r2 = (r2.replace("A", "i^a")
            .replace("U", "i^u")
            .replace("E", "e`"))
    rv = (rv.replace("A", "i^a")
            .replace("U", "i^u")
            .replace("E", "e`"))

    return (rv, r2)
def copyto_file_object(self, query, file_object): """ Gets data from a table into a writable file object :param query: The "COPY { table_name [(column_name[, ...])] | (query) } TO STDOUT [WITH(option[,...])]" query to execute :type query: str :param file_object: A file-like object. Normally the return value of open('file.ext', 'wb') :type file_object: file :raise CartoException: """ response = self.copyto(query) for block in response.iter_content(DEFAULT_CHUNK_SIZE): file_object.write(block)
Gets data from a table into a writable file object :param query: The "COPY { table_name [(column_name[, ...])] | (query) } TO STDOUT [WITH(option[,...])]" query to execute :type query: str :param file_object: A file-like object. Normally the return value of open('file.ext', 'wb') :type file_object: file :raise CartoException:
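A minimal usage sketch for the method above. It assumes `client` is an already-authenticated instance of the class this method belongs to; the table name and output filename are placeholders, not part of the original source.

# Hypothetical usage: stream a table dump straight into a local file.
copy_query = 'COPY mytable TO STDOUT WITH (FORMAT csv, HEADER true)'
with open('mytable_dump.csv', 'wb') as fd:   # binary mode, as the docstring suggests
    client.copyto_file_object(copy_query, fd)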
def solve_sweep_wavelength(
    self,
    structure,
    wavelengths,
    filename="wavelength_n_effs.dat",
    plot=True,
):
    """
    Solve for the effective indices of a fixed structure at different
    wavelengths.

    Args:
        structure (Slabs): The target structure to solve for modes.
        wavelengths (list): A list of wavelengths to sweep over.
        filename (str): The nominal filename to use when saving the
            effective indices. Defaults to 'wavelength_n_effs.dat'.
        plot (bool): `True` if plots should be generated, otherwise
            `False`. Default is `True`.

    Returns:
        list: A list of the effective indices found for each wavelength.
    """
    n_effs = []
    for w in tqdm.tqdm(wavelengths, ncols=70):
        structure.change_wavelength(w)
        self.solve(structure)
        n_effs.append(np.real(self.n_effs))

    if filename:
        self._write_n_effs_to_file(
            n_effs, self._modes_directory + filename, wavelengths
        )
        if plot:
            # Use LaTeX-style labels when matplotlib is available.
            if MPL:
                title = "$n_{eff}$ vs Wavelength"
                y_label = "$n_{eff}$"
            else:
                title = "n_{eff} vs Wavelength"
                y_label = "n_{eff}"
            self._plot_n_effs(
                self._modes_directory + filename,
                self._modes_directory + "fraction_te.dat",
                "Wavelength",
                y_label,
                title,
            )

    return n_effs
Solve for the effective indices of a fixed structure at different wavelengths.

Args:
    structure (Slabs): The target structure to solve for modes.
    wavelengths (list): A list of wavelengths to sweep over.
    filename (str): The nominal filename to use when saving the effective
        indices. Defaults to 'wavelength_n_effs.dat'.
    plot (bool): `True` if plots should be generated, otherwise `False`.
        Default is `True`.

Returns:
    list: A list of the effective indices found for each wavelength.
def floor_func(self, addr): """ Return the function who has the greatest address that is less than or equal to `addr`. :param int addr: The address to query. :return: A Function instance, or None if there is no other function before `addr`. :rtype: Function or None """ try: prev_addr = self._function_map.floor_addr(addr) return self._function_map[prev_addr] except KeyError: return None
Return the function who has the greatest address that is less than or equal to `addr`. :param int addr: The address to query. :return: A Function instance, or None if there is no other function before `addr`. :rtype: Function or None
def get(self, instance, aslist=False, **kwargs):
    """Get (multi-)references
    """
    refs = self.get_versioned_references_for(instance)

    if not self.multiValued:
        if len(refs) > 1:
            logger.warning("Found {} references for non-multivalued "
                           "reference field '{}' of {}".format(
                               len(refs), self.getName(), repr(instance)))
        if not aslist:
            if refs:
                refs = refs[0]
            else:
                refs = None

    if not self.referencesSortable or not hasattr(
            aq_base(instance), "at_ordered_refs"):
        return refs

    # restore the stored ordering of the references
    ordered_refs = instance.at_ordered_refs
    order = ordered_refs[self.relationship]
    if order is None:
        return refs

    # map each referenced object by its UID and rebuild the ordered list
    by_uid = dict(map(lambda ob: (api.get_uid(ob), ob), refs))
    return [by_uid[uid] for uid in order if uid in by_uid]
Get (multi-)references
def bulk_export(self, ids, exclude_captures=False): """Bulk export a set of results. :param ids: Int list of result IDs. :rtype: tuple `(io.BytesIO, 'filename')` """ return self.service.bulk_export(self.base, ids, params={'exclude_captures': exclude_captures})
Bulk export a set of results. :param ids: Int list of result IDs. :rtype: tuple `(io.BytesIO, 'filename')`
def clean_regex(regex): """ Escape any regex special characters other than alternation. :param regex: regex from datatables interface :type regex: str :rtype: str with regex to use with database """ # copy for return ret_regex = regex # these characters are escaped (all except alternation | and escape \) # see http://www.regular-expressions.info/refquick.html escape_chars = '[^$.?*+(){}' # remove any escape chars ret_regex = ret_regex.replace('\\', '') # escape any characters which are used by regex # could probably concoct something incomprehensible using re.sub() but # prefer to write clear code with this loop # note expectation that no characters have already been escaped for c in escape_chars: ret_regex = ret_regex.replace(c, '\\' + c) # remove any double alternations until these don't exist any more while True: old_regex = ret_regex ret_regex = ret_regex.replace('||', '|') if old_regex == ret_regex: break # if last char is alternation | remove it because this # will cause operational error # this can happen as user is typing in global search box while len(ret_regex) >= 1 and ret_regex[-1] == '|': ret_regex = ret_regex[:-1] # and back to the caller return ret_regex
Escape any regex special characters other than alternation. :param regex: regex from datatables interface :type regex: str :rtype: str with regex to use with database
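A few illustrative calls for the function above (inputs are made up); they show alternation surviving, regex metacharacters being escaped, and stray '|' artefacts being dropped.

assert clean_regex('1.2') == '1\\.2'          # '.' escaped so it matches a literal dot
assert clean_regex('foo|bar') == 'foo|bar'    # alternation preserved
assert clean_regex('foo||') == 'foo'          # doubled/trailing '|' removed
assert clean_regex('a(b)*') == 'a\\(b\\)\\*'  # grouping and repetition escaped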
def raw(self, sql): """ Execute raw sql :Parameters: - sql: string, sql to be executed :Return: the result of this execution If it's a select, return a list with each element be a DataRow instance Otherwise return raw result from the cursor (Should be insert or update or delete) """ res = self.cursor.execute(sql) if self.cursor.description is None: return res rows = self.cursor.fetchall() columns = [d[0] for d in self.cursor.description] structured_rows = [] for row in rows: data = {} for val, col in zip(row, columns): data[col] = val structured_rows.append(DataRow(data)) return structured_rows
Execute raw sql :Parameters: - sql: string, sql to be executed :Return: the result of this execution If it's a select, return a list with each element be a DataRow instance Otherwise return raw result from the cursor (Should be insert or update or delete)
def splitbins(t, trace=0): """t, trace=0 -> (t1, t2, shift). Split a table to save space. t is a sequence of ints. This function can be useful to save space if many of the ints are the same. t1 and t2 are lists of ints, and shift is an int, chosen to minimize the combined size of t1 and t2 (in C code), and where for each i in range(len(t)), t[i] == t2[(t1[i >> shift] << shift) + (i & mask)] where mask is a bitmask isolating the last "shift" bits. If optional arg trace is non-zero (default zero), progress info is printed to sys.stderr. The higher the value, the more info you'll get. """ if trace: def dump(t1, t2, shift, bytes): print("%d+%d bins at shift %d; %d bytes" % ( len(t1), len(t2), shift, bytes), file=sys.stderr) print("Size of original table:", len(t)*getsize(t), \ "bytes", file=sys.stderr) n = len(t)-1 # last valid index maxshift = 0 # the most we can shift n and still have something left if n > 0: while n >> 1: n >>= 1 maxshift += 1 del n bytes = sys.maxsize # smallest total size so far t = tuple(t) # so slices can be dict keys for shift in range(maxshift + 1): t1 = [] t2 = [] size = 2**shift bincache = {} for i in range(0, len(t), size): bin = t[i:i+size] index = bincache.get(bin) if index is None: index = len(t2) bincache[bin] = index t2.extend(bin) t1.append(index >> shift) # determine memory size b = len(t1)*getsize(t1) + len(t2)*getsize(t2) if trace > 1: dump(t1, t2, shift, b) if b < bytes: best = t1, t2, shift bytes = b t1, t2, shift = best if trace: print("Best:", end=' ', file=sys.stderr) dump(t1, t2, shift, bytes) if __debug__: # exhaustively verify that the decomposition is correct mask = ~((~0) << shift) # i.e., low-bit mask of shift bits for i in range(len(t)): assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)] return best
t, trace=0 -> (t1, t2, shift). Split a table to save space. t is a sequence of ints. This function can be useful to save space if many of the ints are the same. t1 and t2 are lists of ints, and shift is an int, chosen to minimize the combined size of t1 and t2 (in C code), and where for each i in range(len(t)), t[i] == t2[(t1[i >> shift] << shift) + (i & mask)] where mask is a bitmask isolating the last "shift" bits. If optional arg trace is non-zero (default zero), progress info is printed to sys.stderr. The higher the value, the more info you'll get.
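A small self-check of the reconstruction invariant quoted in the docstring, on a made-up, highly repetitive table. It assumes the module's getsize helper (used inside splitbins but not shown here) is available.

import random
random.seed(0)
table = [random.choice([0, 5, 9]) for _ in range(1000)]   # many repeated values
t1, t2, shift = splitbins(table)
mask = (1 << shift) - 1
assert all(table[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
           for i in range(len(table)))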
def connected_client(self): """Returns a ContextManagerFuture to be yielded in a with statement. Returns: A ContextManagerFuture object. Examples: >>> with (yield pool.connected_client()) as client: # client is a connected tornadis.Client instance # it will be automatically released to the pool thanks to # the "with" keyword reply = yield client.call("PING") """ future = self.get_connected_client() cb = functools.partial(self._connected_client_release_cb, future) return ContextManagerFuture(future, cb)
Returns a ContextManagerFuture to be yielded in a with statement. Returns: A ContextManagerFuture object. Examples: >>> with (yield pool.connected_client()) as client: # client is a connected tornadis.Client instance # it will be automatically released to the pool thanks to # the "with" keyword reply = yield client.call("PING")
def _comp_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) masker = _gen_eval_kwargs(op_name).get('masker', False) def na_op(x, y): # TODO: # should have guarantess on what x, y can be type-wise # Extension Dtypes are not called here # Checking that cases that were once handled here are no longer # reachable. assert not (is_categorical_dtype(y) and not is_scalar(y)) if is_object_dtype(x.dtype): result = _comp_method_OBJECT_ARRAY(op, x, y) elif is_datetimelike_v_numeric(x, y): return invalid_comparison(x, y, op) else: # we want to compare like types # we only want to convert to integer like if # we are not NotImplemented, otherwise # we would allow datetime64 (but viewed as i8) against # integer comparisons # we have a datetime/timedelta and may need to convert assert not needs_i8_conversion(x) mask = None if not is_scalar(y) and needs_i8_conversion(y): mask = isna(x) | isna(y) y = y.view('i8') x = x.view('i8') method = getattr(x, op_name, None) if method is not None: with np.errstate(all='ignore'): result = method(y) if result is NotImplemented: return invalid_comparison(x, y, op) else: result = op(x, y) if mask is not None and mask.any(): result[mask] = masker return result def wrapper(self, other, axis=None): # Validate the axis parameter if axis is not None: self._get_axis_number(axis) res_name = get_op_result_name(self, other) if isinstance(other, list): # TODO: same for tuples? other = np.asarray(other) if isinstance(other, ABCDataFrame): # pragma: no cover # Defer to DataFrame implementation; fail early return NotImplemented elif isinstance(other, ABCSeries) and not self._indexed_same(other): raise ValueError("Can only compare identically-labeled " "Series objects") elif is_categorical_dtype(self): # Dispatch to Categorical implementation; pd.CategoricalIndex # behavior is non-canonical GH#19513 res_values = dispatch_to_index_op(op, self, other, pd.Categorical) return self._constructor(res_values, index=self.index, name=res_name) elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self): # Dispatch to DatetimeIndex to ensure identical # Series/Index behavior if (isinstance(other, datetime.date) and not isinstance(other, datetime.datetime)): # https://github.com/pandas-dev/pandas/issues/21152 # Compatibility for difference between Series comparison w/ # datetime and date msg = ( "Comparing Series of datetimes with 'datetime.date'. " "Currently, the 'datetime.date' is coerced to a " "datetime. In the future pandas will not coerce, " "and {future}. " "To retain the current behavior, " "convert the 'datetime.date' to a datetime with " "'pd.Timestamp'." ) if op in {operator.lt, operator.le, operator.gt, operator.ge}: future = "a TypeError will be raised" else: future = ( "'the values will not compare equal to the " "'datetime.date'" ) msg = '\n'.join(textwrap.wrap(msg.format(future=future))) warnings.warn(msg, FutureWarning, stacklevel=2) other = pd.Timestamp(other) res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex) return self._constructor(res_values, index=self.index, name=res_name) elif is_timedelta64_dtype(self): res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex) return self._constructor(res_values, index=self.index, name=res_name) elif (is_extension_array_dtype(self) or (is_extension_array_dtype(other) and not is_scalar(other))): # Note: the `not is_scalar(other)` condition rules out # e.g. 
other == "category" return dispatch_to_extension_op(op, self, other) elif isinstance(other, ABCSeries): # By this point we have checked that self._indexed_same(other) res_values = na_op(self.values, other.values) # rename is needed in case res_name is None and res_values.name # is not. return self._constructor(res_values, index=self.index, name=res_name).rename(res_name) elif isinstance(other, (np.ndarray, pd.Index)): # do not check length of zerodim array # as it will broadcast if other.ndim != 0 and len(self) != len(other): raise ValueError('Lengths must match to compare') res_values = na_op(self.values, np.asarray(other)) result = self._constructor(res_values, index=self.index) # rename is needed in case res_name is None and self.name # is not. return result.__finalize__(self).rename(res_name) elif is_scalar(other) and isna(other): # numpy does not like comparisons vs None if op is operator.ne: res_values = np.ones(len(self), dtype=bool) else: res_values = np.zeros(len(self), dtype=bool) return self._constructor(res_values, index=self.index, name=res_name, dtype='bool') else: values = self.get_values() with np.errstate(all='ignore'): res = na_op(values, other) if is_scalar(res): raise TypeError('Could not compare {typ} type with Series' .format(typ=type(other))) # always return a full value series here res_values = com.values_from_object(res) return self._constructor(res_values, index=self.index, name=res_name, dtype='bool') wrapper.__name__ = op_name return wrapper
Wrapper function for Series arithmetic operations, to avoid code duplication.
async def _request( self, method: str, endpoint: str, *, headers: dict = None, params: dict = None, json: dict = None, ssl: bool = True) -> dict: """Wrap the generic request method to add access token, etc.""" return await self._client_request( method, '{0}/{1}'.format(self._host, endpoint), access_token=self._access_token, access_token_expiration=self._access_token_expiration, headers=headers, params=params, json=json, ssl=ssl)
Wrap the generic request method to add access token, etc.
def _sentence(self, words):
    """Generate a sentence"""
    db = self.database
    # Generate 2 consecutive words to start a sentence with
    seed = random.randint(0, db['word_count'] - 3)
    seed_word, next_word = db['words'][seed], db['words'][seed + 1]
    w1, w2 = seed_word, next_word
    # Generate the complete sentence
    sentence = []
    for i in range(0, words - 1):
        sentence.append(w1)
        w1, w2 = w2, random.choice(db['freqs'][(w1, w2)])
    sentence.append(w2)
    # Make the sentence respectable
    sentence = ' '.join(sentence)
    # Capitalize the sentence
    sentence = sentence.capitalize()
    # Remove sentence-ending punctuation
    sentence = sentence.replace('.', '')
    sentence = sentence.replace('!', '')
    sentence = sentence.replace('?', '')
    sentence = sentence.replace(':', '')
    # Remove quote tags
    sentence = sentence.replace('"', '')
    # If the last character is not alphanumeric remove it
    sentence = re.sub('[^a-zA-Z0-9]$', '', sentence)
    # Collapse excess whitespace
    sentence = re.sub(r'\s+', ' ', sentence)
    # Add a full stop
    sentence += '.'
    return sentence
Generate a sentence
def handleSubRectangles(self, images, subRectangles): """ handleSubRectangles(images) Handle the sub-rectangle stuff. If the rectangles are given by the user, the values are checked. Otherwise the subrectangles are calculated automatically. """ if isinstance(subRectangles, (tuple, list)): # xy given directly # Check xy xy = subRectangles if xy is None: xy = (0, 0) if hasattr(xy, '__len__'): if len(xy) == len(images): xy = [xxyy for xxyy in xy] else: raise ValueError("len(xy) doesn't match amount of images.") else: xy = [xy for im in images] xy[0] = (0, 0) else: # Calculate xy using some basic image processing # Check Numpy if np is None: raise RuntimeError("Need Numpy to use auto-subRectangles.") # First make numpy arrays if required for i in range(len(images)): im = images[i] if isinstance(im, Image.Image): tmp = im.convert() # Make without palette a = np.asarray(tmp) if len(a.shape) == 0: raise MemoryError("Too little memory to convert PIL image to array") images[i] = a # Determine the sub rectangles images, xy = self.getSubRectangles(images) # Done return images, xy
handleSubRectangles(images) Handle the sub-rectangle stuff. If the rectangles are given by the user, the values are checked. Otherwise the subrectangles are calculated automatically.
def set_object(self, obj, properties): """Add an object to the definition and set its ``properties``.""" self._objects.add(obj) properties = set(properties) self._properties |= properties pairs = self._pairs for p in self._properties: if p in properties: pairs.add((obj, p)) else: pairs.discard((obj, p))
Add an object to the definition and set its ``properties``.
def edge_val_set(self, graph, orig, dest, idx, key, branch, turn, tick, value): """Set this key of this edge to this value.""" if (branch, turn, tick) in self._btts: raise TimeError self._btts.add((branch, turn, tick)) graph, orig, dest, key, value = map(self.pack, (graph, orig, dest, key, value)) self._edgevals2set.append( (graph, orig, dest, idx, key, branch, turn, tick, value) )
Set this key of this edge to this value.
def barf(msg, exit=None, f=sys.stderr): '''Exit with a log message (usually a fatal error)''' exit = const('FSQ_FAIL_TMP') if exit is None else exit shout(msg, f) sys.exit(exit)
Exit with a log message (usually a fatal error)
def add_batch(self, batch_id, batch_properties=None):
    """Adds a batch with the given ID and a dict of properties."""
    if batch_properties is None:
        batch_properties = {}
    if not isinstance(batch_properties, dict):
        raise ValueError('batch_properties has to be dict, however it was: ' + str(type(batch_properties)))
    self._data[batch_id] = batch_properties.copy()
    self._data[batch_id]['images'] = {}
Adds a batch with the given ID and a dict of properties.
def schnorr_generate_nonce_pair(self, msg, raw=False, digest=hashlib.sha256): """ Generate a nonce pair deterministically for use with schnorr_partial_sign. """ if not HAS_SCHNORR: raise Exception("secp256k1_schnorr not enabled") msg32 = _hash32(msg, raw, digest) pubnonce = ffi.new('secp256k1_pubkey *') privnonce = ffi.new('char [32]') valid = lib.secp256k1_schnorr_generate_nonce_pair( self.ctx, pubnonce, privnonce, msg32, self.private_key, ffi.NULL, ffi.NULL) assert valid == 1 return pubnonce, privnonce
Generate a nonce pair deterministically for use with schnorr_partial_sign.
def energy(self, sample_like, dtype=np.float): """The energy of the given sample. Args: sample_like (samples_like): A raw sample. `sample_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. dtype (:class:`numpy.dtype`, optional): The data type of the returned energies. Defaults to float. Returns: The energy. """ energy, = self.energies(sample_like, dtype=dtype) return energy
The energy of the given sample. Args: sample_like (samples_like): A raw sample. `sample_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. dtype (:class:`numpy.dtype`, optional): The data type of the returned energies. Defaults to float. Returns: The energy.
def get_single_item(d): """Get an item from a dict which contains just one item.""" assert len(d) == 1, 'Single-item dict must have just one item, not %d.' % len(d) return next(six.iteritems(d))
Get an item from a dict which contains just one item.
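A quick illustration with a made-up dict: the single (key, value) pair comes back as a tuple.

assert get_single_item({'answer': 42}) == ('answer', 42)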
def update_sig(queue): """update signature""" while True: options, sign, vers = queue.get() info("[+] \033[92mChecking signature version:\033[0m %s" % sign) localver = get_local_version(options.mirrordir, sign) remotever = vers[sign] if localver is None or (localver and int(localver) < int(remotever)): info("=> Update required local: %s => remote: %s" % (localver, remotever)) info("=> Downloading signature: %s" % sign) status, code = download_sig(options, sign, remotever) if status: info("=> Downloaded signature: %s" % sign) copy_sig(sign, options, 0) else: if code == 404: error("=> \033[91mSignature:\033[0m %s not found" % sign) error("=> \033[91mDownload failed:\033[0m %s code: %d" % (sign, code)) else: info( "=> No update required L: %s => R: %s" % (localver, remotever)) queue.task_done()
update signature
def find(self, func: Callable[[T], bool]) -> TOption[T]: """ Usage: >>> TList([1, 2, 3, 4, 5]).find(lambda x: x > 3) Option --> 4 >>> TList([1, 2, 3, 4, 5]).find(lambda x: x > 6) Option --> None """ for x in self: if func(x): return TOption(x) return TOption(None)
Usage: >>> TList([1, 2, 3, 4, 5]).find(lambda x: x > 3) Option --> 4 >>> TList([1, 2, 3, 4, 5]).find(lambda x: x > 6) Option --> None
def plot_pairwise_distance(dist, labels=None, colorbar=True, ax=None, imshow_kwargs=None): """Plot a pairwise distance matrix. Parameters ---------- dist : array_like The distance matrix in condensed form. labels : sequence of strings, optional Sample labels for the axes. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like, optional Additional keyword arguments passed through to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : axes The axes on which the plot was drawn """ import matplotlib.pyplot as plt # check inputs dist_square = ensure_square(dist) # set up axes if ax is None: # make a square figure x = plt.rcParams['figure.figsize'][0] fig, ax = plt.subplots(figsize=(x, x)) fig.tight_layout() # setup imshow arguments if imshow_kwargs is None: imshow_kwargs = dict() imshow_kwargs.setdefault('interpolation', 'none') imshow_kwargs.setdefault('cmap', 'jet') imshow_kwargs.setdefault('vmin', np.min(dist)) imshow_kwargs.setdefault('vmax', np.max(dist)) # plot as image im = ax.imshow(dist_square, **imshow_kwargs) # tidy up if labels: ax.set_xticks(range(len(labels))) ax.set_yticks(range(len(labels))) ax.set_xticklabels(labels, rotation=90) ax.set_yticklabels(labels, rotation=0) else: ax.set_xticks([]) ax.set_yticks([]) if colorbar: plt.gcf().colorbar(im, shrink=.5) return ax
Plot a pairwise distance matrix. Parameters ---------- dist : array_like The distance matrix in condensed form. labels : sequence of strings, optional Sample labels for the axes. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like, optional Additional keyword arguments passed through to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : axes The axes on which the plot was drawn
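A hedged usage sketch for the plotting helper above. It assumes SciPy is available to build the condensed distance matrix; the sample data and labels are made up.

import numpy as np
from scipy.spatial.distance import pdist

rng = np.random.default_rng(42)
samples = rng.normal(size=(6, 10))          # 6 samples, 10 features each
dist = pdist(samples, metric='euclidean')   # condensed (1-D) form expected above
labels = ['s%d' % i for i in range(6)]
ax = plot_pairwise_distance(dist, labels=labels)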
def description(filename): """Provide a short description.""" # This ends up in the Summary header for PKG-INFO and it should be a # one-liner. It will get rendered on the package page just below the # package version header but above the long_description, which ironically # gets stuff into the Description header. It should not include reST, so # pick out the first single line after the double header. with open(filename) as fp: for lineno, line in enumerate(fp): if lineno < 3: continue line = line.strip() if len(line) > 0: return line
Provide a short description.
def _unary_(self, func, inplace=False): ''' :func: unary function to apply to each coordinate :inplace: optional boolean :return: Point Implementation private method. All of the unary operations funnel thru this method to reduce cut-and-paste code and enforce consistent behavior of unary ops. Applies 'func' to self and returns the result. The expected call signature of 'func' is f(a) If 'inplace' is True, the results are stored in 'self', otherwise the results will be stored in a new object. Returns a Point. ''' dst = self if inplace else self.__class__(self) dst.x = func(dst.x) dst.y = func(dst.y) dst.z = func(dst.z) return dst
:func: unary function to apply to each coordinate :inplace: optional boolean :return: Point Implementation private method. All of the unary operations funnel thru this method to reduce cut-and-paste code and enforce consistent behavior of unary ops. Applies 'func' to self and returns the result. The expected call signature of 'func' is f(a) If 'inplace' is True, the results are stored in 'self', otherwise the results will be stored in a new object. Returns a Point.
def sql_dequote_string(s: str) -> str: """ Reverses :func:`sql_quote_string`. """ if len(s) < 2 or s[0] != SQUOTE or s[-1] != SQUOTE: raise ValueError("Not an SQL string literal") s = s[1:-1] # strip off the surrounding quotes return s.replace(DOUBLE_SQUOTE, SQUOTE)
Reverses :func:`sql_quote_string`.
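Two quick illustrations with made-up literals (they rely on the SQUOTE and DOUBLE_SQUOTE constants from the same module): the surrounding quotes are stripped and doubled single quotes collapse back to one.

assert sql_dequote_string("'hello'") == "hello"
assert sql_dequote_string("'it''s'") == "it's"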
def _create_flow(self, request_handler): """Create the Flow object. The Flow is calculated lazily since we don't know where this app is running until it receives a request, at which point redirect_uri can be calculated and then the Flow object can be constructed. Args: request_handler: webapp.RequestHandler, the request handler. """ if self.flow is None: redirect_uri = request_handler.request.relative_url( self._callback_path) # Usually /oauth2callback self.flow = client.OAuth2WebServerFlow( self._client_id, self._client_secret, self._scope, redirect_uri=redirect_uri, user_agent=self._user_agent, auth_uri=self._auth_uri, token_uri=self._token_uri, revoke_uri=self._revoke_uri, **self._kwargs)
Create the Flow object. The Flow is calculated lazily since we don't know where this app is running until it receives a request, at which point redirect_uri can be calculated and then the Flow object can be constructed. Args: request_handler: webapp.RequestHandler, the request handler.
def _prune_some_if_small(self, small_size, a_or_u): "Merge some nodes in the directory, whilst keeping others." # Assert that we're not messing things up. prev_app_size = self.app_size() prev_use_size = self.use_size() keep_nodes = [] prune_app_size = 0 prune_use_size = 0 for node in self._nodes: node_size = node.app_size() if a_or_u else node.use_size() if node_size < small_size: if a_or_u: prune_app_size += node_size prune_use_size += node.use_size() else: prune_app_size += node.app_size() prune_use_size += node_size else: keep_nodes.append(node) # Last "leftover" node? Merge with parent. if len(keep_nodes) == 1 and keep_nodes[-1]._isdir is None: prune_app_size += keep_nodes[-1]._app_size prune_use_size += keep_nodes[-1]._use_size keep_nodes = [] if prune_app_size: if not keep_nodes: # The only node to keep, no "leftovers" here. Move data # to the parent. keep_nodes = None assert self._isdir and self._nodes is not None self._set_size(prune_app_size, prune_use_size) elif keep_nodes and keep_nodes[-1]._isdir is None: # There was already a leftover node. Add the new leftovers. keep_nodes[-1]._add_size(prune_app_size, prune_use_size) else: # Create a new leftover node. keep_nodes.append(DuNode.new_leftovers( self._path, prune_app_size, prune_use_size)) # Update nodes and do the actual assertion. self._nodes = keep_nodes assert prev_app_size == self.app_size(), ( prev_app_size, self.app_size()) assert prev_use_size == self.use_size(), ( prev_use_size, self.use_size())
Merge some nodes in the directory, whilst keeping others.
def model_reaction_limits(model): """Yield model reaction limits as YAML dicts.""" for reaction in sorted(model.reactions, key=lambda r: r.id): equation = reaction.properties.get('equation') if equation is None: continue # Determine the default flux limits. If the value is already at the # default it does not need to be included in the output. lower_default, upper_default = None, None if model.default_flux_limit is not None: if equation.direction.reverse: lower_default = -model.default_flux_limit else: lower_default = 0.0 if equation.direction.forward: upper_default = model.default_flux_limit else: upper_default = 0.0 lower_flux, upper_flux = None, None if reaction.id in model.limits: _, lower, upper = model.limits[reaction.id] lower_flux = _get_output_limit(lower, lower_default) upper_flux = _get_output_limit(upper, upper_default) if lower_flux is not None or upper_flux is not None: d = OrderedDict([('reaction', reaction.id)]) d.update(_generate_limit_items(lower_flux, upper_flux)) yield d
Yield model reaction limits as YAML dicts.
def _update_triplestore(self, es_result, action_list, **kwargs): """ updates the triplestore with success of saves and failues of indexing Args: ----- es_result: the elasticsearch result list action_list: list of elasticsearch action items that were indexed """ idx_time = XsdDatetime(datetime.datetime.utcnow()) uri_keys = {} bnode_keys = {} for item in action_list: try: uri_keys[item['_id']] = item['_source']["uri"] except KeyError: bnode_keys[item['_id']] = item['_id'] error_dict = {} error_bnodes = {} if es_result[1]: for result in es_result[1]: err_item = list(result.values())[0] try: error_dict[uri_keys.pop(err_item['_id'])] = \ XsdString(err_item['error']['reason']) except KeyError: error_bnodes[bnode_keys.pop(err_item['_id'])] = \ XsdString(err_item['error']['reason']) if uri_keys: sparql_good = """ DELETE {{ ?s kds:esIndexTime ?esTime . ?s kds:esIndexError ?esError . }} INSERT {{ GRAPH ?g {{ ?s kds:esIndexTime {idx_time} }}. }} WHERE {{ VALUES ?s {{ {subj_list} }} . {{ SELECT DISTINCT ?g ?s ?esTime ?esError {{ GRAPH ?g {{ ?s ?p ?o }} . OPTIONAL {{ ?s kds:esIndexTime ?esTime }} OPTIONAL {{ ?s kds:esIndexError ?esError }} }} }} }} """.format(idx_time=idx_time.sparql, subj_list="<%s>" % ">\n<".join(uri_keys.values())) self.tstore_conn.update_query(sparql_good) # Process any errors that were found. if not error_dict: return # Delete all indexing triples related to the error subjects sparql_error = """ DELETE {{ ?s kds:esIndexTime ?esTime . ?s kds:esIndexError ?esError . }} WHERE {{ VALUES ?s {{ {subj_list} }} . OPTIONAL {{ ?s kds:esIndexTime ?esTime }} OPTIONAL {{ ?s kds:esIndexError ?esError }} }} """.format(subj_list="<%s>" % ">\n<".join(error_dict.keys())) self.tstore_conn.update_query(sparql_error) del sparql_error sparql_update = """ INSERT {{ GRAPH ?g {{ ?s kds:esIndexTime {idx_time} . ?s kds:esIndexError ?esError . }} }} WHERE {{ VALUES (?s ?esError) {{ {error_list} }} . {{ SELECT DISTINCT ?g ?s {{ graph ?g {{?s ?p ?o}} }} }} }}""".format( idx_time=idx_time.sparql, error_list="\n".join(["(<%s> %s)" % (key, val.sparql) for key, val in error_dict.items()])) # Create a turtle data stream of the new errors to upload into the # triplestore self.tstore_conn.update_query(sparql_update) del sparql_update
updates the triplestore with success of saves and failures of indexing

Args:
-----
    es_result: the elasticsearch result list
    action_list: list of elasticsearch action items that were indexed
def remove_folder(self, tree, prefix): """ Used to remove any empty folders If this folder is empty then it is removed. If the parent is empty as a result, then the parent is also removed, and so on. """ while True: child = tree tree = tree.parent if not child.folders and not child.files: del self.cache[tuple(prefix)] if tree: del tree.folders[prefix.pop()] if not tree or tree.folders or tree.files: break
Used to remove any empty folders If this folder is empty then it is removed. If the parent is empty as a result, then the parent is also removed, and so on.
def _compute_raw_image_norm(self, data):
    """
    Helper function that computes the uncorrected inverse normalization
    factor of input image data. This quantity is computed as the
    *sum of all pixel values*.

    .. note::
        This function is intended to be overridden in a subclass if one
        desires to change the way the normalization factor is computed.
    """
    return np.sum(self._data, dtype=np.float64)
Helper function that computes the uncorrected inverse normalization factor of input image data. This quantity is computed as the *sum of all pixel values*. .. note:: This function is intended to be overridden in a subclass if one desires to change the way the normalization factor is computed.
def igetattr(self, name, context=None, class_context=True): """Infer the possible values of the given variable. :param name: The name of the variable to infer. :type name: str :returns: The inferred possible values. :rtype: iterable(NodeNG or Uninferable) """ # set lookup name since this is necessary to infer on import nodes for # instance context = contextmod.copy_context(context) context.lookupname = name try: attr = self.getattr(name, context, class_context=class_context)[0] for inferred in bases._infer_stmts([attr], context, frame=self): # yield Uninferable object instead of descriptors when necessary if not isinstance(inferred, node_classes.Const) and isinstance( inferred, bases.Instance ): try: inferred._proxied.getattr("__get__", context) except exceptions.AttributeInferenceError: yield inferred else: yield util.Uninferable else: yield function_to_method(inferred, self) except exceptions.AttributeInferenceError as error: if not name.startswith("__") and self.has_dynamic_getattr(context): # class handle some dynamic attributes, return a Uninferable object yield util.Uninferable else: raise exceptions.InferenceError( error.message, target=self, attribute=name, context=context )
Infer the possible values of the given variable. :param name: The name of the variable to infer. :type name: str :returns: The inferred possible values. :rtype: iterable(NodeNG or Uninferable)
def isBridgeFiltered (self): """ Checks if address is an IEEE 802.1D MAC Bridge Filtered MAC Group Address This range is 01-80-C2-00-00-00 to 01-80-C2-00-00-0F. MAC frames that have a destination MAC address within this range are not relayed by bridges conforming to IEEE 802.1D """ return ((self.__value[0] == 0x01) and (self.__value[1] == 0x80) and (self.__value[2] == 0xC2) and (self.__value[3] == 0x00) and (self.__value[4] == 0x00) and (self.__value[5] <= 0x0F))
Checks if address is an IEEE 802.1D MAC Bridge Filtered MAC Group Address This range is 01-80-C2-00-00-00 to 01-80-C2-00-00-0F. MAC frames that have a destination MAC address within this range are not relayed by bridges conforming to IEEE 802.1D
def copy(self): """ Copy the grammar. """ new = self.__class__() for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords", "tokens", "symbol2label"): setattr(new, dict_attr, getattr(self, dict_attr).copy()) new.labels = self.labels[:] new.states = self.states[:] new.start = self.start return new
Copy the grammar.
def picard_sort(picard, align_bam, sort_order="coordinate", out_file=None, compression_level=None, pipe=False): """Sort a BAM file by coordinates. """ base, ext = os.path.splitext(align_bam) if out_file is None: out_file = "%s-sort%s" % (base, ext) if not file_exists(out_file): with tx_tmpdir(picard._config) as tmp_dir: with file_transaction(picard._config, out_file) as tx_out_file: opts = [("INPUT", align_bam), ("OUTPUT", out_file if pipe else tx_out_file), ("TMP_DIR", tmp_dir), ("SORT_ORDER", sort_order)] if compression_level: opts.append(("COMPRESSION_LEVEL", compression_level)) picard.run("SortSam", opts, pipe=pipe) return out_file
Sort a BAM file by coordinates.
def distance2_to_line(pt, l0, l1): '''The perpendicular distance squared from a point to a line pt - point in question l0 - one point on the line l1 - another point on the line ''' pt = np.atleast_1d(pt) l0 = np.atleast_1d(l0) l1 = np.atleast_1d(l1) reshape = pt.ndim == 1 if reshape: pt.shape = l0.shape = l1.shape = (1, pt.shape[0]) result = (((l0[:,0] - l1[:,0]) * (l0[:,1] - pt[:,1]) - (l0[:,0] - pt[:,0]) * (l0[:,1] - l1[:,1]))**2 / np.sum((l1-l0)**2, 1)) if reshape: result = result[0] return result
The perpendicular distance squared from a point to a line pt - point in question l0 - one point on the line l1 - another point on the line
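A worked check with simple made-up values: the point (0, 1) sits one unit from the x-axis through (0, 0) and (1, 0), so the squared distance is 1; a vectorised call over two points is also shown.

import numpy as np

d2 = distance2_to_line((0, 1), (0, 0), (1, 0))
assert np.isclose(d2, 1.0)

pts = np.array([[0, 1], [2, 3]])
l0 = np.array([[0, 0], [0, 0]])
l1 = np.array([[1, 0], [1, 0]])
assert np.allclose(distance2_to_line(pts, l0, l1), [1.0, 9.0])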
def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.vpc.VPCConnection` :return: A connection to VPC """ from boto.vpc import VPCConnection return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
:type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.vpc.VPCConnection` :return: A connection to VPC
def write_xpm(matrix, version, out, scale=1, border=None, color='#000', background='#fff', name='img'): """\ Serializes the matrix as `XPM <https://en.wikipedia.org/wiki/X_PixMap>`_ image. :param matrix: The matrix to serialize. :param int version: The (Micro) QR code version :param out: Filename or a file-like object supporting to write binary data. :param scale: Indicates the size of a single module (default: 1 which corresponds to 1 x 1 pixel per module). :param int border: Integer indicating the size of the quiet zone. If set to ``None`` (default), the recommended border size will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes). :param color: Color of the modules (default: black). The color can be provided as ``(R, G, B)`` tuple, as web color name (like "red") or in hexadecimal format (``#RGB`` or ``#RRGGBB``). :param background: Optional background color (default: white). See `color` for valid values. ``None`` indicates a transparent background. :param str name: Name of the image (must be a valid C-identifier). Default: "img". """ row_iter = matrix_iter(matrix, version, scale, border) width, height = get_symbol_size(version, scale=scale, border=border) stroke_color = colors.color_to_rgb_hex(color) bg_color = colors.color_to_rgb_hex(background) if background is not None else 'None' with writable(out, 'wt') as f: write = f.write write('/* XPM */\n' 'static char *{0}[] = {{\n' '"{1} {2} 2 1",\n' '" c {3}",\n' '"X c {4}",\n'.format(name, width, height, bg_color, stroke_color)) for i, row in enumerate(row_iter): write(''.join(chain(['"'], (' ' if not b else 'X' for b in row), ['"{0}\n'.format(',' if i < height - 1 else '')]))) write('};\n')
\ Serializes the matrix as `XPM <https://en.wikipedia.org/wiki/X_PixMap>`_ image. :param matrix: The matrix to serialize. :param int version: The (Micro) QR code version :param out: Filename or a file-like object supporting to write binary data. :param scale: Indicates the size of a single module (default: 1 which corresponds to 1 x 1 pixel per module). :param int border: Integer indicating the size of the quiet zone. If set to ``None`` (default), the recommended border size will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes). :param color: Color of the modules (default: black). The color can be provided as ``(R, G, B)`` tuple, as web color name (like "red") or in hexadecimal format (``#RGB`` or ``#RRGGBB``). :param background: Optional background color (default: white). See `color` for valid values. ``None`` indicates a transparent background. :param str name: Name of the image (must be a valid C-identifier). Default: "img".