positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def group_members_remove(self, device_group_id, body, **kwargs):  # noqa: E501
    """Remove a device from a group  # noqa: E501

    Remove one device from a group  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.group_members_remove(device_group_id, body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str device_group_id: The ID of the group (required)
    :param DeviceGroupManipulation body: Body of the request (required)
    :return: DevicePage
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the low-level call for the payload only; when asynchronous
    # the delegate returns the request thread instead, so both the sync and
    # async cases reduce to returning the delegate's result unchanged.
    kwargs['_return_http_data_only'] = True
    return self.group_members_remove_with_http_info(device_group_id, body, **kwargs)  # noqa: E501
Remove a device from a group # noqa: E501 Remove one device from a group # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.group_members_remove(device_group_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_group_id: The ID of the group (required) :param DeviceGroupManipulation body: Body of the request (required) :return: DevicePage If the method is called asynchronously, returns the request thread.
def fill_array(self, array, weights=None):
    """
    Fill this histogram with a NumPy array.

    Prefers root_numpy's newer ``fill_hist`` entry point and falls back to
    the older ``fill_array`` name; aborts loudly if root_numpy is absent.
    """
    try:
        try:
            from root_numpy import fill_hist as _fill
        except ImportError:
            from root_numpy import fill_array as _fill
    except ImportError:
        log.critical(
            "root_numpy is needed for Hist*.fill_array. "
            "Is it installed and importable?")
        raise
    _fill(self, array, weights=weights)
Fill this histogram with a NumPy array
def report(self):
    """Creates sistr reports.

    Writes one tab-separated report per strain plus a cumulative
    ``sistr.tsv`` in ``self.reportdir``. Strains whose sistr output is
    missing or incomplete are skipped silently.
    """
    # Initialise strings to store report data
    header = '\t'.join(self.headers) + '\n'
    data = ''
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            # Each strain is a fresh row
            row = ''
            try:
                # Read in the output .json file into the metadata.
                # Use a context manager so the handle is closed
                # (the original json.load(open(...)) leaked the file handle).
                with open(sample[self.analysistype].jsonoutput, 'r') as jsonreport:
                    sample[self.analysistype].jsondata = json.load(jsonreport)
                # Set the name of the report.
                # Note that this is a tab-separated file, as there can be commas in the results
                sample[self.analysistype].report = os.path.join(
                    sample[self.analysistype].reportdir, '{}.tsv'.format(sample.name))
                # Iterate through all the headers to use as keys in the json-formatted output
                for category in self.headers:
                    # Tab separate all the results
                    row += '{}\t'.format(sample[self.analysistype].jsondata[0][category])
                    # Create attributes for each category
                    setattr(sample[self.analysistype], category,
                            str(sample[self.analysistype].jsondata[0][category]))
                # End the results with a newline
                row += '\n'
                data += row
                # Create and write headers and results to the strain-specific report
                with open(sample[self.analysistype].report, 'w') as strainreport:
                    strainreport.write(header)
                    strainreport.write(row)
            except (KeyError, AttributeError):
                # Best-effort: skip strains with missing/malformed sistr output.
                pass
    # Create and write headers and cumulative results to the combined report
    with open(os.path.join(self.reportdir, 'sistr.tsv'), 'w') as report:
        report.write(header)
        report.write(data)
Creates sistr reports
def get_list(sld, tld):
    '''
    Gets a list of DNS servers associated with the requested domain.

    returns a dictionary of information about requested domain

    sld
        SLD of the domain name

    tld
        TLD of the domain name

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains_dns.get_list sld tld
    '''
    opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.getlist')
    opts['SLD'] = sld
    opts['TLD'] = tld

    response_xml = salt.utils.namecheap.get_request(opts)
    if response_xml is None:
        return {}

    # The payload of interest lives under the first DomainDNSGetListResult node.
    result_node = response_xml.getElementsByTagName('DomainDNSGetListResult')[0]
    return salt.utils.namecheap.xml_to_dict(result_node)
Gets a list of DNS servers associated with the requested domain. returns a dictionary of information about requested domain sld SLD of the domain name tld TLD of the domain name CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.get_list sld tld
def prune(self):
    """
    Remove anything which shouldn't be displayed.
    """
    def to_include(obj):
        # Keep objects whose permission is displayed; optionally drop
        # undocumented entities when hide_undoc is enabled.
        inc = obj.permission in self.display
        if self.settings['hide_undoc'].lower() == 'true' and not obj.doc:
            inc = False
        return inc

    if self.obj == 'proc' and self.meta['proc_internals'] == 'false':
        # Procedure internals are suppressed wholesale.
        self.functions = []
        self.subroutines = []
        self.types = []
        self.interfaces = []
        self.absinterfaces = []
        self.variables = []
    else:
        # Filter each child list in place; the mod* lists only exist on
        # some node kinds, so guard them with hasattr.
        for attr in ('functions', 'subroutines', 'types', 'interfaces',
                     'absinterfaces', 'variables'):
            setattr(self, attr,
                    [obj for obj in getattr(self, attr) if to_include(obj)])
        for attr in ('modprocedures', 'modsubroutines', 'modfunctions'):
            if hasattr(self, attr):
                setattr(self, attr,
                        [obj for obj in getattr(self, attr) if to_include(obj)])
    # Mark the survivors visible, then prune recursively.
    for obj in self.absinterfaces:
        obj.visible = True
    for obj in self.iterator('functions', 'subroutines', 'types', 'interfaces',
                             'modprocedures', 'modfunctions', 'modsubroutines'):
        obj.visible = True
    for obj in self.iterator('functions', 'subroutines', 'types',
                             'modprocedures', 'modfunctions', 'modsubroutines'):
        obj.prune()
Remove anything which shouldn't be displayed.
def _concrete_instance(self, instance_doc): """Concretize an instance document. :param dict instance_doc: A document describing an instance. Should come from the API. :returns: A subclass of :py:class:`bases.BaseInstance`, or None. :rtype: :py:class:`bases.BaseInstance` """ if not isinstance(instance_doc, dict): return None # Attempt to instantiate the appropriate class for the given instance document. try: service = instance_doc['service'] cls = self._service_class_map[service] return cls(instance_document=instance_doc, instances=self) # If construction fails, log the exception and return None. except Exception as ex: logger.exception(ex) logger.error( 'Instance construction failed. You probably need to upgrade to a more ' 'recent version of the client. Instance document which generated this ' 'warning: {}'.format(instance_doc) ) return None
Concretize an instance document. :param dict instance_doc: A document describing an instance. Should come from the API. :returns: A subclass of :py:class:`bases.BaseInstance`, or None. :rtype: :py:class:`bases.BaseInstance`
def _emit(self, **kwargs):
    '''
    Emit an OpenConfig object given a certain combination of fields mapped
    in the config to the corresponding hierarchy.
    '''
    oc_dict = {}
    # Dynamic fields: the mapping names a kwargs key whose value is emitted.
    for path_fmt, source_key in kwargs['mapping']['variables'].items():
        oc_dict = napalm_logs.utils.setval(path_fmt.format(**kwargs),
                                           kwargs[source_key],
                                           oc_dict)
    # Static fields: the mapped value is emitted verbatim.
    for path_fmt, value in kwargs['mapping']['static'].items():
        oc_dict = napalm_logs.utils.setval(path_fmt.format(**kwargs),
                                           value,
                                           oc_dict)
    return oc_dict
Emit an OpenConfig object given a certain combination of fields mapped in the config to the corresponding hierarchy.
def parameter_list(data):
    """Create a list of parameter objects from a dict.

    :param data: Dictionary to convert to parameter list.
    :type data: dict
    :return: Parameter list.
    :rtype: dict
    """
    params = []
    for entry in data:
        p = Parameter(entry['name'], entry['value'])
        # 'meta' is optional; only copy it across when present.
        if 'meta' in entry:
            p.meta = entry['meta']
        params.append(p)
    return params
Create a list of parameter objects from a dict. :param data: Dictionary to convert to parameter list. :type data: dict :return: Parameter list. :rtype: dict
def stats(request, date_offset=0, fields=None, title_prefix=None, model='WikiItem'):
    """
    In addition to chart data in data['chart'], send statistics data to view in data['stats']
    """
    data = {}
    # NOTE(review): `data` is initialized empty but data['chart']['chartdata'] is
    # read on the next line, which raises KeyError as written. A step that
    # populates data['chart'] (likely a sibling chart-building view helper using
    # request/date_offset/model) appears to be missing -- confirm against the
    # original module.
    modified_chart_data = data['chart']['chartdata']
    if 'y2' in data['chart']['chartdata']:
        # Two named series present: correlate y1 against y2.
        matrix = db.Columns([modified_chart_data['y1'], modified_chart_data['y2']], ddof=0, tall=True)
    else:
        # Single series: correlate the x (time) axis against y.
        fields = ['date/time'] + fields
        matrix = db.Columns([modified_chart_data['x'], modified_chart_data['y']], ddof=0, tall=True)
    if fields and len(fields) > 1:
        fields = fields[:2]
    else:
        # Fall back to series names embedded in the chart data.
        fields = [
            data['chart']['chartdata'].get('name1') or 'time',
            data['chart']['chartdata'].get('name2') or data['chart']['chartdata'].get('name') or 'value',
        ]
    fields = util.pluralize_field_names(fields)
    # NOTE(review): on Python 3, zip() below yields lazy iterators; if the
    # template iterates them more than once the second pass sees nothing --
    # presumably this predates py3, verify and wrap in list() if needed.
    data.update({
        'stats': {
            'fields': fields,
            'heading': 'Statistics',
            'cov': zip(fields, matrix.cov()),
            'R': zip(fields, matrix.rho),
        },
    })
    data['chart']['chartdata'] = modified_chart_data
    data['chart']['chart_title'] = 'Time Series'
    return render_to_response('miner/stats.html', data)
In addition to chart data in data['chart'], send statistics data to view in data['stats']
def set_environment_variable(self, name, value):
    """
    Set the value of an environment variable.

    .. warning::
        The server may reject this request depending on its ``AcceptEnv``
        setting; such rejections will fail silently (which is common client
        practice for this particular request type). Make sure you
        understand your server's configuration before using!

    :param str name: name of the environment variable
    :param str value: value of the environment variable

    :raises:
        `.SSHException` -- if the request was rejected or the channel was
        closed
    """
    msg = Message()
    msg.add_byte(cMSG_CHANNEL_REQUEST)
    msg.add_int(self.remote_chanid)
    msg.add_string("env")
    # want_reply=False: rejections fail silently by design (see warning).
    msg.add_boolean(False)
    msg.add_string(name)
    msg.add_string(value)
    self.transport._send_user_message(msg)
Set the value of an environment variable. .. warning:: The server may reject this request depending on its ``AcceptEnv`` setting; such rejections will fail silently (which is common client practice for this particular request type). Make sure you understand your server's configuration before using! :param str name: name of the environment variable :param str value: value of the environment variable :raises: `.SSHException` -- if the request was rejected or the channel was closed
def _getPhrase( self, i, sentence, NPlabels ): ''' Fetches the full length phrase from the position i based on the existing NP phrase annotations (from NPlabels); Returns list of sentence tokens in the phrase, and indices of the phrase; ''' phrase = [] indices = [] if 0 <= i and i < len(sentence) and NPlabels[i] == 'B': phrase = [ sentence[i] ] indices = [ i ] j = i + 1 while ( j < len(sentence) ): if NPlabels[j] in ['B', '']: break else: phrase.append( sentence[j] ) indices.append( j ) j += 1 return phrase, indices
Fetches the full length phrase from the position i based on the existing NP phrase annotations (from NPlabels); Returns list of sentence tokens in the phrase, and indices of the phrase;
def as_xml_part(self, basename="/tmp/sitemap.xml", part_number=0):
    """Return a string of component sitemap number part_number.

    Used in the case of a large list that is split into component
    sitemaps. basename is used to create "index" links to the
    sitemapindex.

    Q - what timestamp should be used?
    """
    if not self.requires_multifile():
        raise ListBaseIndexError(
            "Request for component sitemap for list with only %d entries when max_sitemap_entries is set to %s" %
            (len(self), str(self.max_sitemap_entries)))
    start = part_number * self.max_sitemap_entries
    if start > len(self):
        raise ListBaseIndexError(
            "Request for component sitemap with part_number too high, would start at entry %d yet the list has only %d entries" %
            (start, len(self)))
    # Clamp the final (possibly short) part to the end of the list.
    stop = min(start + self.max_sitemap_entries, len(self))
    # Build a child ListBase holding just this slice of resources.
    part = ListBase(itertools.islice(self.resources, start, stop))
    part.capability_name = self.capability_name
    part.default_capability()
    part.index = basename
    return self.new_sitemap().resources_as_xml(part)
Return a string of component sitemap number part_number. Used in the case of a large list that is split into component sitemaps. basename is used to create "index" links to the sitemapindex Q - what timestamp should be used?
def job_priority_index(job_priorities):
    '''This structure helps with finding data from the job priorities table'''
    jp_index = {}
    # Single pass over the DB rows so we never have to iterate them again.
    for jp in job_priorities:
        # Uniqueness is guaranteed by a composite index on
        # (testtype, buildtype, platform) in models.py.
        key = jp.unique_identifier()
        if key in jp_index:
            raise DuplicateKeyError(
                '"{}" should be a unique job priority and that is unexpected.'.format(key))
        jp_index[key] = {'pk': jp.id, 'build_system_type': jp.buildsystem}

    return jp_index
This structure helps with finding data from the job priorities table
def _pdb_frame(self): """Return current Pdb frame if there is any""" if self._pdb_obj is not None and self._pdb_obj.curframe is not None: return self._pdb_obj.curframe
Return current Pdb frame if there is any
def add_code_challenge(request_args, service, **kwargs):
    """
    PKCE RFC 7636 support
    To be added as a post_construct method to an
    :py:class:`oidcservice.oidc.service.Authorization` instance

    :param service: The service that uses this function
    :param request_args: Set of request arguments
    :param kwargs: Extra set of keyword arguments
    :return: Updated set of request arguments
    """
    _cfg = service.service_context.config
    try:
        cv_len = _cfg['code_challenge']['length']
    except KeyError:
        cv_len = 64  # Use default

    # code_verifier: string of length cv_len
    code_verifier = unreserved(cv_len)

    try:
        _method = _cfg['code_challenge']['method']
    except KeyError:
        _method = 'S256'

    try:
        # Pick the hash method, apply it to the verifier, then
        # base64url-encode the digest to get the challenge.
        _hv = CC_METHOD[_method](code_verifier.encode()).digest()
        code_challenge = b64e(_hv).decode('ascii')
    except KeyError:
        raise Unsupported(
            'PKCE Transformation method:{}'.format(_method))

    # Persist the verifier keyed by state so the token request can replay it.
    service.store_item(Message(code_verifier=code_verifier,
                               code_challenge_method=_method),
                       'pkce', request_args['state'])

    request_args.update({"code_challenge": code_challenge,
                         "code_challenge_method": _method})
    return request_args
PKCE RFC 7636 support To be added as a post_construct method to an :py:class:`oidcservice.oidc.service.Authorization` instance :param service: The service that uses this function :param request_args: Set of request arguments :param kwargs: Extra set of keyword arguments :return: Updated set of request arguments
def ProcessHttpRequest(self, http_request):
    """Hook for pre-processing of http requests."""
    # Fold in any extra headers configured on this client.
    http_request.headers.update(self.additional_http_headers)
    if self.log_request:
        logging.info('Making http %s to %s',
                     http_request.http_method, http_request.url)
        logging.info('Headers: %s', pprint.pformat(http_request.headers))
        if http_request.body:
            # TODO(craigcitro): Make this safe to print in the case of
            # non-printable body characters.
            logging.info('Body:\n%s',
                         http_request.loggable_body or http_request.body)
        else:
            logging.info('Body: (none)')
    return http_request
Hook for pre-processing of http requests.
def _iris_cell_methods_to_str(cell_methods_obj): """ Converts a Iris cell methods into a string """ cell_methods = [] for cell_method in cell_methods_obj: names = ''.join(['{}: '.format(n) for n in cell_method.coord_names]) intervals = ' '.join(['interval: {}'.format(interval) for interval in cell_method.intervals]) comments = ' '.join(['comment: {}'.format(comment) for comment in cell_method.comments]) extra = ' '.join([intervals, comments]).strip() if extra: extra = ' ({})'.format(extra) cell_methods.append(names + cell_method.method + extra) return ' '.join(cell_methods)
Converts a Iris cell methods into a string
def iter_distribution_names(self):
    """Yield all packages, modules, and extension names in distribution"""
    yield from (self.packages or ())
    yield from (self.py_modules or ())
    for ext in self.ext_modules or ():
        if isinstance(ext, tuple):
            # Old-style (name, build_info) tuple.
            name, _buildinfo = ext
        else:
            name = ext.name
        # Strip the legacy 'module' suffix from extension names.
        if name.endswith('module'):
            name = name[:-6]
        yield name
Yield all packages, modules, and extension names in distribution
def domain_create(self, domain, master=True, **kwargs):
    """
    Registers a new Domain on the acting user's account.  Make sure to point
    your registrar to Linode's nameservers so that Linode's DNS manager will
    correctly serve your domain.

    :param domain: The domain to register to Linode's DNS manager.
    :type domain: str
    :param master: Whether this is a master (defaults to true)
    :type master: bool

    :returns: The new Domain object.
    :rtype: Domain
    """
    params = {
        'domain': domain,
        'type': 'master' if master else 'slave',
    }
    params.update(kwargs)

    result = self.post('/domains', data=params)

    if 'id' not in result:
        raise UnexpectedResponseError('Unexpected response when creating Domain!', json=result)

    return Domain(self, result['id'], result)
Registers a new Domain on the acting user's account. Make sure to point your registrar to Linode's nameservers so that Linode's DNS manager will correctly serve your domain. :param domain: The domain to register to Linode's DNS manager. :type domain: str :param master: Whether this is a master (defaults to true) :type master: bool :returns: The new Domain object. :rtype: Domain
def get_prices(self) -> List[PriceModel]:
    """ Returns all available prices for security """
    # Deferred import to avoid a hard dependency at module import time.
    from pricedb.dal import Price

    repo = PriceDbApplication().get_price_repository()
    # Newest first for this security (matched on namespace + symbol).
    return (repo.query(Price)
            .filter(Price.namespace == self.security.namespace)
            .filter(Price.symbol == self.security.mnemonic)
            .orderby_desc(Price.date)
            .all())
Returns all available prices for security
def _parse_oratab(sid):
    '''
    Return ORACLE_HOME for a given SID found in oratab

    Note: only works with Unix-like minions
    '''
    if __grains__.get('kernel') in ('Linux', 'AIX', 'FreeBSD', 'OpenBSD', 'NetBSD'):
        ORATAB = '/etc/oratab'
    elif __grains__.get('kernel') == 'SunOS':
        # Fixed: was `in 'SunOS'` -- a substring test that would also match
        # any kernel name that happens to be a substring of 'SunOS'.
        ORATAB = '/var/opt/oracle/oratab'
    else:
        # Windows has no oratab file
        raise CommandExecutionError(
            'No uri defined for {0} and oratab not available in this OS'.format(sid))
    with fopen(ORATAB, 'r') as f:
        for line in f:
            if line.startswith('#'):
                continue
            fields = line.split(':')
            # Fixed: compare the SID field exactly. The previous substring
            # match (`sid in field`) could return the home of a different
            # SID, e.g. 'MYDB' when asked for 'DB'.
            if fields[0] == sid:
                return fields[1]
    # SID not present in oratab.
    return None
Return ORACLE_HOME for a given SID found in oratab Note: only works with Unix-like minions
def drawPoints(self, pointPen, filterRedundantPoints=False):
    """draw self using pointPen"""
    # Optionally wrap the pen so redundant points are dropped on the fly.
    pen = FilterRedundantPointPen(pointPen) if filterRedundantPoints else pointPen
    for contour in self.contours:
        pen.beginPath(identifier=contour["identifier"])
        for segmentType, pt, smooth, name, identifier in contour["points"]:
            pen.addPoint(pt=pt, segmentType=segmentType, smooth=smooth,
                         name=name, identifier=identifier)
        pen.endPath()
    for component in self.components:
        pen.addComponent(component["baseGlyph"], component["transformation"],
                         identifier=component["identifier"])
draw self using pointPen
def get_field(self, offset, length, format):
    """Returns unpacked Python struct array.

    Args:
        offset (int): offset to byte array within structure
        length (int): how many bytes to unpack
        format (str): Python struct format string for unpacking

    See Also:
        https://docs.python.org/2/library/struct.html#format-characters
    """
    raw = self.data[offset:offset + length]
    # unpack always returns a tuple; callers want the single value.
    return struct.unpack(format, raw)[0]
Returns unpacked Python struct array. Args: offset (int): offset to byte array within structure length (int): how many bytes to unpack format (str): Python struct format string for unpacking See Also: https://docs.python.org/2/library/struct.html#format-characters
def train(self, record):
    """
    Incrementally updates the tree with the given sample record.
    """
    assert self.data.class_attribute_name in record, \
        "The class attribute must be present in the record."
    self.sample_count += 1
    # Train on a copy so the caller's dict is never mutated downstream.
    self.tree.train(record.copy())
Incrementally updates the tree with the given sample record.
def truncatechars(value, arg):
    """
    Truncates a string after a certain number of chars.

    Argument: Number of chars to truncate after.
    """
    try:
        limit = int(arg)
    except ValueError:
        # Invalid literal for int(): fail silently, return input unchanged.
        return value
    return value[:limit] + '...' if len(value) > limit else value
Truncates a string after a certain number of chars. Argument: Number of chars to truncate after.
def refresh_token(self, **kwargs):
    """Refresh the authentication token.

    :param str refresh_token: The refresh token to use.  May be empty if
        retrieved with ``fetch_token``.
    """
    # Fill in client credentials unless the caller overrode them.
    kwargs.setdefault('client_secret', self.client_secret)
    kwargs.setdefault('client_id', self.client_id)
    return self.session.refresh_token(token_url, **kwargs)
Refresh the authentication token. :param str refresh_token: The refresh token to use. May be empty if retrieved with ``fetch_token``.
def install(self, plugin):
    ''' Add a plugin to the list of plugins and prepare it for being
        applied to all routes of this application. A plugin may be a simple
        decorator or an object that implements the :class:`Plugin` API.
    '''
    if hasattr(plugin, 'setup'):
        plugin.setup(self)
    if not (callable(plugin) or hasattr(plugin, 'apply')):
        raise TypeError("Plugins must be callable or implement .apply()")
    self.plugins.append(plugin)
    # Invalidate cached routes so the new plugin takes effect everywhere.
    self.reset()
    return plugin
Add a plugin to the list of plugins and prepare it for being applied to all routes of this application. A plugin may be a simple decorator or an object that implements the :class:`Plugin` API.
def is_snp(reference_bases, alternate_bases):
    """ Return whether or not the variant is a SNP """
    # A SNP has a single-base reference...
    if len(reference_bases) > 1:
        return False
    # ...and every alternate allele is one valid base symbol.
    return all(alt is not None and alt in ['A', 'C', 'G', 'T', 'N', '*']
               for alt in alternate_bases)
Return whether or not the variant is a SNP
def _list_gids():
    '''
    Return a list of gids in use
    '''
    output = __salt__['cmd.run'](
        ['dscacheutil', '-q', 'group'],
        output_loglevel='quiet',
        python_shell=False
    )
    gids = set()
    for line in salt.utils.itertools.split(output, '\n'):
        # dscacheutil emits lines like 'gid: 20'; collect the values.
        if line.startswith('gid:'):
            gids.update(line.split()[1:])
    return sorted(gids)
Return a list of gids in use
def granted(self, lock):
    '''Return True if a previously requested lock has been granted'''
    unit = hookenv.local_unit()
    ts = self.requests[unit].get(lock)
    # Granted when the grant timestamp matches our own request timestamp.
    return bool(ts and self.grants.get(unit, {}).get(lock) == ts)
Return True if a previously requested lock has been granted
def load_probe_file(recording, probe_file, channel_map=None, channel_groups=None):
    '''Loads channel information into recording extractor. If a .prb file is given,
    then 'location' and 'group' information for each channel is stored. If a .csv
    file is given, then it will only store 'location'

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor to channel information
    probe_file: str
        Path to probe file. Either .prb or .csv
    channel_map: list (optional)
        Channel ids to keep, in order, when loading a .csv probe file
    channel_groups: list (optional)
        Group id per kept channel when loading a .csv probe file

    Returns
    ---------
    subRecordingExtractor
    '''
    probe_file = Path(probe_file)
    if probe_file.suffix == '.prb':
        probe_dict = read_python(probe_file)
        if 'channel_groups' in probe_dict.keys():
            # First pass: gather the channel ids listed in all groups, in order.
            ordered_channels = np.array([], dtype=int)
            groups = sorted(probe_dict['channel_groups'].keys())
            for cgroup_id in groups:
                cgroup = probe_dict['channel_groups'][cgroup_id]
                for key_prop, prop_val in cgroup.items():
                    if key_prop == 'channels':
                        ordered_channels = np.concatenate((ordered_channels, prop_val))
            if list(ordered_channels) == recording.get_channel_ids():
                subrecording = recording
            else:
                if not np.all([chan in recording.get_channel_ids() for chan in ordered_channels]):
                    # NOTE(review): message reads backwards -- presumably it should say
                    # "Some channels in PRB file are NOT in original recording"; confirm.
                    print('Some channel in PRB file are in original recording')
                present_ordered_channels = [chan for chan in ordered_channels if chan in recording.get_channel_ids()]
                subrecording = SubRecordingExtractor(recording, channel_ids=present_ordered_channels)
            # Second pass: apply per-group properties to the sub-recording.
            for cgroup_id in groups:
                cgroup = probe_dict['channel_groups'][cgroup_id]
                if 'channels' not in cgroup.keys() and len(groups) > 1:
                    raise Exception("If more than one 'channel_group' is in the probe file, the 'channels' field"
                                    "for each channel group is required")
                elif 'channels' not in cgroup.keys():
                    channels_in_group = subrecording.get_num_channels()
                else:
                    channels_in_group = len(cgroup['channels'])
                for key_prop, prop_val in cgroup.items():
                    if key_prop == 'channels':
                        for i_ch, prop in enumerate(prop_val):
                            if prop in subrecording.get_channel_ids():
                                subrecording.set_channel_property(prop, 'group', int(cgroup_id))
                    elif key_prop == 'geometry' or key_prop == 'location':
                        if isinstance(prop_val, dict):
                            # NOTE(review): condition looks inverted -- this warns when
                            # the lengths MATCH; it presumably should be `!=`. Confirm
                            # against upstream before changing.
                            if len(prop_val.keys()) == channels_in_group:
                                print('geometry in PRB have not the same length as channel in group')
                            for (i_ch, prop) in prop_val.items():
                                if i_ch in subrecording.get_channel_ids():
                                    subrecording.set_channel_property(i_ch, 'location', prop)
                        elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group:
                            for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val):
                                if i_ch in subrecording.get_channel_ids():
                                    subrecording.set_channel_property(i_ch, 'location', prop)
                    else:
                        # Any other per-group key is stored as a channel property verbatim.
                        if isinstance(prop_val, dict) and len(prop_val.keys()) == channels_in_group:
                            for (i_ch, prop) in prop_val.items():
                                if i_ch in subrecording.get_channel_ids():
                                    subrecording.set_channel_property(i_ch, key_prop, prop)
                        elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group:
                            for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val):
                                if i_ch in subrecording.get_channel_ids():
                                    subrecording.set_channel_property(i_ch, key_prop, prop)
                # create dummy locations
                if 'geometry' not in cgroup.keys() and 'location' not in cgroup.keys():
                    for i, chan in enumerate(subrecording.get_channel_ids()):
                        subrecording.set_channel_property(chan, 'location', [i, 0])
        else:
            raise AttributeError("'.prb' file should contain the 'channel_groups' field")
    elif probe_file.suffix == '.csv':
        if channel_map is not None:
            assert np.all([chan in channel_map for chan in recording.get_channel_ids()]), \
                "all channel_ids in 'channel_map' must be in the original recording channel ids"
            subrecording = SubRecordingExtractor(recording, channel_ids=channel_map)
        else:
            subrecording = recording
        with probe_file.open() as csvfile:
            posreader = csv.reader(csvfile)
            row_count = 0
            loaded_pos = []
            for pos in (posreader):
                row_count += 1
                loaded_pos.append(pos)
            assert len(subrecording.get_channel_ids()) == row_count, "The .csv file must contain as many " \
                "rows as the number of channels in the recordings"
            # One CSV row per channel, in channel-id order.
            for i_ch, pos in zip(subrecording.get_channel_ids(), loaded_pos):
                if i_ch in subrecording.get_channel_ids():
                    subrecording.set_channel_property(i_ch, 'location', list(np.array(pos).astype(float)))
            if channel_groups is not None and len(channel_groups) == len(subrecording.get_channel_ids()):
                for i_ch, chg in zip(subrecording.get_channel_ids(), channel_groups):
                    if i_ch in subrecording.get_channel_ids():
                        subrecording.set_channel_property(i_ch, 'group', chg)
    else:
        raise NotImplementedError("Only .csv and .prb probe files can be loaded.")
    return subrecording
Loads channel information into recording extractor. If a .prb file is given, then 'location' and 'group' information for each channel is stored. If a .csv file is given, then it will only store 'location' Parameters ---------- recording: RecordingExtractor The recording extractor to channel information probe_file: str Path to probe file. Either .prb or .csv Returns --------- subRecordingExtractor
def get_nbt(self, x, z):
    """
    Return a NBTFile of the specified chunk.
    Raise InconceivedChunk if the chunk is not included in the file.
    """
    # TODO: cache results?
    data = self.get_blockdata(x, z)  # This may raise a RegionFileFormatError.
    data = BytesIO(data)
    err = None
    try:
        nbt = NBTFile(buffer=data)  # this may raise a MalformedFileError.
        # Translate chunk-local coordinates to absolute region coordinates.
        if self.loc.x != None:
            x += self.loc.x*32
        if self.loc.z != None:
            z += self.loc.z*32
        nbt.loc = Location(x=x, z=z)
        return nbt
    except MalformedFileError as e:
        # avoid str(e) due to Unicode issues in Python 2.
        err = '%s' % e
    if err:
        raise ChunkDataError(err)
Return a NBTFile of the specified chunk. Raise InconceivedChunk if the chunk is not included in the file.
def _dispose(self):
    """Dispose of the pool for this instance, closing all connections."""
    self.close()
    # _DBProxy.dispose doesn't actually call dispose on the pool,
    # so locate our pool by its serialized connection params and do it here.
    key = db_pool._serialize(**self.get_connection_params())
    try:
        pool = db_pool.pools[key]
    except KeyError:
        pass
    else:
        pool.dispose()
        del db_pool.pools[key]
Dispose of the pool for this instance, closing all connections.
def plotJacobi(self,*args,**kwargs):
    """
    NAME:

       plotJacobi

    PURPOSE:

       plot Jacobi(.) along the orbit

    INPUT:

       bovy_plot.bovy_plot inputs

    OUTPUT:

       figure to output device

    HISTORY:

       2014-06-16 - Written - Bovy (IAS)

    """
    # Select the normalized or raw Jacobi quantity, then delegate to plot().
    kwargs['d2'] = 'Jacobinorm' if kwargs.pop('normed', False) else 'Jacobi'
    return self.plot(*args, **kwargs)
NAME: plotJacobi PURPOSE: plot Jacobi(.) along the orbit INPUT: bovy_plot.bovy_plot inputs OUTPUT: figure to output device HISTORY: 2014-06-16 - Written - Bovy (IAS)
def find_element_by_xpath(self, xpath):
    """
    Finds an element by xpath.

    :param xpath: The xpath locator of the element to find.
    :return: See html5lib xpath expressions `here
        <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_
    """
    from sdklib.html.elem import Elem5lib
    # Translate the xpath into ElementTree syntax, then wrap the hit.
    node = self.html_obj.find(self._convert_xpath(xpath))
    return Elem5lib(node)
Finds an element by xpath. :param xpath: The xpath locator of the element to find. :return: See html5lib xpath expressions `here <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_
def dump_json_file(json_data, pwd_dir_path, dump_file_name):
    """ dump json data to file

    Writes ``json_data`` as UTF-8 JSON to ``<pwd_dir_path>/logs/<dump_file_name>``,
    creating the logs folder if needed. Non-serializable values fall back to str().
    """
    class PythonObjectEncoder(json.JSONEncoder):
        def default(self, obj):
            # Fixed: was `super().default(self, obj)` -- passing self twice
            # always raised TypeError. Call the parent correctly and fall
            # back to str() for objects json cannot serialize.
            try:
                return super().default(obj)
            except TypeError:
                return str(obj)

    logs_dir_path = os.path.join(pwd_dir_path, "logs")
    if not os.path.isdir(logs_dir_path):
        os.makedirs(logs_dir_path)

    dump_file_path = os.path.join(logs_dir_path, dump_file_name)
    try:
        with io.open(dump_file_path, 'w', encoding='utf-8') as outfile:
            if is_py2:
                # Python 2: coerce json.dumps output to unicode before
                # writing to the utf-8 text stream.
                outfile.write(
                    unicode(json.dumps(
                        json_data,
                        indent=4,
                        separators=(',', ':'),
                        ensure_ascii=False,
                        cls=PythonObjectEncoder
                    ))
                )
            else:
                json.dump(
                    json_data,
                    outfile,
                    indent=4,
                    separators=(',', ':'),
                    ensure_ascii=False,
                    cls=PythonObjectEncoder
                )

        msg = "dump file: {}".format(dump_file_path)
        logger.color_print(msg, "BLUE")
    except TypeError as ex:
        msg = "Failed to dump json file: {}\nReason: {}".format(dump_file_path, ex)
        logger.color_print(msg, "RED")
dump json data to file
def Flemmer_Banks(Re):
    r'''Calculates drag coefficient of a smooth sphere using the method in
    [1]_ as described in [2]_.

    .. math::
        C_D = \frac{24}{Re}10^E

        E = 0.383Re^{0.356}-0.207Re^{0.396} - \frac{0.143}{1+(\log_{10} Re)^2}

    Parameters
    ----------
    Re : float
        Reynolds number of the sphere, [-]

    Returns
    -------
    Cd : float
        Drag coefficient [-]

    Notes
    -----
    Range is Re <= 2E5

    Examples
    --------
    >>> Flemmer_Banks(200.)
    0.7849169609270039

    References
    ----------
    .. [1] Flemmer, R. L. C., and C. L. Banks. "On the Drag Coefficient of a
       Sphere." Powder Technology 48, no. 3 (November 1986): 217-21.
       doi:10.1016/0032-5910(86)80044-4.
    .. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
       Ahmadi. "Development of Empirical Models with High Accuracy for
       Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
       Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
       doi:10.1016/j.powtec.2014.02.045.
    '''
    logRe = log10(Re)
    # Exponent E from the Flemmer-Banks correlation.
    exponent = 0.383*Re**0.356 - 0.207*Re**0.396 - 0.143/(1. + logRe*logRe)
    return 24./Re*10**exponent
r'''Calculates drag coefficient of a smooth sphere using the method in [1]_ as described in [2]_. .. math:: C_D = \frac{24}{Re}10^E E = 0.383Re^{0.356}-0.207Re^{0.396} - \frac{0.143}{1+(\log_{10} Re)^2} Parameters ---------- Re : float Reynolds number of the sphere, [-] Returns ------- Cd : float Drag coefficient [-] Notes ----- Range is Re <= 2E5 Examples -------- >>> Flemmer_Banks(200.) 0.7849169609270039 References ---------- .. [1] Flemmer, R. L. C., and C. L. Banks. "On the Drag Coefficient of a Sphere." Powder Technology 48, no. 3 (November 1986): 217-21. doi:10.1016/0032-5910(86)80044-4. .. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz Ahmadi. "Development of Empirical Models with High Accuracy for Estimation of Drag Coefficient of Flow around a Smooth Sphere: An Evolutionary Approach." Powder Technology 257 (May 2014): 11-19. doi:10.1016/j.powtec.2014.02.045.
async def handle_action(self, action: str, request_id: str, **kwargs):
    """Dispatch ``action`` and send the response back to the client.

    Permission checking runs first; unknown actions raise
    ``MethodNotAllowed``. Any exception raised along the way is routed to
    ``handle_exception`` instead of propagating.
    """
    try:
        await self.check_permissions(action, **kwargs)

        if action not in self.actions:
            raise MethodNotAllowed(method=action)

        content, status = await self.call_view(action=action, **kwargs)

        await self.reply(
            action=action,
            request_id=request_id,
            data=content,
            status=status,
        )
    except Exception as exc:
        await self.handle_exception(exc, action=action, request_id=request_id)
run the action.
def service_password_encryption(self, **kwargs):
    """Auto Generated Code: build the brocade-aaa ``service`` /
    ``password-encryption`` config subtree and hand it to the callback."""
    config = ET.Element("config")
    service = ET.SubElement(
        config, "service", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    ET.SubElement(service, "password-encryption")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def update_session(self, alias, headers=None, cookies=None):
    """Update Session Headers: update a HTTP Session Headers

    ``alias`` Robot Framework alias to identify the session

    ``headers`` Dictionary of headers to merge into the session

    ``cookies`` Dictionary of cookies to merge into the session
    """
    session = self._cache.switch(alias)
    # merge_setting / merge_cookies keep the session's existing entries
    # unless explicitly overridden by the new values.
    session.headers = merge_setting(headers, session.headers)
    session.cookies = merge_cookies(session.cookies, cookies)
Update Session Headers: update a HTTP Session Headers ``alias`` Robot Framework alias to identify the session ``headers`` Dictionary of headers merge into session
def derive(self):
    '''Compute the formal derivative of the polynomial:
    sum(i*coeff[i] x^(i-1)).

    Coefficients are stored highest-degree first, so self[i] has exponent
    L-i with L = len(self)-1; the constant term is dropped from the result.
    '''
    L = len(self) - 1
    return Polynomial([(L - i) * self[i] for i in _range(0, L)])
Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))
def contribute_to_class(self, cls, name):
    """
    Swap out any reference to ``KeywordsField`` with the
    ``KEYWORDS_FIELD_string`` field in ``search_fields``.
    """
    super(KeywordsField, self).contribute_to_class(cls, name)
    # Name of the backing string field, built by filling the one %s slot of
    # the first key of self.fields with the related field name.
    string_field_name = list(self.fields.keys())[0] % \
        self.related_field_name
    if hasattr(cls, "search_fields") and name in cls.search_fields:
        try:
            # search_fields may be a dict mapping field name -> weight...
            weight = cls.search_fields[name]
        except TypeError:
            # ...or a sequence of names (indexing a list/tuple with a str
            # raises TypeError). Replace in place, preserving the original
            # sequence type.
            index = cls.search_fields.index(name)
            search_fields_type = type(cls.search_fields)
            cls.search_fields = list(cls.search_fields)
            cls.search_fields[index] = string_field_name
            cls.search_fields = search_fields_type(cls.search_fields)
        else:
            # dict case: re-key the stored weight onto the string field.
            del cls.search_fields[name]
            cls.search_fields[string_field_name] = weight
Swap out any reference to ``KeywordsField`` with the ``KEYWORDS_FIELD_string`` field in ``search_fields``.
def _parse_header(self, data):
    """Parse header (xheader or yheader)

    :param data: data to be parsed
    :type data: str
    :return: list with header's data
    :rtype: list
    """
    parsed = []
    # Headers are colon-separated; each entry is "<name> IN <units>"
    # (the " IN " separator is matched case-insensitively).
    for chunk in data.split(':'):
        parts = re.split(' IN ', chunk, flags=re.I)
        entry = {'name': parts[0].strip()}
        if len(parts) > 1:
            entry['units'] = parts[1].strip()
        parsed.append(entry)
    return parsed
Parse header (xheader or yheader) :param data: data to be parsed :type data: str :return: list with header's data :rtype: list
def burst_run(self):
    """ Run CPU as fast as Python can... """
    # Bind the dispatch method to a local once: attribute lookups inside the
    # hot loop are comparatively expensive.
    # https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Avoiding_dots...
    step = self.get_and_call_next_op
    outer = self.outer_burst_op_count
    inner = self.inner_burst_op_count
    for _ in range(outer):
        for _ in range(inner):
            step()
    self.call_sync_callbacks()
Run CPU as fast as Python can...
def graph_dot(self): """ Export a graph of the data in dot format. """ default_graphviz_template = """ digraph role_dependencies { size="%size" dpi=%dpi ratio="fill" landscape=false rankdir="BT"; node [shape = "box", style = "rounded,filled", fillcolor = "lightgrey", fontsize = 20]; edge [style = "dashed", dir = "forward", penwidth = 1.5]; %roles_list %dependencies } """ roles_list = "" edges = "" # remove the darkest and brightest colors, still have 100+ colors adjusted_colors = c.X11_COLORS[125:-325] random.shuffle(adjusted_colors) backup_colors = adjusted_colors[:] for role, fields in sorted(self.report["roles"].iteritems()): name = utils.normalize_role(role, self.config) color_length = len(adjusted_colors) - 1 # reset the colors if we run out if color_length == 0: adjusted_colors = backup_colors[:] color_length = len(adjusted_colors) - 1 random_index = random.randint(1, color_length) roles_list += " role_{0} [label = \"{1}\"]\n" \ .format(re.sub(r'[.-/]', '_', name), name) edge = '\n edge [color = "{0}"];\n' \ .format(adjusted_colors[random_index]) del adjusted_colors[random_index] if fields["dependencies"]: dependencies = "" for dependency in sorted(fields["dependencies"]): dependency_name = utils.role_name(dependency) dependencies += " role_{0} -> role_{1}\n".format( re.sub(r'[.-/]', '_', name), re.sub(r'[.-/]', '_', utils.normalize_role(dependency_name, self.config) ) ) edges += "{0}{1}\n".format(edge, dependencies) graphviz_template = default_graphviz_template.replace("%roles_list", roles_list) graphviz_template = graphviz_template.replace("%dependencies", edges) graphviz_template = graphviz_template.replace("%size", self.size) graphviz_template = graphviz_template.replace("%dpi", str(self.dpi)) if self.out_file: utils.string_to_file(self.out_file, graphviz_template) else: print graphviz_template
Export a graph of the data in dot format.
def set_parallel_multiple(self, value):
    """
    Setter for 'parallel_multiple' field.

    :param value - a new value of 'parallel_multiple' field. Must be a
        boolean type. Does not accept None value.
    :raises TypeError: if ``value`` is not a bool (None included).
    """
    # isinstance(None, bool) is False, so None is rejected here too.
    if isinstance(value, bool):
        self.__parallel_multiple = value
    else:
        raise TypeError("ParallelMultiple must be set to a bool")
Setter for 'parallel_multiple' field. :param value - a new value of 'parallel_multiple' field. Must be a boolean type. Does not accept None value.
def binaryEntropyVectorized(x):
    """
    Calculate entropy for a list of binary random variables
    :param x: (numpy array) the probability of the variable to be 1.
    :return: entropy: (numpy array) entropy
    """
    p_one = x
    p_zero = 1 - x
    result = -p_one*np.log2(p_one) - p_zero*np.log2(p_zero)
    # Degenerate distributions (p in {0, 1}) have zero entropy; overwrite
    # the NaNs produced there by 0 * log2(0).
    result[p_one * p_zero == 0] = 0
    return result
Calculate entropy for a list of binary random variables :param x: (numpy array) the probability of the variable to be 1. :return: entropy: (numpy array) entropy
def _setup_logging():
    """Setup logging to log to nowhere by default.

    For details, see:
    http://docs.python.org/3/howto/logging.html#library-config

    Internal function.
    """
    import logging
    # Attach a NullHandler so library users without logging configured do
    # not get "No handlers could be found" warnings.
    logging.getLogger('spotify-connect').addHandler(logging.NullHandler())
Setup logging to log to nowhere by default. For details, see: http://docs.python.org/3/howto/logging.html#library-config Internal function.
def pdf_new(self, x, mu, sigma, skw):
    """
    function with different parameterisation

    :param x: evaluation point(s)
    :param mu: mean
    :param sigma: sigma
    :param skw: skewness; values outside [-1, 1] are clamped to the
        nearest bound (with a warning printed)
    :return: pdf evaluated at x
    """
    if skw > 1 or skw < -1:
        print("skewness %s out of range" % skw)
        # BUG FIX: clamp to the nearest bound; previously skw < -1 was
        # replaced by +1, silently flipping the sign of the skewness.
        skw = 1. if skw > 1 else -1.
    e, w, a = self.map_mu_sigma_skw(mu, sigma, skw)
    pdf = self.pdf(x, e, w, a)
    return pdf
function with different parameterisation :param x: :param mu: mean :param sigma: sigma :param skw: skewness :return:
def show_support_save_status_output_show_support_save_status_message(self, **kwargs):
    """Auto Generated Code: build the show-support-save-status output
    ``message`` element tree and pass it to the callback."""
    root = ET.Element("show_support_save_status")
    # NOTE: as generated, the callback receives this element directly; the
    # usual "config" wrapper element is not part of the emitted tree.
    config = root
    output = ET.SubElement(root, "output")
    status = ET.SubElement(output, "show-support-save-status")
    message = ET.SubElement(status, "message")
    message.text = kwargs.pop('message')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def set_uppercase(self, uppercase):
    """Sets layout uppercase state.

    :param uppercase: True if uppercase, False otherwise.
    """
    # Exact-type check kept on purpose: only plain VKey instances are
    # re-cased, not subclasses.
    for row in self.rows:
        for key in row.keys:
            if type(key) == VKey:
                key.value = key.value.upper() if uppercase else key.value.lower()
Sets layout uppercase state. :param uppercase: True if uppercase, False otherwise.
def potential_purviews(self, direction, mechanism, purviews=False):
    """Return all purviews that could belong to the |MIC|/|MIE|.

    Filters out trivially-reducible purviews.

    Args:
        direction (str): Either |CAUSE| or |EFFECT|.
        mechanism (tuple[int]): The mechanism of interest.

    Keyword Args:
        purviews (tuple[int]): Optional subset of purviews of interest.
    """
    system = self.system[direction]
    candidates = system.potential_purviews(direction, mechanism, purviews)
    # Keep only purviews fully contained in this direction's allowed indices.
    return [purview for purview in candidates
            if set(purview) <= set(self.purview_indices(direction))]
Return all purviews that could belong to the |MIC|/|MIE|. Filters out trivially-reducible purviews. Args: direction (str): Either |CAUSE| or |EFFECT|. mechanism (tuple[int]): The mechanism of interest. Keyword Args: purviews (tuple[int]): Optional subset of purviews of interest.
def get_none_policy_text(none_policy,  # type: int
                         verbose=False  # type: bool
                         ):
    """
    Returns a user-friendly description of a NonePolicy taking into account
    NoneArgPolicy

    :param none_policy:
    :param verbose:
    :return:
    """
    # (policy, short name, verbose description); matched by identity below.
    descriptions = (
        (NonePolicy.SKIP, 'SKIP',
         "accept None without performing validation"),
        (NonePolicy.FAIL, 'FAIL',
         "fail on None without performing validation"),
        (NonePolicy.VALIDATE, 'VALIDATE',
         "validate None as any other values"),
        (NoneArgPolicy.SKIP_IF_NONABLE_ELSE_FAIL, 'SKIP_IF_NONABLE_ELSE_FAIL',
         "accept None without validation if the argument is optional, "
         "otherwise fail on None"),
        (NoneArgPolicy.SKIP_IF_NONABLE_ELSE_VALIDATE,
         'SKIP_IF_NONABLE_ELSE_VALIDATE',
         "accept None without validation if the argument is optional, "
         "otherwise validate None as any other values"),
    )
    for policy, short_text, verbose_text in descriptions:
        if none_policy is policy:
            return verbose_text if verbose else short_text
    raise ValueError('Invalid none_policy ' + str(none_policy))
Returns a user-friendly description of a NonePolicy taking into account NoneArgPolicy :param none_policy: :param verbose: :return:
def findall_operations_between(self,
                               start_frontier: Dict[ops.Qid, int],
                               end_frontier: Dict[ops.Qid, int],
                               omit_crossing_operations: bool = False
                               ) -> List[Tuple[int, ops.Operation]]:
    """Finds operations between the two given frontiers.

    If a qubit is in `start_frontier` but not `end_frontier`, its end index
    defaults to the end of the circuit. If a qubit is in `end_frontier` but
    not `start_frontier`, its start index defaults to the start of the
    circuit. Operations on qubits not mentioned in either frontier are not
    included in the results.

    Args:
        start_frontier: Just before where to start searching for operations,
            for each qubit of interest. Start frontier indices are
            inclusive.
        end_frontier: Just before where to stop searching for operations,
            for each qubit of interest. End frontier indices are exclusive.
        omit_crossing_operations: Determines whether or not operations that
            cross from a location between the two frontiers to a location
            outside the two frontiers are included or excluded. (Operations
            completely inside are always included, and operations completely
            outside are always excluded.)

    Returns:
        A list of tuples. Each tuple describes an operation found between
        the two frontiers. The first item of each tuple is the index of the
        moment containing the operation, and the second item is the
        operation itself. The list is sorted so that the moment index
        increases monotonically.
    """
    # Deduplicates multi-qubit ops reached from several of their qubits.
    result = BucketPriorityQueue[ops.Operation](
        drop_duplicate_entries=True)

    involved_qubits = set(start_frontier.keys()) | set(end_frontier.keys())
    # Note: only sorted to ensure a deterministic result ordering.
    for q in sorted(involved_qubits):
        # Defaults: start at moment 0 / end at len(self) for qubits present
        # in only one of the two frontiers.
        for i in range(start_frontier.get(q, 0),
                       end_frontier.get(q, len(self))):
            op = self.operation_at(q, i)
            if op is None:
                continue
            # "Crossing" op: touches a qubit outside both frontiers.
            if (omit_crossing_operations and
                    not involved_qubits.issuperset(op.qubits)):
                continue
            # Priority is the moment index, so the output comes back sorted
            # by moment.
            result.enqueue(i, op)

    return list(result)
Finds operations between the two given frontiers. If a qubit is in `start_frontier` but not `end_frontier`, its end index defaults to the end of the circuit. If a qubit is in `end_frontier` but not `start_frontier`, its start index defaults to the start of the circuit. Operations on qubits not mentioned in either frontier are not included in the results. Args: start_frontier: Just before where to start searching for operations, for each qubit of interest. Start frontier indices are inclusive. end_frontier: Just before where to stop searching for operations, for each qubit of interest. End frontier indices are exclusive. omit_crossing_operations: Determines whether or not operations that cross from a location between the two frontiers to a location outside the two frontiers are included or excluded. (Operations completely inside are always included, and operations completely outside are always excluded.) Returns: A list of tuples. Each tuple describes an operation found between the two frontiers. The first item of each tuple is the index of the moment containing the operation, and the second item is the operation itself. The list is sorted so that the moment index increases monotonically.
def wrapinstance(ptr, base=None):
    """convert a pointer to a Qt class instance (PySide/PyQt compatible)

    ptr: integer pointer to a wrapped Qt object.
    base: optional wrapper class; on the PySide path, when omitted, it is
        derived from the pointer's Qt meta-object (exact class, then direct
        superclass, else QWidget).
    Returns None for a None pointer or an unknown binding.
    """
    if ptr is None:
        return None
    ptr = long(ptr)  # Ensure type (py2 integer)
    from wishlib.qt import active, QtCore, QtGui
    if active == "PySide":
        import shiboken
        if base is None:
            # Inspect the meta-object to pick the most specific wrapper
            # class available in QtGui.
            qObj = shiboken.wrapInstance(ptr, QtCore.QObject)
            metaObj = qObj.metaObject()
            cls = metaObj.className()
            superCls = metaObj.superClass().className()
            if hasattr(QtGui, cls):
                base = getattr(QtGui, cls)
            elif hasattr(QtGui, superCls):
                base = getattr(QtGui, superCls)
            else:
                base = QtGui.QWidget
        return shiboken.wrapInstance(ptr, base)
    elif active == "PyQt4":
        import sip
        # NOTE(review): the `base` argument is ignored on this branch —
        # everything is wrapped as QWidget; confirm whether that is intended.
        return sip.wrapinstance(ptr, QtGui.QWidget)
    return None
convert a pointer to a Qt class instance (PySide/PyQt compatible)
def image_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for images.

  Projects the body output to per-channel logits over `vocab_size`, giving a
  [batch, height, width, channels, vocab_size] tensor.
  """
  del targets  # unused arg
  # TODO(lukaszkaiser): is this a universal enough way to get channels?
  channels = model_hparams.problem.num_channels
  with tf.variable_scope("rgb_softmax"):
    output_shape = common_layers.shape_list(body_output)
    target_shape = output_shape[:3] + [channels, vocab_size]
    logits = tf.layers.dense(body_output, vocab_size * channels)
    logits = tf.reshape(logits, target_shape)
    if not tf.get_variable_scope().reuse:
      # Only emit the image summary once per variable scope.
      predicted = tf.argmax(logits, axis=-1)
      tf.summary.image(
          "result",
          common_layers.tpu_safe_image_summary(predicted),
          max_outputs=1)
    return logits
Top transformation for images.
def createContactItem(self, person, label, number):
    """
    Create a L{PhoneNumber} item for C{number}, associated with C{person}.

    @type person: L{Person}

    @param label: The value to use for the I{label} attribute of the new
        L{PhoneNumber} item.
    @type label: C{unicode}

    @param number: The value to use for the I{number} attribute of the new
        L{PhoneNumber} item.  If C{''}, no item will be created.
    @type number: C{unicode}

    @rtype: L{PhoneNumber} or C{NoneType}
    """
    # Guard clause: an empty number creates nothing (returns None).
    if not number:
        return None
    return PhoneNumber(
        store=person.store, person=person, label=label, number=number)
Create a L{PhoneNumber} item for C{number}, associated with C{person}. @type person: L{Person} @param label: The value to use for the I{label} attribute of the new L{PhoneNumber} item. @type label: C{unicode} @param number: The value to use for the I{number} attribute of the new L{PhoneNumber} item. If C{''}, no item will be created. @type number: C{unicode} @rtype: L{PhoneNumber} or C{NoneType}
def execute_pending_service_agreements(storage_path, account, actor_type, did_resolver_fn):
    """
    Iterates over pending service agreements recorded in the local storage,
    fetches their service definitions, and subscribes to service agreement events.

    :param storage_path: storage path for the internal db, str
    :param account: local account; must match the agreement's consumer or
        provider address depending on ``actor_type``
    :param actor_type: 'consumer' selects the consumer event flow, anything
        else the publisher flow
    :param did_resolver_fn: callable resolving a DID to its DDO document
    :return:
    """
    keeper = Keeper.get_instance()
    # Stored rows:
    # service_agreement_id, did, service_definition_id, price, files, start_time, status
    for (agreement_id, did, _,
         price, files, start_time,
         _) in get_service_agreements(storage_path):

        ddo = did_resolver_fn(did)
        for service in ddo.services:
            # Only 'Access' services carry the escrow/access conditions
            # handled below.
            if service.type != 'Access':
                continue

            # Skip agreements not (yet) registered on-chain.
            consumer_provider_tuple = keeper.escrow_access_secretstore_template.get_agreement_data(
                agreement_id)
            if not consumer_provider_tuple:
                continue

            consumer, provider = consumer_provider_tuple
            did = ddo.did
            service_agreement = ServiceAgreement.from_service_dict(service.as_dictionary())
            condition_ids = service_agreement.generate_agreement_condition_ids(
                agreement_id, did, consumer, provider, keeper)

            if actor_type == 'consumer':
                # The local account must be the agreement's consumer.
                assert account.address == consumer
                process_agreement_events_consumer(
                    provider, agreement_id, did, service_agreement,
                    price, account, condition_ids, None)
            else:
                # Otherwise the local account must be the provider/publisher.
                assert account.address == provider
                process_agreement_events_publisher(
                    account, agreement_id, did, service_agreement,
                    price, consumer, condition_ids)
Iterates over pending service agreements recorded in the local storage, fetches their service definitions, and subscribes to service agreement events. :param storage_path: storage path for the internal db, str :param account: :param actor_type: :param did_resolver_fn: :return:
def clean(ctx):
    """Clean previously built package artifacts.
    """
    ctx.run(f"python setup.py clean")
    dist = ROOT.joinpath("dist")
    build = ROOT.joinpath("build")
    print(f"[clean] Removing {dist} and {build}")
    # Remove whichever of the two build directories actually exist.
    for artifact_dir in (dist, build):
        if artifact_dir.exists():
            shutil.rmtree(str(artifact_dir))
Clean previously built package artifacts.
def set_current(self, channel, value, unit='A'):
    '''Setting current of current source
    '''
    cal = self._ch_cal[channel]['DAC']
    dac_offset = cal['offset']
    dac_gain = cal['gain']
    # Note the negated value in each conversion: positive requested
    # currents map to negative DAC codes (fix sign of output).
    if unit == 'raw':
        dac_value = value
    elif unit == 'A':
        dac_value = int((-value * 1000000 - dac_offset) / dac_gain)
    elif unit == 'mA':
        dac_value = int((-value * 1000 - dac_offset) / dac_gain)
    elif unit == 'uA':
        dac_value = int((-value - dac_offset) / dac_gain)
    else:
        raise TypeError("Invalid unit type.")
    self._set_dac_value(channel=channel, value=dac_value)
Setting current of current source
def right_censor_lifetimes(lifetimes, max_, min_=0):
    """
    Right censor the deaths, uniformly
      lifetimes: (n,) array of positive random variables
      max_: the max time a censorship can occur
      min_: the min time a censorship can occur

    Returns
      The actual observations including uniform right censoring, and
      D_i (observed death or did not)

    I think this is deprecated
    """
    n = lifetimes.shape[0]
    # Censoring times ~ Uniform(min_, max_), one per subject.
    censor_times = min_ + (max_ - min_) * random.rand(n)
    observations = np.minimum(censor_times, lifetimes)
    observed_death = lifetimes == observations
    return observations, observed_death
Right censor the deaths, uniformly lifetimes: (n,) array of positive random variables max_: the max time a censorship can occur min_: the min time a censorship can occur Returns The actual observations including uniform right censoring, and D_i (observed death or did not) I think this is deprecated
def is_array(type_):
    """returns True, if type represents C++ array type, False otherwise"""
    # Strip aliases, references and cv-qualifiers before checking.
    naked = remove_cv(remove_reference(remove_alias(type_)))
    return isinstance(naked, cpptypes.array_t)
returns True, if type represents C++ array type, False otherwise
def stop_listener_thread(self):
    """ Stop listener thread running in the background
    """
    # Nothing to do if no listener is running.
    if not self.sync_thread:
        return
    self.should_listen = False
    self.sync_thread.join()
    self.sync_thread = None
Stop listener thread running in the background
def plot_world_with_plotly(world, species_list=None, max_count=1000):
    """
    Plot a World on IPython Notebook

    :param world: a World instance, or a filename (str) of a saved world
        loaded via simulation.load_world
    :param species_list: optional list of species serials to plot; defaults
        to all species in the world, sorted
    :param max_count: per-species cap on plotted particles; a random sample
        is taken above this (None disables the cap)
    """
    if isinstance(world, str):
        from .simulation import load_world
        world = load_world(world)

    if species_list is None:
        species_list = [sp.serial() for sp in world.list_species()]
        species_list.sort()

    import random
    from ecell4_base.core import Species

    # Collect particle coordinates per species serial: serial -> (x, y, z).
    positions = {}
    for serial in species_list:
        x, y, z = [], [], []
        particles = world.list_particles_exact(Species(serial))
        if max_count is not None and len(particles) > max_count:
            # Downsample to keep the plot responsive.
            particles = random.sample(particles, max_count)
        for pid, p in particles:
            pos = p.position()
            x.append(pos[0])
            y.append(pos[1])
            z.append(pos[2])
        positions[serial] = (x, y, z)

    import plotly
    import plotly.graph_objs as go

    plotly.offline.init_notebook_mode()

    marker = dict(size=6, line=dict(color='rgb(204, 204, 204)', width=1),
                  opacity=0.9, symbol='circle')

    data = []
    for serial, (x, y, z) in positions.items():
        # One 3D scatter trace per species, labelled with its serial.
        trace = go.Scatter3d(
            x=x, y=y, z=z, mode='markers', marker=marker, name=serial)
        data.append(trace)

    layout = go.Layout(margin=dict(l=0, r=0, b=0, t=0))
    fig = go.Figure(data=data, layout=layout)
    plotly.offline.iplot(fig)
Plot a World on IPython Notebook
def get_key_policy(key_id, policy_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get the policy for the specified key.

    CLI example::

        salt myminion boto_kms.get_key_policy 'alias/mykey' mypolicy
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    result = {}
    try:
        key_policy = conn.get_key_policy(key_id, policy_name)
        # Deserialize the policy document, preserving key order.
        result['key_policy'] = salt.serializers.json.deserialize(
            key_policy['Policy'],
            object_pairs_hook=odict.OrderedDict
        )
    except boto.exception.BotoServerError as exc:
        result['error'] = __utils__['boto.get_error'](exc)
    return result
Get the policy for the specified key. CLI example:: salt myminion boto_kms.get_key_policy 'alias/mykey' mypolicy
def duration_to_text(self, duration):
    """
    Return the textual representation of the given `duration`. The duration
    can either be a tuple of :class:`datetime.time` objects, or a simple
    number. The returned text will be either a hhmm-hhmm string (if the
    given `duration` is a tuple) or a number.
    """
    if not isinstance(duration, tuple):
        return six.text_type(duration)
    begin, end = duration
    # A missing start renders as '', a missing end as '?'.
    begin_text = ('' if begin is None
                  else begin.strftime(self.ENTRY_DURATION_FORMAT))
    end_text = ('?' if end is None
                else end.strftime(self.ENTRY_DURATION_FORMAT))
    return '%s-%s' % (begin_text, end_text)
Return the textual representation of the given `duration`. The duration can either be a tuple of :class:`datetime.time` objects, or a simple number. The returned text will be either a hhmm-hhmm string (if the given `duration` is a tuple) or a number.
def _run(self, url_path, headers=None, **kwargs):
    """ Requests API """
    url = self._construct_url(url_path)
    # All keyword arguments become the payload, plus the API token.
    payload = dict(kwargs, api_token=self.api_token)
    return self._make_request(url, payload, headers)
Requests API
def disable_ipython(self):
    """
    Disable plotting in the iPython notebook.

    After disabling, lightning plots will be produced in your lightning
    server, but will not appear in the notebook.
    """
    from IPython.core.getipython import get_ipython
    self.ipython_enabled = False
    # Unregister the HTML display hooks for both visualization types.
    html_formatter = get_ipython().display_formatter.formatters['text/html']
    for viz_type in (Visualization, VisualizationLocal):
        html_formatter.type_printers.pop(viz_type, None)
Disable plotting in the iPython notebook. After disabling, lightning plots will be produced in your lightning server, but will not appear in the notebook.
def _get_cache(self):
    """
    Return the cache to use for thundering herd protection, etc.

    The cache is created lazily on first use and memoized on the instance.
    """
    cache = self._cache
    if not cache:
        cache = self._cache = get_cache(self.app)
    return cache
Return the cache to use for thundering herd protection, etc.
def probabilities(self, choosers, alternatives):
    """
    Returns alternative probabilities for each chooser segment as a
    dictionary keyed by segment name.

    Parameters
    ----------
    choosers : pandas.DataFrame
        Table describing the agents making choices, e.g. households.
        Must have a column matching the .segmentation_col attribute.
    alternatives : pandas.DataFrame
        Table describing the things from which agents are choosing.

    Returns
    -------
    probabilities : dict of pandas.Series
    """
    logger.debug(
        'start: calculate probabilities in LCM group {}'.format(self.name))
    # Delegate to the per-segment model for each chooser group.
    probs = {
        name: self.models[name].probabilities(frame, alternatives)
        for name, frame in self._iter_groups(choosers)}
    logger.debug(
        'finish: calculate probabilities in LCM group {}'.format(
            self.name))
    return probs
Returns alternative probabilities for each chooser segment as a dictionary keyed by segment name. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probabilities : dict of pandas.Series
def reset_new_request(self):
    """Remove the non-sense args from the self.ignore, return self.new_request"""
    # 1) Rebuild the URL with ignorable query-string pairs removed.
    raw_url = self.new_request['url']
    parsed_url = urlparse(raw_url)
    qsl = parse_qsl(parsed_url.query)
    new_url = self._join_url(
        parsed_url, [i for i in qsl if i not in self.ignore['qsl']])
    self.new_request['url'] = new_url
    self.logger_function('ignore: %s' % self.ignore)
    # 2) Drop ignorable headers; remove the headers dict entirely if empty.
    for key in self.ignore['headers']:
        self.new_request['headers'].pop(key)
    if not self.new_request.get('headers'):
        self.new_request.pop('headers', None)
    # 3) Strip ignorable cookies from the Cookie header (unless the whole
    #    Cookie header was already dropped above). Header names are
    #    title-cased only for the lookup.
    if self.ignore['Cookie'] and 'Cookie' not in self.ignore['headers']:
        headers = self.new_request['headers']
        headers = {key.title(): headers[key] for key in headers}
        if 'Cookie' in headers:
            cookies = SimpleCookie(headers['Cookie'])
            new_cookie = '; '.join([
                i[1].OutputString() for i in cookies.items()
                if i[0] not in self.ignore['Cookie']
            ])
            self.new_request['headers']['Cookie'] = new_cookie
    # 4) For POSTs, prune ignorable form/json fields from the body.
    if self.new_request['method'] == 'post':
        data = self.new_request.get('data')
        if data:
            if isinstance(data, dict):
                for key in self.ignore['form_data']:
                    data.pop(key)
            if (not data) or self.ignore['total_data']:
                # Body became empty or is wholly ignorable; drop it.
                self.new_request.pop('data', None)
            if self.has_json_data and 'data' in self.new_request:
                # NOTE: json path re-encodes the original bytes `data`,
                # not the (possibly pruned) dict above.
                json_data = json.loads(data.decode(self.encoding))
                for key in self.ignore['json_data']:
                    json_data.pop(key)
                self.new_request['data'] = json.dumps(json_data).encode(
                    self.encoding)
    return self.new_request
Remove the non-sense args from the self.ignore, return self.new_request
def create_widget(self):
    """ Create the underlying widget.
    """
    d = self.declaration
    # Fall back to the platform's default number picker style.
    style = d.style or '@attr/numberPickerStyle'
    self.widget = Picker(self.get_context(), None, style)
Create the underlying widget.
def setLocked(self, state, force=False):
    """
    Sets the locked state for this panel to the inputed state.

    :param      state | <bool>
    """
    # No-op when the state is unchanged, unless forced.
    if not force and state == self._locked:
        return

    self._locked = state
    tabbar = self.tabBar()
    tabbar.setLocked(state)

    if self.hideTabsWhenLocked():
        # When locked, only show tabs if there is more than one.
        show_tabs = self.count() > 1 or not state
    else:
        show_tabs = True
    tabbar.setVisible(show_tabs)

    if tabbar.isVisible():
        self.setContentsMargins(6, tabbar.height(), 6, 6)
    else:
        self.setContentsMargins(1, 1, 1, 1)

    self.adjustSizeConstraint()
Sets the locked state for this panel to the inputed state. :param state | <bool>
def _to_dict(self):
    """Return a json dictionary representing this model."""
    result = {}
    # Missing attributes and explicit None values are both skipped.
    if getattr(self, 'credential_id', None) is not None:
        result['credential_id'] = self.credential_id
    if getattr(self, 'source_type', None) is not None:
        result['source_type'] = self.source_type
    if getattr(self, 'credential_details', None) is not None:
        result['credential_details'] = self.credential_details._to_dict()
    return result
Return a json dictionary representing this model.
def _add_auth_headers(self, base):
    """Attach the access_token to a request as a Bearer authorization header."""
    if 'access_token' not in self.creds:
        # No token: return the headers unchanged.
        return base
    bearer = 'Bearer ' + self.creds['access_token']
    return _extend(base, {'authorization': bearer})
Attach the access_token to a request.
def estimateHeritabilities(self, K, verbose=False): """ estimate variance components and fixed effects from a single trait model having only two terms """ # Fit single trait model varg = SP.zeros(self.P) varn = SP.zeros(self.P) fixed = SP.zeros((1,self.P)) for p in range(self.P): y = self.Y[:,p:p+1] lmm = limix.CLMM() lmm.setK(K) lmm.setSNPs(SP.ones((K.shape[0],1))) lmm.setPheno(y) lmm.setCovs(SP.zeros((K.shape[0],1))) lmm.setVarcompApprox0(-20, 20, 1000) lmm.process() delta = SP.exp(lmm.getLdelta0()[0,0]) Vtot = SP.exp(lmm.getLSigma()[0,0]) varg[p] = Vtot varn[p] = delta*Vtot fixed[:,p] = lmm.getBetaSNP() if verbose: print(p) sth = {} sth['varg'] = varg sth['varn'] = varn sth['fixed'] = fixed return sth
estimate variance components and fixed effects from a single trait model having only two terms
def get_action_arguments(self, service_name, action_name):
    """
    Returns a list of tuples with all known arguments for the given
    service- and action-name combination. The tuples contain the
    argument-name, direction and data_type.
    """
    service = self.services[service_name]
    action = service.actions[action_name]
    return action.info
Returns a list of tuples with all known arguments for the given service- and action-name combination. The tuples contain the argument-name, direction and data_type.
def _normalize_stack(graphobjs):
    """Convert runs of qQ's in the stack into single graphobjs"""
    # Zero or more Q followed by one or more q, and nothing else.
    run_pattern = re.compile(r'Q*q+$')
    for operands, operator in graphobjs:
        op_text = str(operator)
        if run_pattern.match(op_text):
            # Split the run into individual save/restore operators,
            # each with no operands.
            for op_char in op_text:
                yield ([], op_char)
        else:
            yield (operands, op_text)
Convert runs of qQ's in the stack into single graphobjs
def sigmascale(nsigma):
    """Say we take a Gaussian bivariate and convert the parameters of the
    distribution to an ellipse (major, minor, PA). By what factor should we
    scale those axes to make the area of the ellipse correspond to the
    n-sigma confidence interval? Negative or zero values result in NaN.
    """
    from scipy.special import erfc
    # Two-sided tail probability of the 1-D normal at nsigma.
    tail_prob = erfc(nsigma / np.sqrt(2))
    return np.sqrt(-2 * np.log(tail_prob))
Say we take a Gaussian bivariate and convert the parameters of the distribution to an ellipse (major, minor, PA). By what factor should we scale those axes to make the area of the ellipse correspond to the n-sigma confidence interval? Negative or zero values result in NaN.
def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
    r"""
    Replace occurrences of pattern/regex in the Series/Index with
    some other string. Equivalent to :meth:`str.replace` or
    :func:`re.sub`.

    Parameters
    ----------
    pat : str or compiled regex
        String can be a character sequence or regular expression.

        .. versionadded:: 0.20.0
            `pat` also accepts a compiled regex.
    repl : str or callable
        Replacement string or a callable. The callable is passed the regex
        match object and must return a replacement string to be used.
        See :func:`re.sub`.

        .. versionadded:: 0.20.0
            `repl` also accepts a callable.
    n : int, default -1 (all)
        Number of replacements to make from start.
    case : bool, default None
        - If True, case sensitive (the default if `pat` is a string)
        - Set to False for case insensitive
        - Cannot be set if `pat` is a compiled regex
    flags : int, default 0 (no flags)
        - re module flags, e.g. re.IGNORECASE
        - Cannot be set if `pat` is a compiled regex
    regex : bool, default True
        - If True, assumes the passed-in pattern is a regular expression.
        - If False, treats the pattern as a literal string
        - Cannot be set to False if `pat` is a compiled regex or `repl`
          is a callable.

        .. versionadded:: 0.23.0

    Returns
    -------
    Series or Index of object
        A copy of the object with all matching occurrences of `pat`
        replaced by `repl`.

    Raises
    ------
    ValueError
        * if `regex` is False and `repl` is a callable or `pat` is a
          compiled regex
        * if `pat` is a compiled regex and `case` or `flags` is set

    Notes
    -----
    When `pat` is a compiled regex, all flags should be included in the
    compiled regex. Use of `case`, `flags`, or `regex=False` with a
    compiled regex will raise an error.

    Examples
    --------
    When `pat` is a string and `regex` is True (the default), the given
    `pat` is compiled as a regex. When `repl` is a string, it replaces
    matching regex patterns as with :meth:`re.sub`. NaN value(s) in the
    Series are left as is:

    >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)
    0    bao
    1    baz
    2    NaN
    dtype: object

    When `pat` is a string and `regex` is False, every `pat` is replaced
    with `repl` as with :meth:`str.replace`:

    >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)
    0    bao
    1    fuz
    2    NaN
    dtype: object

    When `repl` is a callable, it is called on every `pat` using
    :func:`re.sub`. The callable should expect one positional argument
    (a regex object) and return a string.

    To get the idea:

    >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)
    0    <_sre.SRE_Match object; span=(0, 1), match='f'>oo
    1    <_sre.SRE_Match object; span=(0, 1), match='f'>uz
    2                                                NaN
    dtype: object

    Reverse every lowercase alphabetic word:

    >>> repl = lambda m: m.group(0)[::-1]
    >>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
    0    oof 123
    1    rab zab
    2        NaN
    dtype: object

    Using regex groups (extract second group and swap case):

    >>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
    >>> repl = lambda m: m.group('two').swapcase()
    >>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)
    0    tWO
    1    bAR
    dtype: object

    Using a compiled regex with flags

    >>> import re
    >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
    >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
    0    foo
    1    bar
    2    NaN
    dtype: object
    """
    # Check whether repl is valid (GH 13438, GH 15055)
    if not (is_string_like(repl) or callable(repl)):
        raise TypeError("repl must be a string or callable")

    is_compiled_re = is_re(pat)
    if regex:
        if is_compiled_re:
            # A pre-compiled pattern carries its own flags; reject
            # `case`/`flags` here rather than silently ignoring them.
            if (case is not None) or (flags != 0):
                raise ValueError("case and flags cannot be set"
                                 " when pat is a compiled regex")
        else:
            # not a compiled regex
            # set default case
            if case is None:
                case = True

            # add case flag, if provided
            if case is False:
                flags |= re.IGNORECASE
        # Fast path: a single-character, flag-free string pattern with a
        # plain string replacement can use str.replace; everything else
        # goes through the re machinery.
        if is_compiled_re or len(pat) > 1 or flags or callable(repl):
            n = n if n >= 0 else 0
            compiled = re.compile(pat, flags=flags)
            f = lambda x: compiled.sub(repl=repl, string=x, count=n)
        else:
            f = lambda x: x.replace(pat, repl, n)
    else:
        if is_compiled_re:
            raise ValueError("Cannot use a compiled regex as replacement "
                             "pattern with regex=False")
        if callable(repl):
            raise ValueError("Cannot use a callable replacement when "
                             "regex=False")
        f = lambda x: x.replace(pat, repl, n)

    # _na_map applies `f` element-wise while propagating missing values.
    return _na_map(f, arr)
r""" Replace occurrences of pattern/regex in the Series/Index with some other string. Equivalent to :meth:`str.replace` or :func:`re.sub`. Parameters ---------- pat : str or compiled regex String can be a character sequence or regular expression. .. versionadded:: 0.20.0 `pat` also accepts a compiled regex. repl : str or callable Replacement string or a callable. The callable is passed the regex match object and must return a replacement string to be used. See :func:`re.sub`. .. versionadded:: 0.20.0 `repl` also accepts a callable. n : int, default -1 (all) Number of replacements to make from start. case : bool, default None - If True, case sensitive (the default if `pat` is a string) - Set to False for case insensitive - Cannot be set if `pat` is a compiled regex flags : int, default 0 (no flags) - re module flags, e.g. re.IGNORECASE - Cannot be set if `pat` is a compiled regex regex : bool, default True - If True, assumes the passed-in pattern is a regular expression. - If False, treats the pattern as a literal string - Cannot be set to False if `pat` is a compiled regex or `repl` is a callable. .. versionadded:: 0.23.0 Returns ------- Series or Index of object A copy of the object with all matching occurrences of `pat` replaced by `repl`. Raises ------ ValueError * if `regex` is False and `repl` is a callable or `pat` is a compiled regex * if `pat` is a compiled regex and `case` or `flags` is set Notes ----- When `pat` is a compiled regex, all flags should be included in the compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled regex will raise an error. Examples -------- When `pat` is a string and `regex` is True (the default), the given `pat` is compiled as a regex. When `repl` is a string, it replaces matching regex patterns as with :meth:`re.sub`. 
NaN value(s) in the Series are left as is: >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True) 0 bao 1 baz 2 NaN dtype: object When `pat` is a string and `regex` is False, every `pat` is replaced with `repl` as with :meth:`str.replace`: >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False) 0 bao 1 fuz 2 NaN dtype: object When `repl` is a callable, it is called on every `pat` using :func:`re.sub`. The callable should expect one positional argument (a regex object) and return a string. To get the idea: >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr) 0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo 1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz 2 NaN dtype: object Reverse every lowercase alphabetic word: >>> repl = lambda m: m.group(0)[::-1] >>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl) 0 oof 123 1 rab zab 2 NaN dtype: object Using regex groups (extract second group and swap case): >>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)" >>> repl = lambda m: m.group('two').swapcase() >>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl) 0 tWO 1 bAR dtype: object Using a compiled regex with flags >>> import re >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE) >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar') 0 foo 1 bar 2 NaN dtype: object
def Start(self):
    """Open the gzipped CSV sample file and record the profiling start time."""
    base_name = '{0:s}-{1:s}.csv.gz'.format(
        self._FILENAME_PREFIX, self._identifier)
    # place the file under self._path when one was configured
    output_path = os.path.join(self._path, base_name) if self._path else base_name
    self._sample_file = gzip.open(output_path, 'wb')
    self._WritesString(self._FILE_HEADER)
    self._start_time = time.time()
Starts the profiler.
def write(self, inputdata):
    """Write to a port on dummy_serial.

    Args:
        inputdata (string/bytes): data for sending to the port on
            dummy_serial. Will affect the response for subsequent read
            operations.

    Note that for Python2, the inputdata should be a **string**. For
    Python3 it should be of type **bytes**.

    Raises:
        TypeError: on Python 3, if ``inputdata`` is not bytes.
        IOError: if the port is not open.
    """
    if VERBOSE:
        _print_out('\nDummy_serial: Writing to port. Given:' + repr(inputdata) + '\n')

    if sys.version_info[0] > 2:
        # isinstance instead of an exact type() comparison: also accepts
        # bytes subclasses, which behave identically for decoding below.
        if not isinstance(inputdata, bytes):
            raise TypeError('The input must be type bytes. Given:' + repr(inputdata))
        inputstring = str(inputdata, encoding='latin1')
    else:
        inputstring = inputdata

    if not self._isOpen:
        raise IOError('Dummy_serial: Trying to write, but the port is not open. Given:' + repr(inputdata))

    # Look up which data that should be waiting for subsequent read commands.
    # Catch only KeyError (the only exception a dict lookup raises); the
    # previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    try:
        response = RESPONSES[inputstring]
    except KeyError:
        response = DEFAULT_RESPONSE
    self._waiting_data = response
Write to a port on dummy_serial. Args: inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response for subsequent read operations. Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.
def export_event_params(filename, params, count=None, density=None):
    """Write event analysis data to CSV.

    filename : path of the CSV file to write.
    params : list of per-segment dicts; each holds a 'data' object with a
        channel axis plus the computed parameters (schema defined by the
        caller — assumed keys include 'start', 'end', 'n_stitch', 'stage',
        'cycle', 'name' and optionally 'dur'/'slope'; TODO confirm).
    count, density : optional summary values written at the top of the file.
    """
    heading_row_1 = ['Segment index',
                     'Start time',
                     'End time',
                     'Stitches',
                     'Stage',
                     'Cycle',
                     'Event type',
                     'Channel']
    spacer = [''] * (len(heading_row_1) - 1)

    param_headings_1 = ['Min. amplitude (uV)',
                        'Max. amplitude (uV)',
                        'Peak-to-peak amplitude (uV)',
                        'RMS (uV)']
    param_headings_2 = ['Power (uV^2)',
                        'Peak power frequency (Hz)',
                        'Energy (uV^2s)',
                        'Peak energy frequency (Hz)']
    slope_headings = ['Q1 average slope (uV/s)',
                      'Q2 average slope (uV/s)',
                      'Q3 average slope (uV/s)',
                      'Q4 average slope (uV/s)',
                      'Q23 average slope (uV/s)',
                      'Q1 max. slope (uV/s^2)',
                      'Q2 max. slope (uV/s^2)',
                      'Q3 max. slope (uV/s^2)',
                      'Q4 max. slope (uV/s^2)',
                      'Q23 max. slope (uV/s^2)']
    ordered_params_1 = ['minamp', 'maxamp', 'ptp', 'rms']
    ordered_params_2 = ['power', 'peakpf', 'energy', 'peakef']

    # Select only the headings whose parameters were actually computed;
    # presence is probed on the first segment only.
    idx_params_1 = in1d(ordered_params_1, list(params[0].keys()))
    sel_params_1 = list(compress(ordered_params_1, idx_params_1))
    heading_row_2 = list(compress(param_headings_1, idx_params_1))

    if 'dur' in params[0].keys():
        heading_row_2 = ['Duration (s)'] + heading_row_2

    idx_params_2 = in1d(ordered_params_2, list(params[0].keys()))
    sel_params_2 = list(compress(ordered_params_2, idx_params_2))
    heading_row_3 = list(compress(param_headings_2, idx_params_2))

    heading_row_4 = []
    if 'slope' in params[0].keys():
        # First slope entry appears to be a pair of flags selecting
        # average-slope and max-slope columns — TODO confirm with caller.
        if next(iter(params[0]['slope']))[0]:
            heading_row_4.extend(slope_headings[:5])
        if next(iter(params[0]['slope']))[1]:
            heading_row_4.extend(slope_headings[5:])

    # Get data as matrix and compute descriptives.
    # Each sub-matrix has one row per (segment, channel) pair.
    dat = []
    if 'dur' in params[0].keys():
        one_mat = asarray([seg['dur'] for seg in params
                           for chan in seg['data'].axis['chan'][0]])
        one_mat = reshape(one_mat, (len(one_mat), 1))
        dat.append(one_mat)

    if sel_params_1:
        one_mat = asarray([[seg[x](chan=chan)[0] for x in sel_params_1]
                           for seg in params
                           for chan in seg['data'].axis['chan'][0]])
        dat.append(one_mat)

    if sel_params_2:
        one_mat = asarray([[seg[x][chan] for x in sel_params_2]
                           for seg in params
                           for chan in seg['data'].axis['chan'][0]])
        dat.append(one_mat)

    if 'slope' in params[0].keys():
        one_mat = asarray([[x for y in seg['slope'][chan] for x in y]
                           for seg in params
                           for chan in seg['data'].axis['chan'][0]])
        dat.append(one_mat)

    if dat:
        dat = concatenate(dat, axis=1)
        desc = get_descriptives(dat)

    with open(filename, 'w', newline='') as f:
        lg.info('Writing to ' + str(filename))
        csv_file = writer(f)
        csv_file.writerow(['Wonambi v{}'.format(__version__)])

        if count:
            csv_file.writerow(['Count', count])
        if density:
            csv_file.writerow(['Density', density])

        # NOTE(review): once `dat` holds an ndarray, `dat == []` is an
        # element-wise numpy comparison, not an emptiness test; the early
        # return only fires when `dat` is still the empty list.
        if dat == []:
            return

        csv_file.writerow(heading_row_1 + heading_row_2 + heading_row_3
                          + heading_row_4)
        # Summary rows over all (segment, channel) rows.
        csv_file.writerow(['Mean'] + spacer + list(desc['mean']))
        csv_file.writerow(['SD'] + spacer + list(desc['sd']))
        csv_file.writerow(['Mean of ln'] + spacer + list(desc['mean_log']))
        csv_file.writerow(['SD of ln'] + spacer + list(desc['sd_log']))
        idx = 0

        for seg in params:
            # NOTE(review): mutates the caller's dict — replaces the cycle
            # tuple with its third element.
            if seg['cycle'] is not None:
                seg['cycle'] = seg['cycle'][2]

            for chan in seg['data'].axis['chan'][0]:
                idx += 1
                data_row_1 = [seg[x](chan=chan)[0] for x in sel_params_1]
                data_row_2 = [seg[x][chan] for x in sel_params_2]

                if 'dur' in seg.keys():
                    data_row_1 = [seg['dur']] + data_row_1

                if 'slope' in seg.keys():
                    data_row_3 = [x for y in seg['slope'][chan] for x in y]
                    data_row_2 = data_row_2 + data_row_3

                csv_file.writerow([idx,
                                   seg['start'],
                                   seg['end'],
                                   seg['n_stitch'],
                                   seg['stage'],
                                   seg['cycle'],
                                   seg['name'],
                                   chan,
                                   ] + data_row_1 + data_row_2)
Write event analysis data to CSV.
def _make_request(session, url, argument=None, params=None, raw=False):
    """Issue a GET to a Voobly API endpoint and return the parsed payload.

    Raises VooblyError on connection failure or any of the API's
    sentinel error strings.
    """
    query = params or {}
    query['key'] = session.auth.key
    # NOTE(review): only the `argument` branch prefixes session.auth.base_url;
    # the bare-endpoint branch does not — confirm this asymmetry is intended.
    if argument:
        request_url = '{}{}{}{}'.format(session.auth.base_url, VOOBLY_API_URL, url, argument)
    else:
        request_url = '{}{}'.format(VOOBLY_API_URL, url)
    try:
        resp = session.get(request_url, params=query)
    except RequestException:
        raise VooblyError('failed to connect')
    body = resp.text
    # Sentinel error strings returned by the API in the response body.
    if body == 'bad-key':
        raise VooblyError('bad api key')
    if body == 'too-busy':
        raise VooblyError('service too busy')
    if not body:
        raise VooblyError('no data returned')
    if raw:
        return body
    try:
        return tablib.Dataset().load(body).dict
    except UnsupportedFormat:
        raise VooblyError('unexpected error {}'.format(body))
Make a request to API endpoint.
def draw_png(self, image, h_zoom, v_zoom, current_y):
    """Draw this time scale onto a PNG image.

    :param image: the image to draw onto
    :param int h_zoom: the horizontal zoom
    :param int v_zoom: the vertical zoom
    :param int current_y: the current y offset, in modules
    :type image: :class:`PIL.Image`
    """
    canvas = ImageDraw.Draw(image)
    px_per_second = int(h_zoom / self.rconf.mws)
    row_top_px = current_y * v_zoom

    # fixed-size font for the tick labels
    label_pt = 18
    label_font = ImageFont.truetype(self.FONT_PATH, label_pt)

    # one tick every self.time_step seconds
    for second in range(0, 1 + int(self.max_time), self.time_step):
        anchor_px = second * px_per_second

        # vertical tick mark spanning the whole row
        canvas.rectangle(
            (anchor_px - self.TICK_WIDTH,
             row_top_px,
             anchor_px + self.TICK_WIDTH,
             row_top_px + v_zoom),
            fill=PlotterColors.BLACK)

        # time label, vertically centered beside the tick
        label = self._time_string(second)
        label_x = anchor_px + self.TICK_WIDTH + self.TEXT_MARGIN
        label_y = row_top_px + (v_zoom - self.text_bounding_box(label_pt, label)[1]) // 2
        canvas.text((label_x, label_y), label, PlotterColors.BLACK, font=label_font)
Draw this time scale to PNG. :param image: the image to draw onto :param int h_zoom: the horizontal zoom :param int v_zoom: the vertical zoom :param int current_y: the current y offset, in modules :type image: :class:`PIL.Image`
def set_options(self, options):
    """Set instance variables based on an options dict.

    Validates the mutually-exclusive --media/--static switches, resolves
    the target container, and precomputes include/exclude lists, path
    roots and counters used by the sync run.

    Raises CommandError when both or neither of --media/--static are
    given without an explicit container.
    """
    # COMMAND LINE OPTIONS
    self.wipe = options.get("wipe")
    self.test_run = options.get("test_run")
    # BUG FIX: previously read "test_run" again, so the quiet flag was
    # silently tied to test_run instead of the user's --quiet option.
    self.quiet = options.get("quiet")
    self.container_name = options.get("container")
    self.verbosity = int(options.get("verbosity"))
    self.syncmedia = options.get("syncmedia")
    self.syncstatic = options.get("syncstatic")
    if self.test_run:
        self.verbosity = 2
    cli_includes = options.get("includes")
    cli_excludes = options.get("excludes")

    # CUMULUS CONNECTION AND SETTINGS FROM SETTINGS.PY
    if self.syncmedia and self.syncstatic:
        raise CommandError("options --media and --static are mutually exclusive")
    if not self.container_name:
        if self.syncmedia:
            self.container_name = CUMULUS["CONTAINER"]
        elif self.syncstatic:
            self.container_name = CUMULUS["STATIC_CONTAINER"]
        else:
            raise CommandError("must select one of the required options, either --media or --static")
    settings_includes = CUMULUS["INCLUDE_LIST"]
    settings_excludes = CUMULUS["EXCLUDE_LIST"]

    # PATH SETTINGS
    if self.syncmedia:
        self.file_root = os.path.abspath(settings.MEDIA_ROOT)
        self.file_url = settings.MEDIA_URL
    elif self.syncstatic:
        self.file_root = os.path.abspath(settings.STATIC_ROOT)
        self.file_url = settings.STATIC_URL
    # normalize: root ends with "/", url carries no leading "/"
    if not self.file_root.endswith("/"):
        self.file_root = self.file_root + "/"
    if self.file_url.startswith("/"):
        self.file_url = self.file_url[1:]

    # SYNCSTATIC VARS
    # combine includes and excludes from the cli and django settings file
    self.includes = list(set(cli_includes + settings_includes))
    self.excludes = list(set(cli_excludes + settings_excludes))
    # transform glob patterns to regular expressions
    self.local_filenames = []
    self.create_count = 0
    self.upload_count = 0
    self.update_count = 0
    self.skip_count = 0
    self.delete_count = 0
Sets instance variables based on an options dict
def density(args):
    """
    %prog density test.clm

    Estimate link density of contigs.
    """
    p = OptionParser(density.__doc__)
    # FIX: help string previously read "densitites"
    p.add_option("--save", default=False, action="store_true",
                 help="Write log densities of contigs to file")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    clmfile, = args
    clm = CLMFile(clmfile)
    pf = clmfile.rsplit(".", 1)[0]

    if opts.save:
        logdensities = clm.calculate_densities()
        densityfile = pf + ".density"
        # with-block guarantees the handle is closed even on error
        with open(densityfile, "w") as fw:
            for name, logd in logdensities.items():
                size = clm.tig_to_size[name]
                print("\t".join(str(x) for x in (name, size, logd)), file=fw)
        logging.debug("Density written to `{}`".format(densityfile))

    # reuse the already-computed prefix instead of rsplitting again
    tourfile = pf + ".tour"
    tour = clm.activate(tourfile=tourfile, backuptour=False)
    clm.flip_all(tour)
    clm.flip_whole(tour)
    clm.flip_one(tour)
%prog density test.clm Estimate link density of contigs.
def _from_dict(cls, _dict):
    """Initialize a Context object from a json dictionary.

    Known keys are converted to their model types; all remaining keys
    are passed through unchanged.
    """
    known = {}
    extra = _dict.copy()
    if 'conversation_id' in _dict:
        known['conversation_id'] = extra.pop('conversation_id')
    if 'system' in _dict:
        known['system'] = SystemResponse._from_dict(extra.pop('system'))
    if 'metadata' in _dict:
        known['metadata'] = MessageContextMetadata._from_dict(
            extra.pop('metadata'))
    known.update(extra)
    return cls(**known)
Initialize a Context object from a json dictionary.
def register_cli_argument(self, scope, dest, argtype, **kwargs):
    """Register an argument in the argument registry.

    :param scope: The command level to apply the argument registration
        (e.g. 'mygroup mycommand')
    :type scope: str
    :param dest: The parameter/destination that this argument is for
    :type dest: str
    :param argtype: The argument type for this command argument
    :type argtype: knack.arguments.CLIArgumentType
    :param kwargs: see knack.arguments.CLIArgumentType
    """
    # merge the overrides in kwargs on top of the supplied base type
    merged_type = CLIArgumentType(overrides=argtype, **kwargs)
    self.arguments[scope][dest] = merged_type
Add an argument to the argument registry :param scope: The command level to apply the argument registration (e.g. 'mygroup mycommand') :type scope: str :param dest: The parameter/destination that this argument is for :type dest: str :param argtype: The argument type for this command argument :type argtype: knack.arguments.CLIArgumentType :param kwargs: see knack.arguments.CLIArgumentType
def set_prekeys_as_sent(self, prekeyIds):
    """Flag the given prekey records as sent in the prekey store.

    :param prekeyIds: prekey records whose IDs will be marked as sent
    :type prekeyIds: list
    :return: None
    :rtype: None
    """
    logger.debug("set_prekeys_as_sent(prekeyIds=[%d prekeyIds])" % len(prekeyIds))
    sent_ids = [record.getId() for record in prekeyIds]
    self._store.preKeyStore.setAsSent(sent_ids)
:param prekeyIds: the prekey records whose IDs should be marked as sent :type prekeyIds: list :return: None :rtype: None
def _add_datetime_arraylike(self, other): """ Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray. """ if isinstance(other, np.ndarray): # At this point we have already checked that dtype is datetime64 from pandas.core.arrays import DatetimeArray other = DatetimeArray(other) # defer to implementation in DatetimeArray return other + self
Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray.
def command(self):
    """Return a string with the Python command that would rebuild this packet."""
    rendered_fields = []
    for name, value in self.fields.items():
        field_desc = self.get_field(name)
        if isinstance(value, Packet):
            rendered = value.command()
        elif field_desc.islist and field_desc.holds_packets and type(value) is list:
            # list of sub-packets: render each recursively
            rendered = "[%s]" % ",".join(Packet.command(item) for item in value)
        else:
            rendered = repr(value)
        rendered_fields.append("%s=%s" % (name, rendered))
    expr = "%s(%s)" % (self.__class__.__name__, ", ".join(rendered_fields))
    payload_expr = self.payload.command()
    if payload_expr:
        # chain the payload with the scapy "/" composition operator
        expr = expr + "/" + payload_expr
    return expr
Returns a string representing the command you have to type to obtain the same packet
def _generateFeatures(self):
    """Build the shared pool of feature SDRs.

    One independent list of self.numFeatures patterns is generated for
    each column, since the same feature should appear at a different
    location in every column.
    """
    size = self.sensorInputSize
    bits = self.numInputBits
    self.features = [
        [self._generatePattern(bits, size) for _ in xrange(self.numFeatures)]
        for _ in xrange(self.numColumns)
    ]
Generates a pool of features to be used for the experiments. For each index, numColumns SDR's are created, as locations for the same feature should be different for each column.
def get_maxsing(self, eigthresh=1.0e-5):
    """Get the number of leading singular components whose ratio to the
    largest singular value exceeds `eigthresh`.

    Parameters
    ----------
    eigthresh : float
        the ratio of the largest to smallest singular value

    Returns
    -------
    int : int
        number of singular components (always at least 1)
    """
    ratios = self.s.x.flatten() / self.s.x[0]
    n_kept = 0
    for ratio in ratios:
        # stop at the first component at or below the threshold
        # (`not >` also breaks on NaN, matching the original branch)
        if not ratio > eigthresh:
            break
        n_kept += 1
    return max(1, n_kept)
Get the number of singular components with a singular value ratio greater than or equal to eigthresh Parameters ---------- eigthresh : float the ratio of the largest to smallest singular value Returns ------- int : int number of singular components
def get(self, key, side):
    """Return the edge stored under a particular key.

    Parameters
    ----------
    key : tuple
        (te, be, le, re) tuple that identifies a tile
    side : str
        top, bottom, left, or right, which edge to return
    """
    flat_edges = getattr(self, side).ravel()
    return flat_edges[self.keys[key]]
Returns an edge given a particular key Parameters ---------- key : tuple (te, be, le, re) tuple that identifies a tile side : str top, bottom, left, or right, which edge to return
def post(self) -> "tuple[Vpn, int, dict]":
    """Create the vpn with the given request data.

    Returns the created ``Vpn`` together with the 201 status code and a
    ``Location`` header pointing at the new resource.
    (Annotation fixed: the function returns a 3-tuple, not a bare Vpn.)
    """
    vpn = Vpn()
    session.add(vpn)
    # apply the submitted fields to the freshly-added row
    self.update(vpn)
    # flush so vpn.id is assigned before building the Location URL
    session.flush()
    session.commit()
    return vpn, 201, {
        'Location': url_for('vpn', vpn_id=vpn.id)
    }
Creates the vpn with the given data.
def RIBVRFRouteLimitExceeded_originator_switch_info_switchVcsId(self, **kwargs):
    """Build the XML config tree for this notification leaf and hand it
    to the callback. (Auto-generated code.)
    """
    config = ET.Element("config")
    event = ET.SubElement(
        config, "RIBVRFRouteLimitExceeded",
        xmlns="http://brocade.com/ns/brocade-notification-stream")
    switch_info = ET.SubElement(event, "originator-switch-info")
    vcs_id = ET.SubElement(switch_info, "switchVcsId")
    vcs_id.text = kwargs.pop('switchVcsId')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def to_mask(self, method='exact', subpixels=5):
    """
    Return a list of `~photutils.ApertureMask` objects, one for each
    aperture position.

    Parameters
    ----------
    method : {'exact', 'center', 'subpixel'}, optional
        The method used to determine the overlap of the aperture on
        the pixel grid. Not all options are available for all aperture
        types. Note that the more precise methods are generally
        slower. The following methods are available:

        * ``'exact'`` (default):
          The the exact fractional overlap of the aperture and
          each pixel is calculated. The returned mask will contain
          values between 0 and 1.

        * ``'center'``:
          A pixel is considered to be entirely in or out of the
          aperture depending on whether its center is in or out of
          the aperture. The returned mask will contain values only
          of 0 (out) and 1 (in).

        * ``'subpixel'``:
          A pixel is divided into subpixels (see the ``subpixels``
          keyword), each of which are considered to be entirely in
          or out of the aperture depending on whether its center is
          in or out of the aperture. If ``subpixels=1``, this method
          is equivalent to ``'center'``. The returned mask will
          contain values between 0 and 1.

    subpixels : int, optional
        For the ``'subpixel'`` method, resample pixels by this factor
        in each dimension. That is, each pixel is divided into
        ``subpixels ** 2`` subpixels.

    Returns
    -------
    mask : list of `~photutils.ApertureMask`
        A list of aperture mask objects.
    """
    use_exact, subpixels = self._translate_mask_mode(method, subpixels)

    # Plain ellipse apertures expose `a`/`b`; annuli expose `a_in`/`a_out`.
    if hasattr(self, 'a'):
        a = self.a
        b = self.b
    elif hasattr(self, 'a_in'):  # annulus
        a = self.a_out
        b = self.b_out
        # inner semi-minor axis, derived so the inner ellipse keeps the
        # same axis ratio as the outer one
        b_in = self.a_in * self.b_out / self.a_out
    else:
        raise ValueError('Cannot determine the aperture shape.')

    masks = []
    for bbox, edges in zip(self.bounding_boxes, self._centered_edges):
        ny, nx = bbox.shape
        # fractional overlap of the (outer) ellipse with each pixel
        mask = elliptical_overlap_grid(edges[0], edges[1], edges[2],
                                       edges[3], nx, ny, a, b, self.theta,
                                       use_exact, subpixels)

        # subtract the inner ellipse for an annulus
        if hasattr(self, 'a_in'):
            mask -= elliptical_overlap_grid(edges[0], edges[1], edges[2],
                                            edges[3], nx, ny, self.a_in,
                                            b_in, self.theta, use_exact,
                                            subpixels)

        masks.append(ApertureMask(mask, bbox))

    return masks
Return a list of `~photutils.ApertureMask` objects, one for each aperture position. Parameters ---------- method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'``: A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- mask : list of `~photutils.ApertureMask` A list of aperture mask objects.
def delete(self, *args, **kwargs):
    """Delete the instance, then refresh the denormalized counts.

    Overrides ``delete`` so that cached counters are recomputed after
    the row has actually been removed.
    """
    super(UpdateCountsMixin, self).delete(*args, **kwargs)
    # recompute counts only once the deletion has gone through
    self.update_count()
custom delete method to update counts
def replace_entities(self, html):
    """Replace htmlentities with unicode characters.
    @Params
    html - html source to replace entities in
    @Returns
    String html with entities replaced
    """
    def decode_entity(match):
        """Decode a single &...; token; unknown entities pass through unchanged."""
        token = match.group(0)
        if token[:2] == "&#":
            # numeric character reference (hex "&#x41;" or decimal "&#65;")
            try:
                if token[:3] == "&#x":
                    return chr(int(token[3:-1], 16))
                return chr(int(token[2:-1]))
            except ValueError:
                return token
        # named entity, e.g. "&amp;"
        try:
            return chr(htmlentitydefs.name2codepoint[token[1:-1]])
        except KeyError:
            return token
    return re.sub(r"&#?\w+;", decode_entity, html)
Replace htmlentities with unicode characters @Params html - html source to replace entities in @Returns String html with entities replaced