def adapt_persistent_instance(persistent_object, target_rest_class=None, attribute_filter=None):
    """
    Adapts a single persistent instance to a REST model; at present this is
    a common method for all persistent backends.

    Refer to: https://groups.google.com/forum/#!topic/prestans-discuss/dO1yx8f60as
    for discussion on this feature.
    """
    # try and get the adapter and the REST class for the persistent object
    if target_rest_class is None:
        adapter_instance = registry.get_adapter_for_persistent_model(persistent_object)
    else:
        if inspect.isclass(target_rest_class):
            target_rest_class = target_rest_class()

        adapter_instance = registry.get_adapter_for_persistent_model(persistent_object, target_rest_class)

    # would raise an exception if the attribute_filter differs from the target_rest_class
    if attribute_filter is not None and isinstance(attribute_filter, parser.AttributeFilter):
        parser.AttributeFilter.from_model(target_rest_class).conforms_to_template_filter(attribute_filter)

    # convert filter to immutable if it isn't already
    if isinstance(attribute_filter, parser.AttributeFilter):
        attribute_filter = attribute_filter.as_immutable()

    return adapter_instance.adapt_persistent_to_rest(persistent_object, attribute_filter)

def lag_avgs(self):
    '''
    same data as expo_avgs, but with keys as the average age of the data
    -- assuming evenly spaced data points -- rather than decay rates
    '''
    if not self.interval:
        return
    interval = self.interval.mean
    return dict([(interval / alpha, val)
                 for alpha, val in self.get_expo_avgs().items()])

def temp45(msg):
    """Static air temperature.

    Args:
        msg (String): 28-byte hexadecimal message string

    Returns:
        float: temperature in degrees Celsius
    """
    d = hex2bin(data(msg))

    sign = int(d[16])
    value = bin2int(d[17:26])

    if sign:
        value = value - 512

    temp = value * 0.25  # celsius
    temp = round(temp, 1)

    return temp

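# A minimal standalone sketch of the 10-bit two's-complement decode that
# temp45 performs, with the pyModeS helpers (hex2bin, data, bin2int)
# factored out so the bit arithmetic is visible on its own.
def _decode_temp_field(sign_bit, magnitude_bits):
    value = int(magnitude_bits, 2)  # 9 magnitude bits following the sign bit
    if sign_bit:
        value -= 512  # two's complement: subtract 2**9
    return round(value * 0.25, 1)  # LSB weight is 0.25 degrees Celsius

assert _decode_temp_field(0, "000101000") == 10.0  # 40 * 0.25
assert _decode_temp_field(1, "111110000") == -4.0  # (496 - 512) * 0.25
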
def update(self):
    """Update RAM memory stats using the input method."""
    # Init new stats
    stats = self.get_init_value()

    if self.input_method == 'local':
        # Update stats using the standard system lib
        # Grab MEM using the psutil virtual_memory method
        vm_stats = psutil.virtual_memory()

        # Get all the memory stats (copy/paste of the psutil documentation)
        # total: total physical memory available.
        # available: the actual amount of available memory that can be given
        #     instantly to processes that request more memory in bytes; this
        #     is calculated by summing different memory values depending on
        #     the platform (e.g. free + buffers + cached on Linux) and it is
        #     supposed to be used to monitor actual memory usage in a cross
        #     platform fashion.
        # percent: the percentage usage calculated as (total - available) / total * 100.
        # used: memory used, calculated differently depending on the platform
        #     and designed for informational purposes only.
        # free: memory not being used at all (zeroed) that is readily
        #     available; note that this doesn't reflect the actual memory
        #     available (use 'available' instead).
        # Platform-specific fields:
        # active: (UNIX): memory currently in use or very recently used, and so it is in RAM.
        # inactive: (UNIX): memory that is marked as not used.
        # buffers: (Linux, BSD): cache for things like file system metadata.
        # cached: (Linux, BSD): cache for various things.
        # wired: (BSD, macOS): memory that is marked to always stay in RAM. It is never moved to disk.
        # shared: (BSD): memory that may be simultaneously accessed by multiple processes.
        self.reset()
        for mem in ['total', 'available', 'percent', 'used', 'free',
                    'active', 'inactive', 'buffers', 'cached',
                    'wired', 'shared']:
            if hasattr(vm_stats, mem):
                stats[mem] = getattr(vm_stats, mem)

        # Use the 'free'/htop calculation
        # free = available + buffers + cached
        # (stats is a dict, so membership is tested with 'in')
        stats['free'] = stats['available']
        if 'buffers' in stats:
            stats['free'] += stats['buffers']
        if 'cached' in stats:
            stats['free'] += stats['cached']
        # used = total - free
        stats['used'] = stats['total'] - stats['free']
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        if self.short_system_name in ('windows', 'esxi'):
            # Mem stats for Windows|VMware ESXi are stored in the FS table
            try:
                fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                              bulk=True)
            except KeyError:
                self.reset()
            else:
                for fs in fs_stat:
                    # The Physical Memory (Windows) or Real Memory (VMware)
                    # gives statistics on RAM usage and availability.
                    if fs in ('Physical Memory', 'Real Memory'):
                        stats['total'] = int(fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
                        stats['used'] = int(fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
                        stats['percent'] = float(stats['used'] * 100 / stats['total'])
                        stats['free'] = stats['total'] - stats['used']
                        break
        else:
            # Default behavior for other OSes
            stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])

            if stats['total'] == '':
                self.reset()
                return self.stats

            for key in iterkeys(stats):
                if stats[key] != '':
                    stats[key] = float(stats[key]) * 1024

            # Use the 'free'/htop calculation
            stats['free'] = stats['free'] - stats['total'] + (stats['buffers'] + stats['cached'])
            # used = total - free
            stats['used'] = stats['total'] - stats['free']
            # percent: the percentage usage calculated as (total - available) / total * 100.
            stats['percent'] = float((stats['total'] - stats['free']) / stats['total'] * 100)

    # Update the stats
    self.stats = stats

    return self.stats

def find(self, pattern):
    """
    Searches for a pattern in the current memory segment
    """
    pos = self.current_segment.data.find(pattern)
    if pos == -1:
        return -1
    return pos + self.current_position

def getPassage(self, urn, inventory=None, context=None):
    """ Retrieve a passage

    :param urn: URN identifying the text's passage (Minimum depth: 1)
    :type urn: text
    :param inventory: Name of the inventory
    :type inventory: text
    :param context: Number of citation units at the same level of the
        citation hierarchy as the requested urn, immediately preceding
        and immediately following the requested urn, to include in the reply
    :type context: int
    :rtype: str
    """
    return self.call({
        "inv": inventory,
        "urn": urn,
        "context": context,
        "request": "GetPassage"
    })

def create_diamond_db(self):
    '''Create a diamond database from the unaligned sequences in this package.

    Returns
    -------
    path to the created diamond db e.g. 'my_sequences.dmnd'
    '''
    base = self.unaligned_sequence_database_path()
    cmd = "diamond makedb --in '%s' -d '%s'" % (self.unaligned_sequence_database_path(), base)
    extern.run(cmd)
    diamondb = '%s.dmnd' % base
    # Mostly this moves a file to its current location because Create
    # follows this same logic, but there's a specially crafted
    # test/data/mcrA.gpkg which is slightly different.
    os.rename(diamondb, self.diamond_database_path())
    return diamondb

def _scrollTree(self, value):
    """
    Updates the tree view scrolling to the inputted value.

    :param value | <int>
    """
    if self._scrolling:
        return

    tree_bar = self.uiGanttTREE.verticalScrollBar()

    self._scrolling = True
    tree_bar.setValue(value)
    self._scrolling = False

def search(keyword, type=1, offset=0, limit=30):
    """Search for songs; also supports searching for artists, albums, etc.

    :param keyword: search keyword
    :param type: (optional) search type; 1: song, 100: artist, 1000: playlist, 1002: user
    :param offset: (optional) paging offset, defaults to 0
    :param limit: (optional) maximum number of rows returned, defaults to 30
    """
    if keyword is None:
        raise ParamsError()
    r = NCloudBot()
    r.method = 'SEARCH'
    r.data = {
        's': keyword,
        'limit': str(limit),
        'type': str(type),
        'offset': str(offset)
    }
    r.send()
    return r.response

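# Usage sketch for search(); it performs a live request through NCloudBot,
# so the keyword and the response shape shown here are illustrative only.
# response = search('hello', type=1, offset=0, limit=10)
# response carries the server's JSON payload with the matching songs.
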
def listFormats(self, vendorSpecific=None):
    """See Also: listFormatsResponse()

    Args:
        vendorSpecific:

    Returns:
    """
    response = self.listFormatsResponse(vendorSpecific)
    return self._read_dataone_type_response(response, 'ObjectFormatList')

def new_qt_console(self, evt=None):
    """start a new qtconsole connected to our kernel"""
    return connect_qtconsole(self.ipkernel.connection_file,
                             profile=self.ipkernel.profile)

def ensure_dtraj_list(dtrajs):
    r"""Makes sure that dtrajs is a list of discrete trajectories (array of int)
    """
    if isinstance(dtrajs, list):
        # elements are ints? then wrap into a list
        if is_list_of_int(dtrajs):
            return [np.array(dtrajs, dtype=int)]
        else:
            for i in range(len(dtrajs)):
                dtrajs[i] = ensure_dtraj(dtrajs[i])
            return dtrajs
    else:
        return [ensure_dtraj(dtrajs)]

def from_ofxparse(data, institution):
    """Instantiate :py:class:`ofxclient.Account` subclass from ofxparse module

    :param data: an ofxparse account
    :type data: An :py:class:`ofxparse.Account` object
    :param institution: The parent institution of the account
    :type institution: :py:class:`ofxclient.Institution` object
    """
    description = data.desc if hasattr(data, 'desc') else None
    if data.type == AccountType.Bank:
        return BankAccount(
            institution=institution,
            number=data.account_id,
            routing_number=data.routing_number,
            account_type=data.account_type,
            description=description)
    elif data.type == AccountType.CreditCard:
        return CreditCardAccount(
            institution=institution,
            number=data.account_id,
            description=description)
    elif data.type == AccountType.Investment:
        return BrokerageAccount(
            institution=institution,
            number=data.account_id,
            broker_id=data.brokerid,
            description=description)
    raise ValueError("unknown account type: %s" % data.type)

def get_type_data(name):
    """Return dictionary representation of type.

    Can be used to initialize primordium.type.primitives.Type
    """
    name = name.upper()
    try:
        return {
            'authority': 'birdland.mit.edu',
            'namespace': 'currency format',
            'identifier': name,
            'domain': 'Currency Format Types',
            'display_name': JEFFS_CURRENCY_FORMAT_TYPES[name] + ' Currency Format Type',
            'display_label': JEFFS_CURRENCY_FORMAT_TYPES[name],
            'description': ('The format type for the ' +
                            JEFFS_CURRENCY_FORMAT_TYPES[name] +
                            ' currency')
        }
    except KeyError:
        raise NotFound('Currency Format Type: ' + name)

def to_concat_skip_model(self, start_id, end_id):
    """Add a weighted concatenate skip-connection from after the start node to the end node.

    Args:
        start_id: The convolutional layer ID, after which to start the skip-connection.
        end_id: The convolutional layer ID, after which to end the skip-connection.
    """
    self.operation_history.append(("to_concat_skip_model", start_id, end_id))
    filters_end = self.layer_list[end_id].output.shape[-1]
    filters_start = self.layer_list[start_id].output.shape[-1]
    start_node_id = self.layer_id_to_output_node_ids[start_id][0]

    pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0]
    end_node_id = self.layer_id_to_output_node_ids[end_id][0]

    skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id)

    concat_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id]))
    self._redirect_edge(pre_end_node_id, end_node_id, concat_input_node_id)

    concat_layer = StubConcatenate()
    concat_layer.input = [
        self.node_list[concat_input_node_id],
        self.node_list[skip_output_id],
    ]
    concat_output_node_id = self._add_node(Node(concat_layer.output_shape))
    self._add_edge(concat_layer, concat_input_node_id, concat_output_node_id)
    self._add_edge(concat_layer, skip_output_id, concat_output_node_id)
    concat_layer.output = self.node_list[concat_output_node_id]
    self.node_list[concat_output_node_id].shape = concat_layer.output_shape

    # Add the concatenate layer.
    new_conv_layer = get_conv_class(self.n_dim)(
        filters_start + filters_end, filters_end, 1
    )
    self._add_edge(new_conv_layer, concat_output_node_id, end_node_id)
    new_conv_layer.input = self.node_list[concat_output_node_id]
    new_conv_layer.output = self.node_list[end_node_id]
    self.node_list[end_node_id].shape = new_conv_layer.output_shape

    if self.weighted:
        filter_shape = (1,) * self.n_dim
        weights = np.zeros((filters_end, filters_end) + filter_shape)
        for i in range(filters_end):
            filter_weight = np.zeros((filters_end,) + filter_shape)
            center_index = (i,) + (0,) * self.n_dim
            filter_weight[center_index] = 1
            weights[i, ...] = filter_weight
        weights = np.concatenate(
            (weights, np.zeros((filters_end, filters_start) + filter_shape)), axis=1
        )
        bias = np.zeros(filters_end)
        new_conv_layer.set_weights(
            (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
        )

def atc(jobid):
    '''
    Print the at(1) script that will run for the passed job id.
    This is mostly for debugging, so the output will just be text.

    CLI Example:

    .. code-block:: bash

        salt '*' at.atc <jobid>
    '''
    atjob_file = '/var/spool/cron/atjobs/{job}'.format(job=jobid)
    if __salt__['file.file_exists'](atjob_file):
        with salt.utils.files.fopen(atjob_file, 'r') as rfh:
            return ''.join([salt.utils.stringutils.to_unicode(x)
                            for x in rfh.readlines()])
    else:
        return {'error': 'invalid job id \'{0}\''.format(jobid)}

def calculate_size(name, service_name):
    """ Calculates the request payload size"""
    data_size = 0
    data_size += calculate_size_str(name)
    data_size += calculate_size_str(service_name)
    return data_size

def is_credit_card(string, card_type=None):
    """
    Checks if a string is a valid credit card number.
    If card type is provided then it checks that specific type,
    otherwise any known credit card number will be accepted.

    :param string: String to check.
    :type string: str
    :param card_type: Card type. Can be one of these:

        * VISA
        * MASTERCARD
        * AMERICAN_EXPRESS
        * DINERS_CLUB
        * DISCOVER
        * JCB

        or None. Default to None (any card).
    :type card_type: str
    :return: True if credit card, false otherwise.
    :rtype: bool
    """
    if not is_full_string(string):
        return False
    if card_type:
        if card_type not in CREDIT_CARDS:
            raise KeyError(
                'Invalid card type "{}". Valid types are: {}'.format(
                    card_type, ', '.join(CREDIT_CARDS.keys()))
            )
        return bool(CREDIT_CARDS[card_type].search(string))
    for c in CREDIT_CARDS:
        if CREDIT_CARDS[c].search(string):
            return True
    return False

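# Usage sketch, assuming CREDIT_CARDS maps type names to compiled regexes
# as the function requires; the number shown is the standard Visa test number.
# is_credit_card('4111111111111111')                    -> True (any card)
# is_credit_card('4111111111111111', card_type='VISA')  -> True
# is_credit_card('not a card number')                   -> False
# is_credit_card('4111111111111111', card_type='FOO')   -> raises KeyError
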
def execute_side_effect(side_effect=UNDEFINED, args=UNDEFINED, kwargs=UNDEFINED):
    """
    Executes a side effect if one is defined.

    :param side_effect: The side effect to execute
    :type side_effect: Mixed. If it's an exception it's raised. If it's
        callable it's called with the parameters.
    :param tuple args: The arguments passed to the stubbed out method
    :param dict kwargs: The kwargs passed to the stubbed out method.

    :rtype: mixed
    :returns: Whatever the passed side_effect returns
    :raises: Whatever error is defined as the side_effect
    """
    if args == UNDEFINED:
        args = tuple()
    if kwargs == UNDEFINED:
        kwargs = {}

    if isinstance(side_effect, BaseException):  # covers any raisable exception
        raise side_effect
    elif hasattr(side_effect, '__call__'):  # If it's callable...
        return side_effect(*args, **kwargs)
    else:
        raise Exception("Caliendo doesn't know what to do with your side effect. {0}".format(side_effect))

def alter(self, operation, timeout=None, metadata=None, credentials=None):
    """Runs a modification via this client."""
    new_metadata = self.add_login_metadata(metadata)

    try:
        return self.any_client().alter(operation, timeout=timeout,
                                       metadata=new_metadata,
                                       credentials=credentials)
    except Exception as error:
        if util.is_jwt_expired(error):
            self.retry_login()
            new_metadata = self.add_login_metadata(metadata)
            return self.any_client().alter(operation, timeout=timeout,
                                           metadata=new_metadata,
                                           credentials=credentials)
        else:
            raise error

def set_min_lease(self, min_lease):
    """
    Set the minimum lease period in months.

    :param min_lease: int
    """
    self._query_params += str(QueryParam.MIN_LEASE) + str(min_lease)

def run(self):
    """
    function called when thread is started
    """
    global parallel
    if parallel:
        download_parallel(self.url, self.directory, self.idx,
                          self.min_file_size, self.max_file_size,
                          self.no_redirects)
    else:
        download(self.url, self.directory, self.idx,
                 self.min_file_size, self.max_file_size,
                 self.no_redirects)

def get_time_to_merge_request_response(self, item):
    """Get the first date at which a review was made on the PR by someone
    other than the user who created the PR.
    """
    review_dates = [str_to_datetime(review['created_at'])
                    for review in item['review_comments_data']
                    if item['user']['login'] != review['user']['login']]
    if review_dates:
        return min(review_dates)
    return None

def getRnaQuantMetadata(self):
    """
    Input is a tab file with no header.  Columns are:
    Id, annotations, description, name, readGroupId
    where annotations is a comma separated list.
    """
    rnaQuantId = self.getLocalId()
    with self._db as dataSource:
        rnaQuantReturned = dataSource.getRnaQuantificationById(rnaQuantId)
    self.addRnaQuantMetadata(rnaQuantReturned)

def initialTrendSmoothingFactors(self, timeSeries):
    """ Calculate the initial trend smoothing factor b0.

    Explanation:
        http://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing

    :return: Returns the initial trend smoothing factor b0
    """
    result = 0.0
    seasonLength = self.get_parameter("seasonLength")
    k = min(len(timeSeries) - seasonLength, seasonLength)
    # In case of only one full season, use the average trend of the months
    # that we have twice
    for i in xrange(0, k):
        result += (timeSeries[seasonLength + i][1] - timeSeries[i][1]) / seasonLength
    return result / k

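# A worked sketch of the same b0 formula on a toy series, using plain floats
# instead of the (timestamp, value) pairs the method indexes with [i][1]:
# b0 = (1/k) * sum_{i=0..k-1} (y[L+i] - y[i]) / L, with L = seasonLength.
series = [1.0, 2.0, 3.0, 4.0]
L = 2
k = min(len(series) - L, L)
b0 = sum((series[L + i] - series[i]) / L for i in range(k)) / k
assert b0 == 1.0  # the toy series climbs one unit per step
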
def _pretty_size(size):
    '''
    Print sizes in a similar fashion as eclean
    '''
    units = [' G', ' M', ' K', ' B']
    while units and size >= 1000:
        size = size / 1024.0
        units.pop()
    return '{0}{1}'.format(round(size, 1), units[-1])

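# Usage sketch; note the deliberate eclean quirk that the loop divides by
# 1024 but switches units at 1000.
assert _pretty_size(500) == '500 B'
assert _pretty_size(2048) == '2.0 K'
assert _pretty_size(5 * 1024 ** 2) == '5.0 M'
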
def ensure_dirs(filename):
    """Make sure the directories exist for `filename`."""
    dirname, _ = os.path.split(filename)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)

def train_rdp_classifier(training_seqs_file, taxonomy_file, model_output_dir,
                         max_memory=None, tmp_dir=tempfile.gettempdir()):
    """ Train RDP Classifier, saving to model_output_dir

    training_seqs_file, taxonomy_file: file-like objects used to
        train the RDP Classifier (see RdpTrainer documentation for
        format of training data)

    model_output_dir: directory in which to save the files necessary
        to classify sequences according to the training data

    Once the model data has been generated, the RDP Classifier may
    be used to classify sequences.
    """
    app_kwargs = {}
    if tmp_dir is not None:
        app_kwargs['TmpDir'] = tmp_dir
    app = RdpTrainer(**app_kwargs)

    if max_memory is not None:
        app.Parameters['-Xmx'].on(max_memory)

    temp_taxonomy_file = tempfile.NamedTemporaryFile(
        prefix='RdpTaxonomy_', suffix='.txt', dir=tmp_dir)
    temp_taxonomy_file.write(taxonomy_file.read())
    temp_taxonomy_file.seek(0)

    app.Parameters['taxonomy_file'].on(temp_taxonomy_file.name)
    app.Parameters['model_output_dir'].on(model_output_dir)
    return app(training_seqs_file)

def read_json(self):
    """Calls the overridden method.

    :returns: The read metadata.
    :rtype: dict
    """
    with reading_ancillary_files(self):
        metadata = super(GenericLayerMetadata, self).read_json()
    return metadata

def _parse_siblings(s, **kw):
    """
    http://stackoverflow.com/a/26809037
    """
    bracket_level = 0
    current = []
    # trick to remove special-case of trailing chars
    for c in (s + ","):
        if c == "," and bracket_level == 0:
            yield parse_node("".join(current), **kw)
            current = []
        else:
            if c == "(":
                bracket_level += 1
            elif c == ")":
                bracket_level -= 1
            current.append(c)

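# A self-contained sketch of the same bracket-aware split, with parse_node
# replaced by plain string collection so the splitting is visible on its own.
def _split_siblings(s):
    bracket_level, current, parts = 0, [], []
    for c in s + ",":  # trailing comma flushes the final chunk
        if c == "," and bracket_level == 0:
            parts.append("".join(current))
            current = []
        else:
            bracket_level += (c == "(") - (c == ")")
            current.append(c)
    return parts

assert _split_siblings("a,(b,c),d") == ["a", "(b,c)", "d"]
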
def get_obj_cols(df):
    """
    Returns names of 'object' columns in the DataFrame.
    """
    obj_cols = []
    for idx, dt in enumerate(df.dtypes):
        if dt == 'object' or is_category(dt):
            obj_cols.append(df.columns.values[idx])
    return obj_cols

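# Usage sketch with pandas; is_category in the surrounding module wraps a
# categorical-dtype check, so a plain object column suffices to illustrate.
# import pandas as pd
# df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y'], 'c': [0.1, 0.2]})
# get_obj_cols(df)  -> ['b']
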
def select_eps(xmrs, nodeid=None, iv=None, label=None, pred=None):
    """
    Return the list of matching elementary predications in *xmrs*.

    :class:`~delphin.mrs.components.ElementaryPredication` objects for
    *xmrs* match if their `nodeid` matches *nodeid*,
    `intrinsic_variable` matches *iv*, `label` matches *label*, and
    `pred` matches *pred*. The *nodeid*, *iv*, *label*, and *pred*
    filters are ignored if they are `None`.

    Args:
        xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
            query
        nodeid (optional): nodeid to match
        iv (str, optional): intrinsic variable to match
        label (str, optional): label to match
        pred (str, :class:`~delphin.mrs.components.Pred`, optional):
            predicate to match
    Returns:
        list: matching elementary predications
    """
    epmatch = lambda n: ((nodeid is None or n.nodeid == nodeid) and
                         (iv is None or n.iv == iv) and
                         (label is None or n.label == label) and
                         (pred is None or n.pred == pred))
    return list(filter(epmatch, xmrs.eps()))

def dump(self, config, instance, file_object, prefer=None, **kwargs):
    """ An abstract method that dumps to a given file object.

    :param class config: The config class of the instance
    :param object instance: The instance to dump
    :param file file_object: The file object to dump to
    :param str prefer: The preferred serialization module name
    """
    file_object.write(self.dumps(config, instance, prefer=prefer, **kwargs))

def build_collision_table(aliases, levels=COLLISION_CHECK_LEVEL_DEPTH):
    """
    Build the collision table according to the alias configuration file
    against the entire command table.

    self.collided_alias is structured as:
    {
        'collided_alias': [the command levels at which collisions happen]
    }
    For example:
    {
        'account': [1, 2]
    }
    This means that 'account' is a reserved command in level 1 and level 2
    of the command tree because of (az account ...) and (az storage account ...)
                                       lvl 1                         lvl 2

    Args:
        levels: the number of levels we traverse through the command table tree.
    """
    collided_alias = defaultdict(list)
    for alias in aliases:
        # Only care about the first word in the alias because an alias
        # cannot have spaces (unless it has positional arguments)
        word = alias.split()[0]
        for level in range(1, levels + 1):
            collision_regex = r'^{}{}($|\s)'.format(r'([a-z\-]*\s)' * (level - 1), word.lower())
            if list(filter(re.compile(collision_regex).match, azext_alias.cached_reserved_commands)) \
                    and level not in collided_alias[word]:
                collided_alias[word].append(level)

    telemetry.set_collided_aliases(list(collided_alias.keys()))
    return collided_alias

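# A sketch of the collision regex for word='account' at level 2: one leading
# command word, then the alias word, then end-of-string or whitespace.
import re
pattern = r'^{}{}($|\s)'.format(r'([a-z\-]*\s)' * 1, 'account')
assert re.match(pattern, 'storage account list')  # level-2 collision
assert not re.match(pattern, 'account list')      # needs a leading word
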
def get_all_route_tables(self, route_table_ids=None, filters=None):
    """
    Retrieve information about your routing tables. You can filter results
    to return information only about those route tables that match your
    search parameters. Otherwise, all route tables associated with your
    account are returned.

    :type route_table_ids: list
    :param route_table_ids: A list of strings with the desired route table IDs.

    :type filters: list of tuples
    :param filters: A list of tuples containing filters. Each tuple
        consists of a filter key and a filter value.

    :rtype: list
    :return: A list of :class:`boto.vpc.routetable.RouteTable`
    """
    params = {}
    if route_table_ids:
        self.build_list_params(params, route_table_ids, "RouteTableId")
    if filters:
        self.build_filter_params(params, dict(filters))
    return self.get_list('DescribeRouteTables', params, [('item', RouteTable)])

def trigger_callback(self, sid, namespace, id, data):
    """Invoke an application callback."""
    callback = None
    try:
        callback = self.callbacks[sid][namespace][id]
    except KeyError:
        # if we get an unknown callback we just ignore it
        self._get_logger().warning('Unknown callback received, ignoring.')
    else:
        del self.callbacks[sid][namespace][id]
    if callback is not None:
        callback(*data)

def create_token(user, client, scope, id_token_dic=None):
    """
    Create and populate a Token object.

    Return a Token object.
    """
    token = Token()
    token.user = user
    token.client = client
    token.access_token = uuid.uuid4().hex

    if id_token_dic is not None:
        token.id_token = id_token_dic

    token.refresh_token = uuid.uuid4().hex
    token.expires_at = timezone.now() + timedelta(
        seconds=settings.get('OIDC_TOKEN_EXPIRE'))
    token.scope = scope

    return token

def delete_messages(self, ids):
    """ Delete selected messages for the current user

    :param ids: list of ids
    """
    str_ids = self._return_comma_list(ids)
    return self.request('MsgAction', {'action': {'op': 'delete',
                                                 'id': str_ids}})

def set_servo_angle(self, goalangle, goaltime, led):
    """ Sets the servo angle (in degrees).

    Enable torque using the torque_on function before calling this.

    Args:
        goalangle (int): The desired angle in degrees, range -150 to 150
        goaltime (int): the time taken to move from present position
            to goalposition
        led (int): the LED color
            0x00 LED off
            0x04 GREEN
            0x08 BLUE
            0x10 RED
    """
    if (self.servomodel == 0x06) or (self.servomodel == 0x04):
        goalposition = scale(goalangle, -159.9, 159.6, 10627, 22129)
    else:
        goalposition = scale(goalangle, -150, 150, 21, 1002)

    self.set_servo_position(goalposition, goaltime, led)

def validateObjectPath(p):
    """
    Ensures that the provided object path conforms to the DBus standard.
    Throws a L{error.MarshallingError} if non-conformant

    @type p: C{string}
    @param p: A DBus object path
    """
    if not p.startswith('/'):
        raise MarshallingError('Object paths must begin with a "/"')
    if len(p) > 1 and p[-1] == '/':
        raise MarshallingError('Object paths may not end with "/"')
    if '//' in p:
        raise MarshallingError('"//" is not allowed in object paths')
    if invalid_obj_path_re.search(p):
        raise MarshallingError('Invalid characters contained in object path')

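# Usage sketch: a conformant path passes silently, the rest raise
# MarshallingError.
# validateObjectPath('/org/freedesktop/DBus')  # OK
# validateObjectPath('relative/path')          # no leading '/'
# validateObjectPath('/trailing/')             # trailing '/'
# validateObjectPath('/double//slash')         # '//' not allowed
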
def fire(self, *args, **kwargs):
    """
    Emit the signal, calling all coroutines in-line with the given
    arguments and in the order they were registered.

    This is obviously a coroutine.

    Instead of calling :meth:`fire` explicitly, the ad-hoc signal object
    itself can be called, too.
    """
    for token, coro in list(self._connections.items()):
        keep = yield from coro(*args, **kwargs)
        if not keep:
            del self._connections[token]

def results(self, request):
    "Match results to given term and return the serialized HttpResponse."
    results = {}
    form = self.form(request.GET)
    if form.is_valid():
        options = form.cleaned_data
        term = options.get('term', '')
        raw_data = self.get_query(request, term)
        results = self.format_results(raw_data, options)
    return self.response(results)

def method_selector_fn(self):
    """Gets the method selector from the config.
    """
    if callable(self.json_rpc_method):
        return self.json_rpc_method
    elif isinstance(self.json_rpc_method, (str,)):
        return lambda *_: self.json_rpc_method
    raise ValueError("``json_rpc_method`` config invalid. May be a string or function")

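# A standalone sketch of the same dispatch on a plain value, outside the
# class, showing the two accepted shapes of json_rpc_method; the method
# names are illustrative only.
def _make_selector(json_rpc_method):
    if callable(json_rpc_method):
        return json_rpc_method
    if isinstance(json_rpc_method, str):
        return lambda *_: json_rpc_method
    raise ValueError("must be a string or function")

assert _make_selector('eth_call')('anything') == 'eth_call'
assert _make_selector(lambda *_: 'eth_accounts')() == 'eth_accounts'
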
def _locatedownload(self, remote_path, **kwargs):
    """Download-source lookup, as used by the Baidu Cloud Manager client.

    :param remote_path: path of the file to download
    :type remote_path: str
    """
    params = {
        'path': remote_path
    }
    url = 'https://{0}/rest/2.0/pcs/file'.format(BAIDUPCS_SERVER)
    return self._request('file', 'locatedownload', url=url,
                         extra_params=params, **kwargs)

def from_descriptions(cls, text,
                      lexicon=None,
                      source='CSV',
                      dlm=',',
                      points=False,
                      abbreviations=False,
                      complete=False,
                      order='depth',
                      columns=None,
                      ):
    """Convert a CSV string into a striplog.

    Expects 2 or 3 fields:
        top, description
        OR
        top, base, description

    Args:
        text (str): The input text, given by ``well.other``.
        lexicon (Lexicon): A lexicon, required to extract components.
        source (str): A source. Default: 'CSV'.
        dlm (str): The delimiter, given by ``well.dlm``. Default: ','
        points (bool): Whether to treat as points or as intervals.
        abbreviations (bool): Whether to expand abbreviations in the
            description. Default: False.
        complete (bool): Whether to make 'blank' intervals, or just
            leave gaps. Default: False.
        order (str): The order, 'depth' or 'elevation'. Default: 'depth'.
        columns (tuple or list): The names of the columns.

    Returns:
        Striplog: A ``striplog`` object.

    Example:
        #  TOP      BOT       LITH
        312.34,    459.61,    Sandstone
        459.71,    589.61,    Limestone
        589.71,    827.50,    Green shale
        827.60,    1010.84,   Fine sandstone
    """
    text = re.sub(r'(\n+|\r\n|\r)', '\n', text.strip())

    as_strings = []
    try:
        f = StringIO(text)  # Python 3
    except TypeError:
        f = StringIO(unicode(text))  # Python 2
    reader = csv.reader(f, delimiter=dlm, skipinitialspace=True)
    for row in reader:
        as_strings.append(row)
    f.close()

    if not columns:
        if order[0].lower() == 'e':
            columns = ('base', 'top', 'description')
        else:
            columns = ('top', 'base', 'description')

    result = {k: [] for k in columns}

    # Set the indices for the fields.
    tix = columns.index('top')
    bix = columns.index('base')
    dix = columns.index('description')

    for i, row in enumerate(as_strings):
        # THIS ONLY WORKS FOR MISSING TOPS!
        if len(row) == 2:
            row = [row[0], None, row[1]]

        # TOP
        this_top = float(row[tix])

        # BASE
        # Base is null: use next top if this isn't the end.
        if row[1] is None:
            if i < len(as_strings) - 1:
                this_base = float(as_strings[i + 1][0])  # Next top.
            else:
                this_base = this_top + 1  # Default to 1 m thick at end.
        else:
            this_base = float(row[bix])

        # DESCRIPTION
        this_descr = row[dix].strip()

        # Deal with making intervals or points...
        if not points:
            # Insert intervals where needed.
            if complete and (i > 0) and (this_top != result['base'][-1]):
                result['top'].append(result['base'][-1])
                result['base'].append(this_top)
                result['description'].append('')
        else:
            this_base = None  # Gets set to Top in striplog creation

        # ASSIGN
        result['top'].append(this_top)
        result['base'].append(this_base)
        result['description'].append(this_descr)

    # Build the list.
    list_of_Intervals = []
    for i, t in enumerate(result['top']):
        b = result['base'][i]
        d = result['description'][i]
        interval = Interval(t, b, description=d,
                            lexicon=lexicon,
                            abbreviations=abbreviations)
        list_of_Intervals.append(interval)

    return cls(list_of_Intervals, source=source)

def check_hotkey_unique(self, modifiers, hotKey, newFilterPattern, targetItem):
    """
    Checks that the given hotkey is not already in use. Also checks the
    special hotkeys configured from the advanced settings dialog.

    @param modifiers: modifiers for the hotkey
    @param hotKey: the hotkey to check
    @param newFilterPattern:
    @param targetItem: the phrase for which the hotkey is to be used
    """
    for item in self.allFolders:
        if model.TriggerMode.HOTKEY in item.modes:
            if item.modifiers == modifiers and item.hotKey == hotKey and item.filter_matches(newFilterPattern):
                return item is targetItem, item

    for item in self.allItems:
        if model.TriggerMode.HOTKEY in item.modes:
            if item.modifiers == modifiers and item.hotKey == hotKey and item.filter_matches(newFilterPattern):
                return item is targetItem, item

    for item in self.globalHotkeys:
        if item.enabled:
            if item.modifiers == modifiers and item.hotKey == hotKey and item.filter_matches(newFilterPattern):
                return item is targetItem, item

    return True, None

def insert(self):
    """persist the field values of this orm"""
    ret = True
    schema = self.schema
    fields = self.depopulate(False)

    q = self.query
    q.set_fields(fields)
    pk = q.insert()
    if pk:
        fields = q.fields
        fields[schema.pk.name] = pk
        self._populate(fields)
    else:
        ret = False

    return ret

def get_parser(self, prog_name):
    """Override to add command options."""
    parser = argparse.ArgumentParser(description=self.get_description(),
                                     prog=prog_name,
                                     add_help=False)
    return parser

def get_parent_repository_ids(self, repository_id):
    """Gets the parent ``Ids`` of the given repository.

    arg:    repository_id (osid.id.Id): a repository ``Id``
    return: (osid.id.IdList) - the parent ``Ids`` of the repository
    raise:  NotFound - ``repository_id`` is not found
    raise:  NullArgument - ``repository_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.get_parent_bin_ids
    if self._catalog_session is not None:
        return self._catalog_session.get_parent_catalog_ids(catalog_id=repository_id)
    return self._hierarchy_session.get_parents(id_=repository_id)

def update(self, id, **kwargs):
    """
    Updates an existing post. When the `markdown` property is present, it
    will be automatically converted to `mobiledoc` on v1.+ of the server.

    :param id: The ID of the existing post
    :param kwargs: The properties of the post to change
    :return: The updated `Post` object
    """
    return super(PostController, self).update(id, **self._with_markdown(kwargs))

def connect(self, callback, *args, **kwargs):
    """
    Connects the event with the given callback.
    When the signal is emitted, the callback is invoked.

    .. note::

        The signal handler is stored with a hard reference, so you
        need to make sure to call :meth:`disconnect()` if you want the
        handler to be garbage collected.

    :type  callback: object
    :param callback: The callback function.
    :type  args: tuple
    :param args: Optional arguments passed to the callback.
    :type  kwargs: dict
    :param kwargs: Optional keyword arguments passed to the callback.
    """
    if self.is_connected(callback):
        raise AttributeError('callback is already connected')
    if self.hard_subscribers is None:
        self.hard_subscribers = []
    self.hard_subscribers.append((callback, args, kwargs))

def parse_plotPCA(self):
    """Find plotPCA output"""
    self.deeptools_plotPCAData = dict()
    for f in self.find_log_files('deeptools/plotPCAData', filehandles=False):
        parsed_data = self.parsePlotPCAData(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotPCAData:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotPCAData[k] = v
        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotPCA')

    if len(self.deeptools_plotPCAData) > 0:
        config = {
            'id': 'deeptools_pca_plot',
            'title': 'deeptools: PCA Plot',
            'xlab': 'PC1',
            'ylab': 'PC2',
            'tt_label': 'PC1 {point.x:.2f}: PC2 {point.y:.2f}',
        }
        data = dict()
        for s_name in self.deeptools_plotPCAData:
            try:
                data[s_name] = {
                    'x': self.deeptools_plotPCAData[s_name][1],
                    'y': self.deeptools_plotPCAData[s_name][2],
                }
            except KeyError:
                pass
        if len(data) == 0:
            log.debug('No valid data for PCA plot')
            return None

        self.add_section(
            name="PCA plot",
            anchor="deeptools_pca",
            description="PCA plot with the top two principal components calculated based on genome-wide distribution of sequence reads",
            plot=scatter.plot(data, config)
        )

    return len(self.deeptools_plotPCAData)

def entry_to_matrix(prodigy_entry):
    """
    Take in a line from the labeled json and return a vector of labels
    and a matrix of features for training.

    Two ways to get 0s:
      - marked as false by user
      - generated automatically from other entries when guess is correct

    Rather than iterating through entities, just get the number of the
    correct entity directly. Then get one or two GPEs before and after.
    """
    doc = prodigy_entry['text']
    doc = nlp(doc)
    geo_proced = geo.process_text(doc, require_maj=False)

    # find the geoproced entity that matches the Prodigy entry
    ent_text = np.asarray([gp['word'] for gp in geo_proced])
    # boolean mask for the correct entity (and its complement)
    match = ent_text == prodigy_entry['meta']['word']
    anti_match = ~match
    match_position = match.argmax()
    geo_proc = geo_proced[match_position]

    iso = geo.cts[prodigy_entry['label']]  # convert country text label to ISO
    feat = geo.features_to_matrix(geo_proc)
    answer_x = feat['matrix']
    label = np.asarray(feat['labels'])

    if prodigy_entry['answer'] == "accept":
        answer_binary = label == iso
        answer_binary = answer_binary.astype('int')
    elif prodigy_entry['answer'] == "reject":
        # all we know is that the label that was presented is wrong.
        # just return the corresponding row in the feature matrix,
        # and force the label to be 0
        answer_binary = label == iso
        answer_x = answer_x[answer_binary, :]  # just take the row corresponding to the answer
        answer_binary = np.asarray([0])  # set the outcome to 0 because reject

    # NEED TO SHARE LABELS ACROSS! THE CORRECT ONE MIGHT NOT EVEN APPEAR FOR ALL ENTITIES
    x = feat['matrix']
    other_x = x[anti_match, :]
    # here, need to get the rows corresponding to the correct label

    # here's where we get the other place name features.
    # Need to:
    #  1. do features_to_matrix but use the label of the current entity
    #     to determine 0/1 in the feature matrix
    #  2. put them all into one big feature matrix,
    #  3. ...ordering by distance? And need to decide max entity length
    #  4. also include these distances as one of the features
    try:
        if answer_x.shape[0] == answer_binary.shape[0]:
            return (answer_x, answer_binary)
    except Exception:
        pass

def get_assessment_part_form_for_update(self, assessment_part_id):
    """Gets the assessment part form for updating an existing assessment part.

    A new assessment part form should be requested for each update
    transaction.

    arg:    assessment_part_id (osid.id.Id): the ``Id`` of the
            ``AssessmentPart``
    return: (osid.assessment.authoring.AssessmentPartForm) - the
            assessment part form
    raise:  NotFound - ``assessment_part_id`` is not found
    raise:  NullArgument - ``assessment_part_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('assessment_authoring',
                                     collection='AssessmentPart',
                                     runtime=self._runtime)
    if not isinstance(assessment_part_id, ABCId):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    if (assessment_part_id.get_identifier_namespace() != 'assessment_authoring.AssessmentPart' or
            assessment_part_id.get_authority() != self._authority):
        raise errors.InvalidArgument()
    result = collection.find_one({'_id': ObjectId(assessment_part_id.get_identifier())})

    mdata = {}
    if not result['assessmentPartId']:
        pass
    else:
        parent_part_id = Id(result['assessmentPartId'])
        mgr = self._get_provider_manager('ASSESSMENT_AUTHORING', local=True)
        lookup_session = mgr.get_assessment_part_lookup_session_for_bank(self._catalog_id,
                                                                         proxy=self._proxy)
        if lookup_session.get_assessment_parts_for_assessment_part(parent_part_id).available() > 1:
            # the nested dict must exist before its flags are set
            mdata['sequestered'] = {}
            mdata['sequestered']['is_read_only'] = True
            mdata['sequestered']['is_required'] = True
    obj_form = objects.AssessmentPartForm(osid_object_map=result,
                                          runtime=self._runtime,
                                          proxy=self._proxy,
                                          mdata=mdata)
    self._forms[obj_form.get_id().get_identifier()] = not UPDATED
    return obj_form

def _athlete_endpoint(self, athlete):
    """Construct athlete endpoint from host and athlete name

    Keyword arguments:
    athlete -- Full athlete name
    """
    return '{host}{athlete}'.format(
        host=self.host,
        athlete=quote_plus(athlete)
    )

def doesNotMatch(self, value, caseSensitive=True):
    """
    Sets the operator type to Query.Op.DoesNotMatch and sets the
    value to the inputted value.

    :param      value | <variant>

    :return     self    (useful for chaining)

    :usage      |>>> from orb import Query as Q
                |>>> query = Q('comments').doesNotMatch('test')
                |>>> print query
                |comments does_not_match test
    """
    newq = self.copy()
    newq.setOp(Query.Op.DoesNotMatch)
    newq.setValue(value)
    newq.setCaseSensitive(caseSensitive)
    return newq

def reset(cls):
    """ Reset to default settings """
    cls.debug = False
    cls.disabled = False
    cls.overwrite = False
    cls.playback_only = False
    cls.recv_timeout = 5
    cls.recv_endmarkers = []
    cls.recv_size = None

def InitializeDebuggeeLabels(self, flags):
    """Initialize debuggee labels from environment variables and flags.

    The caller passes all the flags that the debuglet got. This function
    will only use the flags used to label the debuggee. Flags take
    precedence over environment variables.

    Debuggee description is formatted from available flags.

    Args:
        flags: dictionary of debuglet command line flags.
    """
    self._debuggee_labels = {}

    for (label, var_names) in six.iteritems(_DEBUGGEE_LABELS):
        # var_names is a list of possible environment variables that may
        # contain the label value. Find the first one that is set.
        for name in var_names:
            value = os.environ.get(name)
            if value:
                # Special case for module. We omit the "default" module
                # to stay consistent with AppEngine.
                if label == labels.Debuggee.MODULE and value == 'default':
                    break
                self._debuggee_labels[label] = value
                break

    if flags:
        self._debuggee_labels.update(
            {name: value for (name, value) in six.iteritems(flags)
             if name in _DEBUGGEE_LABELS})

    self._debuggee_labels['projectid'] = self._project_id

def network_deconvolution(mat, **kwargs):
    """Python implementation/translation of network deconvolution by MIT-KELLIS LAB.

    .. note::
       Code author: gidonro [Github username](https://github.com/gidonro/Network-Deconvolution)

       LICENSE: MIT-KELLIS LAB

       AUTHORS:
       The algorithm was programmed by Soheil Feizi.
       Paper authors are S. Feizi, D. Marbach, M. Médard and M. Kellis.
       Python implementation: Gideon Rosenthal

       For more details, see the following paper:
       Network Deconvolution as a General Method to Distinguish
       Direct Dependencies over Networks
       By: Soheil Feizi, Daniel Marbach, Muriel Médard and Manolis Kellis
       Nature Biotechnology

    Args:
        mat (numpy.ndarray): matrix; if it is a square matrix, the program
            assumes it is a relevance matrix where mat(i, j) represents the
            similarity content between nodes i and j. Elements of the matrix
            should be non-negative.
        beta (float): Scaling parameter. The program maps the largest
            absolute eigenvalue of the direct dependency matrix to beta.
            It should be between 0 and 1.
        alpha (float): fraction of edges of the observed dependency matrix
            to be kept in the deconvolution process.
        control (int): if 0, display direct weights for observed
            interactions; if 1, display direct weights for both observed
            and non-observed interactions.

    Returns:
        mat_nd (numpy.ndarray): Output deconvolved matrix (direct dependency
            matrix). Its components represent direct edge weights of observed
            interactions. Choosing top direct interactions (a cut-off)
            depends on the application and is not implemented in this code.

    .. note::
       To apply ND on regulatory networks, follow steps explained in
       Supplementary notes 1.4.1 and 2.1 and 2.3 of the paper.
       In this implementation, input matrices are made symmetric.
    """
    alpha = kwargs.get('alpha', 1)
    beta = kwargs.get('beta', 0.99)
    control = kwargs.get('control', 0)

    try:
        assert 0 < beta < 1
        assert 0 < alpha <= 1
    except AssertionError:
        raise ValueError("alpha must be in ]0, 1] and beta in ]0, 1[")

    # Processing the input matrix: diagonal values are filtered
    np.fill_diagonal(mat, 0)

    # Thresholding the input matrix
    y = stat.mquantiles(mat[:], prob=[1 - alpha])
    th = mat >= y
    mat_th = mat * th

    # Making the matrix symmetric if it already isn't
    mat_th = (mat_th + mat_th.T) / 2

    # Eigen decomposition
    Dv, U = LA.eigh(mat_th)
    D = np.diag(Dv)
    # clamp the extreme eigenvalues against 0 with the builtin min/max
    # (np.min(x, 0) would mean axis=0, not MATLAB's min(x, 0))
    lam_n = np.abs(min(np.min(np.diag(D)), 0))
    lam_p = np.abs(max(np.max(np.diag(D)), 0))

    m1 = lam_p * (1 - beta) / beta
    m2 = lam_n * (1 + beta) / beta
    m = max(m1, m2)

    # network deconvolution
    for i in range(D.shape[0]):
        D[i, i] = (D[i, i]) / (m + D[i, i])

    mat_new1 = np.dot(U, np.dot(D, LA.inv(U)))

    # Displaying direct weights
    if control == 0:
        ind_edges = (mat_th > 0) * 1.0
        ind_nonedges = (mat_th == 0) * 1.0
        m1 = np.max(mat * ind_nonedges)
        m2 = np.min(mat_new1)
        mat_new2 = (mat_new1 + max(m1 - m2, 0)) * ind_edges + (mat * ind_nonedges)
    else:
        m2 = np.min(mat_new1)
        mat_new2 = (mat_new1 + max(-m2, 0))

    # linearly mapping the deconvolved matrix to be between 0 and 1
    m1 = np.min(mat_new2)
    m2 = np.max(mat_new2)
    mat_nd = (mat_new2 - m1) / (m2 - m1)

    return mat_nd

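# Usage sketch on a small symmetric relevance matrix; the values are
# illustrative only, and np must already be imported as in the module above.
# mat = np.array([[0.0, 0.9, 0.8],
#                 [0.9, 0.0, 0.1],
#                 [0.8, 0.1, 0.0]])
# direct = network_deconvolution(mat, alpha=1, beta=0.9)
# direct has the same shape as mat, linearly rescaled into [0, 1].
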
def install(self):
    """
    Installation procedure: writes a basic smb.conf and uses samba-tool
    to provision the domain.
    """
    domain_settings = DomainSettings.get()

    with root():
        if os.path.exists(self.SMBCONF_FILE):
            os.remove(self.SMBCONF_FILE)

        if domain_settings.mode == 'ad':
            domain_settings.adminpass = make_password(15)
            domain_settings.save()
            run("samba-tool domain provision "
                "--domain='%s' "
                "--workgroup='%s' "
                "--realm='%s' "
                "--use-xattrs=yes "
                "--use-rfc2307 "
                "--server-role='domain controller' "
                "--use-ntvfs "
                "--adminpass='%s'" % (domain_settings.domain,
                                      domain_settings.workgroup,
                                      domain_settings.realm,
                                      domain_settings.adminpass))
            self.smbconf.write()
            shutil.copy2(self.SMB_KRB5CONF_FILE, self.KRB5CONF_FILE)
            # XXX FIXME move this to network
            run("echo 'nameserver 127.0.0.1' > /etc/resolv.conf")
            # TODO manage shares
            run("touch /etc/samba/shares.conf")
        elif domain_settings.mode == 'member':
            # TODO
            pass

def metablock(parsed):
    """ Remove HTML tags, entities and superfluous characters from meta blocks. """
    parsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",")
    return escape(strip_tags(decode_entities(parsed)))

def scrollright(self, window_name, object_name):
    """
    Scroll right.

    @param window_name: Window name to type in, either full name,
        LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
        LDTP's name convention, or a Unix glob.
    @type object_name: string

    @return: 1 on success.
    @rtype: integer
    """
    if not self.verifyscrollbarhorizontal(window_name, object_name):
        raise LdtpServerException('Object not horizontal scrollbar')
    return self.setmax(window_name, object_name)

def compile_expression(source): """ THIS FUNCTION IS ON ITS OWN FOR MINIMAL GLOBAL NAMESPACE :param source: PYTHON SOURCE CODE :return: PYTHON FUNCTION """ # FORCE MODULES TO BE IN NAMESPACE _ = coalesce _ = listwrap _ = Date _ = convert _ = Log _ = Data _ = EMPTY_DICT _ = re _ = wrap_leaves _ = is_data fake_locals = {} try: exec( """ def output(row, rownum=None, rows=None): _source = """ + convert.value2quote(source) + """ try: return """ + source + """ except Exception as e: Log.error("Problem with dynamic function {{func|quote}}", func=_source, cause=e) """, globals(), fake_locals ) except Exception as e: Log.error("Bad source: {{source}}", source=source, cause=e) return fake_locals['output']
THIS FUNCTION IS ON ITS OWN FOR MINIMAL GLOBAL NAMESPACE :param source: PYTHON SOURCE CODE :return: PYTHON FUNCTION
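A hypothetical usage sketch, assuming the mo-logs/mo-dots style helpers the module imports (convert, Log, and friends) resolve; the expression and row shape are made up.

# Compile a per-row expression into a callable; `row` is whatever object
# the caller passes in -- here a plain dict indexed with subscripts.
add_one = compile_expression("row['count'] + 1")
print(add_one({'count': 41}))                    # -> 42
print(add_one({'count': 0}, rownum=0, rows=[]))  # -> 1; extra args accepted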
def eval(self, key, default=None, loc=None, correct_key=True): """Evaluates and sets the specified option value in environment `loc`. Many options need ``N`` to be defined in `loc`, some need `popsize`. Details ------- Keys that contain 'filename' are not evaluated. For `loc` is None, the self-dict is used as environment :See: `evalall()`, `__call__` """ # TODO: try: loc['dim'] = loc['N'] etc if correct_key: # in_key = key # for debugging only key = self.corrected_key(key) self[key] = self(key, default, loc) return self[key]
Evaluates and sets the specified option value in environment `loc`. Many options need ``N`` to be defined in `loc`, some need `popsize`. Details ------- Keys that contain 'filename' are not evaluated. For `loc` is None, the self-dict is used as environment :See: `evalall()`, `__call__`
def _scale(self, image, width, height): """ Does the resizing of the image """ image['options']['scale'] = '%sx%s!' % (width, height) image['size'] = (width, height) # update image size return image
Does the resizing of the image
def _get_destination_paths(self):
    # type: (Uploader) ->
    #       Tuple[blobxfer.operations.azure.StorageAccount, str, str, str]
    """Get destination paths
    :param Uploader self: this
    :rtype: generator of tuples
    :return: (storage account, container, name, dpath)
    """
    for dst in self._spec.destinations:
        for dpath in dst.paths:
            sdpath = str(dpath)
            cont, dir = blobxfer.util.explode_azure_path(sdpath)
            sa = self._creds.get_storage_account(
                dst.lookup_storage_account(sdpath))
            yield sa, cont, dir, dpath
Get destination paths
:param Uploader self: this
:rtype: generator of tuples
:return: (storage account, container, name, dpath)
def ranges(self, start=None, stop=None): """Generate MappedRanges for all mapped ranges. Yields: MappedRange """ _check_start_stop(start, stop) start_loc = self._bisect_right(start) if stop is None: stop_loc = len(self._keys) else: stop_loc = self._bisect_left(stop) start_val = self._values[start_loc - 1] candidate_keys = [start] + self._keys[start_loc:stop_loc] + [stop] candidate_values = [start_val] + self._values[start_loc:stop_loc] for i, value in enumerate(candidate_values): if value is not NOT_SET: start_key = candidate_keys[i] stop_key = candidate_keys[i + 1] yield MappedRange(start_key, stop_key, value)
Generate MappedRanges for all mapped ranges. Yields: MappedRange
def frombools(cls, bools=()): """Create a set from an iterable of boolean evaluable items.""" return cls.fromint(sum(compress(cls._atoms, bools)))
Create a set from an iterable of boolean evaluable items.
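The one-liner leans on class attributes (_atoms, fromint) defined elsewhere; a self-contained stand-in showing the trick -- compress() selects the bit values of the chosen members and their sum is the integer bit pattern -- might look like this (the Flags class is invented for illustration):

from itertools import compress


class Flags(frozenset):
    members = ('a', 'b', 'c')
    _atoms = (1, 2, 4)  # bit value assigned to each member

    @classmethod
    def fromint(cls, n):
        # decode an integer bit pattern back into a set of members
        return cls(m for m, bit in zip(cls.members, cls._atoms) if n & bit)

    @classmethod
    def frombools(cls, bools=()):
        # sum of the selected atoms == integer with exactly those bits set
        return cls.fromint(sum(compress(cls._atoms, bools)))


print(sorted(Flags.frombools([True, False, True])))  # ['a', 'c']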
def execute_input_middleware_stream(self, request, controller):
    """
    The request comes from the controller; a (possibly modified) request
    is returned. The controller arg is the controller instance.
    """
    start_request = request
    # either 'http' or 'cmd' or 'irc'
    controller_name = "".join(controller.get_controller_name().split('-')[:1])
    middlewares = list(self.pre_input_middleware) + list(self.input_middleware)
    for m in middlewares:
        to_execute = getattr(m(controller), controller_name)
        if to_execute:
            result = to_execute(request)
            if GiottoControl in type(result).mro():
                # a middleware class returned a control object (redirection, et al.)
                # ignore all other middleware classes
                return request, result
            request = result
    return start_request, request
The request comes from the controller; a (possibly modified) request is returned.
The controller arg is the controller instance.
def tokenProgressFunc(state="update", action=None, text=None, tick=0): """ state: string, "update", "reading sources", "wrapping up" action: string, "stop", "start" text: string, value, additional parameter. For instance ufoname. tick: a float between 0 and 1 indicating progress. """ print("tokenProgressFunc %s: %s\n%s (%s)"%(state, str(action), str(text), str(tick)))
state: string, "update", "reading sources", "wrapping up" action: string, "stop", "start" text: string, value, additional parameter. For instance ufoname. tick: a float between 0 and 1 indicating progress.
def set_settings(self, releases=None, default_release=None): """set path to storage""" super(ShardedClusters, self).set_settings(releases, default_release) ReplicaSets().set_settings(releases, default_release)
set path to storage
def generation_fluctuating(self):
    """
    Get generation time series of fluctuating renewables (only active power)

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        See class definition for details.

    """
    try:
        return self._generation_fluctuating.loc[[self.timeindex], :]
    except Exception:
        return self._generation_fluctuating.loc[self.timeindex, :]
Get generation time series of fluctuating renewables (only active power) Returns ------- :pandas:`pandas.DataFrame<dataframe>` See class definition for details.
def create_image_summary(name, val):
    """
    Args:
        name(str):
        val(np.ndarray): 4D tensor of NHWC. assume RGB if C==3.
            Can be either float or uint8. Range has to be [0,255].

    Returns:
        tf.Summary:
    """
    assert isinstance(name, six.string_types), type(name)
    n, h, w, c = val.shape
    val = val.astype('uint8')
    s = tf.Summary()
    imparams = [cv2.IMWRITE_PNG_COMPRESSION, 9]
    for k in range(n):
        arr = val[k]
        # CV2 will only write correctly in BGR channel order
        if c == 3:
            arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
        elif c == 4:
            arr = cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA)
        tag = name if n == 1 else '{}/{}'.format(name, k)
        retval, img_str = cv2.imencode('.png', arr, imparams)
        if not retval:
            # Encoding has failed.
            continue
        img_str = img_str.tobytes()

        img = tf.Summary.Image()
        img.height = h
        img.width = w
        # 1 - grayscale 3 - RGB 4 - RGBA
        img.colorspace = c
        img.encoded_image_string = img_str
        s.value.add(tag=tag, image=img)
    return s
Args: name(str): val(np.ndarray): 4D tensor of NHWC. assume RGB if C==3. Can be either float or uint8. Range has to be [0,255]. Returns: tf.Summary:
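A usage sketch, assuming the module's own imports (tensorflow as tf, cv2, numpy as np, six), as in tensorpack; the image data here is random noise.

import numpy as np

# One 32x32 RGB image in NHWC layout; values must already be in [0, 255].
val = np.random.randint(0, 256, size=(1, 32, 32, 3)).astype('float32')
summ = create_image_summary('noise', val)
# The returned tf.Summary can be written out for TensorBoard, e.g.:
# writer.add_summary(summ, global_step)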
def _base_repr(self, and_also=None): """Common repr logic for subclasses to hook """ items = [ "=".join((key, repr(getattr(self, key)))) for key in sorted(self._fields.keys())] if items: output = ", ".join(items) else: output = None if and_also: return "{}({}, {})".format(self.__class__.__name__, output, and_also) else: return "{}({})".format(self.__class__.__name__, output)
Common repr logic for subclasses to hook
def build_arg_parser(): """ Build an argument parser using argparse. Use it when python version is 2.7 or later. """ parser = argparse.ArgumentParser(description="Smatch table calculator -- arguments") parser.add_argument("--fl", type=argparse.FileType('r'), help='AMR ID list file') parser.add_argument('-f', nargs='+', help='AMR IDs (at least one)') parser.add_argument("-p", nargs='*', help="User list (can be none)") parser.add_argument("--fd", default=isi_dir_pre, help="AMR File directory. Default=location on isi machine") parser.add_argument('-r', type=int, default=4, help='Restart number (Default:4)') parser.add_argument('-v', action='store_true', help='Verbose output (Default:False)') return parser
Build an argument parser using argparse. Use it when python version is 2.7 or later.
def calculate_trip_shape_breakpoints(conn): """Pre-compute the shape points corresponding to each trip's stop. Depends: shapes""" from gtfspy import shapes cur = conn.cursor() breakpoints_cache = {} # Counters for problems - don't print every problem. count_bad_shape_ordering = 0 count_bad_shape_fit = 0 count_no_shape_fit = 0 trip_Is = [x[0] for x in cur.execute('SELECT DISTINCT trip_I FROM stop_times').fetchall()] for trip_I in trip_Is: # Get the shape points row = cur.execute('''SELECT shape_id FROM trips WHERE trip_I=?''', (trip_I,)).fetchone() if row is None: continue shape_id = row[0] if shape_id is None or shape_id == '': continue # Get the stop points cur.execute('''SELECT seq, lat, lon, stop_id FROM stop_times LEFT JOIN stops USING (stop_I) WHERE trip_I=? ORDER BY seq''', (trip_I,)) #print '%20s, %s'%(run_code, datetime.fromtimestamp(run_sch_starttime)) stop_points = [dict(seq=row[0], lat=row[1], lon=row[2], stop_I=row[3]) for row in cur if row[1] and row[2]] # Calculate a cache key for this sequence. # If both shape_id, and all stop_Is are same, then we can re-use existing breakpoints: cache_key = (shape_id, tuple(x['stop_I'] for x in stop_points)) if cache_key in breakpoints_cache: breakpoints = breakpoints_cache[cache_key] else: # Must re-calculate breakpoints: shape_points = shapes.get_shape_points(cur, shape_id) breakpoints, badness \ = shapes.find_segments(stop_points, shape_points) if breakpoints != sorted(breakpoints): # route_name, route_id, route_I, trip_id, trip_I = \ # cur.execute('''SELECT name, route_id, route_I, trip_id, trip_I # FROM trips LEFT JOIN routes USING (route_I) # WHERE trip_I=? LIMIT 1''', (trip_I,)).fetchone() # print "Ignoring: Route with bad shape ordering:", route_name, route_id, route_I, trip_id, trip_I count_bad_shape_ordering += 1 # select * from stop_times where trip_I=NNNN order by shape_break; breakpoints_cache[cache_key] = None continue # Do not set shape_break for this trip. # Add it to cache breakpoints_cache[cache_key] = breakpoints if badness > 30 * len(breakpoints): #print "bad shape fit: %s (%s, %s, %s)" % (badness, trip_I, shape_id, len(breakpoints)) count_bad_shape_fit += 1 if breakpoints is None: continue if len(breakpoints) == 0: # No valid route could be identified. #print "Ignoring: No shape identified for trip_I=%s, shape_id=%s" % (trip_I, shape_id) count_no_shape_fit += 1 continue # breakpoints is the corresponding points for each stop assert len(breakpoints) == len(stop_points) cur.executemany('UPDATE stop_times SET shape_break=? ' 'WHERE trip_I=? AND seq=? ', ((int(bkpt), int(trip_I), int(stpt['seq'])) for bkpt, stpt in zip(breakpoints, stop_points))) if count_bad_shape_fit > 0: print(" Shape trip breakpoints: %s bad fits" % count_bad_shape_fit) if count_bad_shape_ordering > 0: print(" Shape trip breakpoints: %s bad shape orderings" % count_bad_shape_ordering) if count_no_shape_fit > 0: print(" Shape trip breakpoints: %s no shape fits" % count_no_shape_fit) conn.commit()
Pre-compute the shape points corresponding to each trip's stop. Depends: shapes
def _decompose(net, wv_map, mems, block_out): """ Add the wires and logicnets to block_out and wv_map to decompose net """ def arg(x, i): # return the mapped wire vector for argument x, wire number i return wv_map[(net.args[x], i)] def destlen(): # return iterator over length of the destination in bits return range(len(net.dests[0])) def assign_dest(i, v): # assign v to the wiremap for dest[0], wire i wv_map[(net.dests[0], i)] <<= v one_var_ops = { 'w': lambda w: w, '~': lambda w: ~w, } c_two_var_ops = { '&': lambda l, r: l & r, '|': lambda l, r: l | r, '^': lambda l, r: l ^ r, 'n': lambda l, r: l.nand(r), } if net.op in one_var_ops: for i in destlen(): assign_dest(i, one_var_ops[net.op](arg(0, i))) elif net.op in c_two_var_ops: for i in destlen(): assign_dest(i, c_two_var_ops[net.op](arg(0, i), arg(1, i))) elif net.op == 's': for i in destlen(): selected_bit = arg(0, net.op_param[i]) assign_dest(i, selected_bit) elif net.op == 'c': arg_wirelist = [] # generate list of wires for vectors being concatenated for arg_vector in net.args: arg_vector_as_list = [wv_map[(arg_vector, i)] for i in range(len(arg_vector))] arg_wirelist = arg_vector_as_list + arg_wirelist for i in destlen(): assign_dest(i, arg_wirelist[i]) elif net.op == 'r': for i in destlen(): args = (arg(0, i),) dests = (wv_map[(net.dests[0], i)],) new_net = LogicNet('r', None, args=args, dests=dests) block_out.add_net(new_net) elif net.op == 'm': arg0list = [arg(0, i) for i in range(len(net.args[0]))] addr = concat_list(arg0list) new_mem = _get_new_block_mem_instance(net.op_param, mems, block_out)[1] data = as_wires(new_mem[addr]) for i in destlen(): assign_dest(i, data[i]) elif net.op == '@': addrlist = [arg(0, i) for i in range(len(net.args[0]))] addr = concat_list(addrlist) datalist = [arg(1, i) for i in range(len(net.args[1]))] data = concat_list(datalist) enable = arg(2, 0) new_mem = _get_new_block_mem_instance(net.op_param, mems, block_out)[1] new_mem[addr] <<= MemBlock.EnabledWrite(data=data, enable=enable) else: raise PyrtlInternalError('Unable to synthesize the following net ' 'due to unimplemented op :\n%s' % str(net)) return
Add the wires and logicnets to block_out and wv_map to decompose net
def split_flanks(self, _, result): """Return `result` without flanking whitespace. """ if not result.strip(): self.left, self.right = "", "" return result match = self.flank_re.match(result) assert match, "This regexp should always match" self.left, self.right = match.group(1), match.group(3) return match.group(2)
Return `result` without flanking whitespace.
def update_config(cls, config_file, config):
    """ Update configuration if needed. """
    need_save = False

    # delete old env key
    if 'api' in config and 'env' in config['api']:
        del config['api']['env']
        need_save = True

    # convert old ssh_key configuration entry
    ssh_key = config.get('ssh_key')
    sshkeys = config.get('sshkey')
    if ssh_key and not sshkeys:
        config.update({'sshkey': [ssh_key]})
        need_save = True
    elif ssh_key and sshkeys:
        # list.append returns None, so append first and then store the list
        sshkeys.append(ssh_key)
        config.update({'sshkey': sshkeys})
        need_save = True

    # remove old value
    if ssh_key:
        del config['ssh_key']
        need_save = True

    # save to disk
    if need_save:
        cls.save(config_file, config)
Update configuration if needed.
def _coerceSingleRepetition(self, dataSet): """ Make a new liveform with our parameters, and get it to coerce our data for us. """ # make a liveform because there is some logic in _coerced form = LiveForm(lambda **k: None, self.parameters, self.name) return form.fromInputs(dataSet)
Make a new liveform with our parameters, and get it to coerce our data for us.
def os_details(): """ Returns a dictionary containing details about the operating system """ # Compute architecture and linkage bits, linkage = platform.architecture() results = { # Machine details "platform.arch.bits": bits, "platform.arch.linkage": linkage, "platform.machine": platform.machine(), "platform.process": platform.processor(), "sys.byteorder": sys.byteorder, # OS details "os.name": os.name, "host.name": socket.gethostname(), "sys.platform": sys.platform, "platform.system": platform.system(), "platform.release": platform.release(), "platform.version": platform.version(), "encoding.filesystem": sys.getfilesystemencoding(), } # Paths and line separators for name in "sep", "altsep", "pathsep", "linesep": results["os.{0}".format(name)] = getattr(os, name, None) try: # Available since Python 3.4 results["os.cpu_count"] = os.cpu_count() except AttributeError: results["os.cpu_count"] = None try: # Only for Unix # pylint: disable=E1101 results["sys.dlopenflags"] = sys.getdlopenflags() except AttributeError: results["sys.dlopenflags"] = None return results
Returns a dictionary containing details about the operating system
def _find_min_start(text, max_width, unicode_aware=True, at_end=False):
    """
    Find the starting point in the string that will reduce it to be less than or
    equal to the specified width when displayed on screen.

    :param text: The text to analyze.
    :param max_width: The required maximum width
    :param unicode_aware: Whether to measure widths with wcwidth (handling
        double-width characters) or assume one column per character.
    :param at_end: At the end of the editable line, so allow space for the cursor.

    :return: The offset within `text` to start at to reduce it to the required length.
    """
    # Is the solution trivial?  Worth optimizing for text heavy UIs...
    if 2 * len(text) < max_width:
        return 0

    # OK - do it the hard way...
    result = 0
    string_len = wcswidth if unicode_aware else len
    char_len = wcwidth if unicode_aware else lambda x: 1
    display_end = string_len(text)
    while display_end > max_width:
        result += 1
        display_end -= char_len(text[0])
        text = text[1:]
    if at_end and display_end == max_width:
        result += 1
    return result
Find the starting point in the string that will reduce it to be less than or
equal to the specified width when displayed on screen.

:param text: The text to analyze.
:param max_width: The required maximum width
:param unicode_aware: Whether to measure widths with wcwidth (handling
    double-width characters) or assume one column per character.
:param at_end: At the end of the editable line, so allow space for the cursor.

:return: The offset within `text` to start at to reduce it to the required length.
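With unicode_aware=False the function degrades to plain len() and one column per character, so a quick sketch needs no wcwidth dependency:

text = "the quick brown fox"  # 19 display columns
start = _find_min_start(text, 10, unicode_aware=False)
print(start)         # 9
print(text[start:])  # " brown fox" -- the last 10 columns fit the width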
def build_command(self, command_name, **kwargs):
    """build command from command_name and keyword values

    Returns
    -------
    command_bitvector : bitarray
        Assembled command bit vector.

    Usage
    -----
    Receives: command name as defined inside xml file, key-value-pairs as
    defined inside bit stream field for each command
    """
    command_bitvector = bitarray(0, endian='little')
    if command_name not in self.commands:
        raise ValueError('Unknown command %s' % command_name)
    command_object = self.commands[command_name]
    command_parts = re.split(r'\s*[+]\s*', command_object['bitstream'])
    for part in command_parts:  # loop over command parts
        try:
            command_part_object = self.commands[part]
        except KeyError:
            command_part_object = None
        if command_part_object and 'bitstream' in command_part_object:
            # command parts of defined content and length, e.g. Slow, ...
            if string_is_binary(command_part_object['bitstream']):
                command_bitvector += bitarray(command_part_object['bitstream'], endian='little')
            else:
                command_bitvector += self.build_command(part, **kwargs)
        elif command_part_object:
            # Command parts with any content of defined length, e.g. ChipID, Address, ...
            if part in kwargs:
                value = kwargs[part]
            else:
                raise ValueError('Value of command part %s not given' % part)
            try:
                command_bitvector += value
            except TypeError:  # value is no bitarray
                if string_is_binary(value):
                    value = int(value, 2)
                try:
                    command_bitvector += bitarray_from_value(value=int(value), size=command_part_object['bitlength'], fmt='I')
                except Exception:
                    raise TypeError("Type of value not supported")
        elif string_is_binary(part):
            command_bitvector += bitarray(part, endian='little')
        else:
            raise ValueError("Cannot process command part %s" % part)
    if command_bitvector.length() != command_object['bitlength']:
        raise ValueError("Command has unexpected length")
    if command_bitvector.length() == 0:
        raise ValueError("Command has length 0")
    return command_bitvector
build command from command_name and keyword values

Returns
-------
command_bitvector : bitarray
    Assembled command bit vector.

Usage
-----
Receives: command name as defined inside xml file, key-value-pairs as
defined inside bit stream field for each command
def run_ffitch(distfile, outtreefile, intreefile=None, **kwargs): """ Infer tree branch lengths using ffitch in EMBOSS PHYLIP """ cl = FfitchCommandline(datafile=distfile, outtreefile=outtreefile, \ intreefile=intreefile, **kwargs) r, e = cl.run() if e: print("***ffitch could not run", file=sys.stderr) return None else: print("ffitch:", cl, file=sys.stderr) return outtreefile
Infer tree branch lengths using ffitch in EMBOSS PHYLIP
def get_texture(self, label: str) -> Union[moderngl.Texture, moderngl.TextureArray, moderngl.Texture3D, moderngl.TextureCube]: """ Get a texture by label Args: label (str): The label for the texture to fetch Returns: Texture instance """ return self._get_resource(label, self._textures, "texture")
Get a texture by label Args: label (str): The label for the texture to fetch Returns: Texture instance
def receive(organization=None, user=None, team=None, credential_type=None, credential=None, notification_template=None, inventory_script=None, inventory=None, project=None, job_template=None, workflow=None, all=None): """Export assets from Tower. 'tower receive' exports one or more assets from a Tower instance For all of the possible assets types the TEXT can either be the assets name (or username for the case of a user) or the keyword all. Specifying all will export all of the assets of that type. """ from tower_cli.cli.transfer.receive import Receiver receiver = Receiver() assets_to_export = {} for asset_type in SEND_ORDER: assets_to_export[asset_type] = locals()[asset_type] receiver.receive(all=all, asset_input=assets_to_export)
Export assets from Tower. 'tower receive' exports one or more assets from a Tower instance For all of the possible assets types the TEXT can either be the assets name (or username for the case of a user) or the keyword all. Specifying all will export all of the assets of that type.
def calculate_integral_over_T(self, T1, T2, method):
    r'''Method to calculate the integral of a property over temperature
    with respect to temperature, using a specified method. Uses SciPy's
    `quad` function to perform the integral, with no options.

    This method can be overwritten by subclasses that may prefer to add
    analytical methods for some or all methods as this is much faster.

    If the calculation does not succeed, returns the actual error
    encountered.

    Parameters
    ----------
    T1 : float
        Lower limit of integration, [K]
    T2 : float
        Upper limit of integration, [K]
    method : str
        Method for which to find the integral

    Returns
    -------
    integral : float
        Calculated integral of the property over the given range,
        [`units`]
    '''
    return float(quad(lambda T: self.calculate(T, method)/T, T1, T2)[0])
Method to calculate the integral of a property over temperature
with respect to temperature, using a specified method. Uses SciPy's
`quad` function to perform the integral, with no options.

This method can be overwritten by subclasses that may prefer to add
analytical methods for some or all methods as this is much faster.

If the calculation does not succeed, returns the actual error
encountered.

Parameters
----------
T1 : float
    Lower limit of integration, [K]
T2 : float
    Upper limit of integration, [K]
method : str
    Method for which to find the integral

Returns
-------
integral : float
    Calculated integral of the property over the given range,
    [`units`]
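Inlined, the method computes the integral of property(T)/T over [T1, T2]; for a heat capacity this is an ideal-gas entropy change. A standalone sketch of the same quadrature (the linear Cp is made up):

from scipy.integrate import quad

# Hypothetical heat capacity in J/(mol*K); dS = integral of Cp(T)/T dT.
Cp = lambda T: 29.0 + 0.005 * T
dS = quad(lambda T: Cp(T) / T, 300.0, 400.0)[0]
print(dS)  # ~8.84 = 29*ln(400/300) + 0.005*(400 - 300)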
def encode(data, version=0, level=QR_ECLEVEL_L, hint=QR_MODE_8,
           case_sensitive=True):
    """Creates a QR-Code from string data.

    Args:
      data: string: The data to encode in a QR-code. If a unicode string is
          supplied, it will be encoded in UTF-8.
      version: int: The minimum version to use. If set to 0, the library picks
          the smallest version that the data fits in.
      level: int: Error correction level. Defaults to 'L'.
      hint: int: The type of data to encode. Either QR_MODE_8 or
          QR_MODE_KANJI.
      case_sensitive: bool: Should string data be encoded case-preserving?

    Returns:
      A (version, size, image) tuple, where image is a size*size PIL image of
      the QR-code.
    """
    if isinstance(data, unicode):
        data = data.encode('utf8')
    elif not isinstance(data, basestring):
        raise ValueError('data argument must be a string.')
    version = int(version)
    if level not in levels:
        raise ValueError('Invalid error-correction level.')
    if hint not in hints:
        raise ValueError('Invalid encoding mode.')
    version, size, data = _encode(data, version, level, hint,
                                  bool(case_sensitive))
    im = Image.frombytes('L', (size, size), data)
    return (version, size, im)
Creates a QR-Code from string data. Args: data: string: The data to encode in a QR-code. If a unicode string is supplied, it will be encoded in UTF-8. version: int: The minimum version to use. If set to 0, the library picks the smallest version that the data fits in. level: int: Error correction level. Defaults to 'L'. hint: int: The type of data to encode. Either QR_MODE_8 or QR_MODE_KANJI. case_sensitive: bool: Should string data be encoded case-preserving? Returns: A (version, size, image) tuple, where image is a size*size PIL image of the QR-code.
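A usage sketch, assuming this function lives in a qrencode-style binding whose C-level _encode and PIL import are available:

version, size, im = encode('hello world')
im = im.resize((size * 10, size * 10))  # 1 pixel per module is tiny; scale up
im.save('hello.png')
print(version, size)  # e.g. 1 21 for short inputs at level L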
def _all_get_or_create_table(self, where, tablename, description, expectedrows=None):
    """Creates a new table, or if the table already exists, returns it."""
    where_node = self._hdf5file.get_node(where)

    if tablename not in where_node:
        if expectedrows is not None:
            table = self._hdf5file.create_table(where=where_node, name=tablename,
                                                description=description, title=tablename,
                                                expectedrows=expectedrows,
                                                filters=self._all_get_filters())
        else:
            table = self._hdf5file.create_table(where=where_node, name=tablename,
                                                description=description, title=tablename,
                                                filters=self._all_get_filters())
    else:
        table = where_node._f_get_child(tablename)

    return table
Creates a new table, or if the table already exists, returns it.
def create(provider, names, opts=None, **kwargs): ''' Create an instance using Salt Cloud CLI Example: .. code-block:: bash salt minionname cloud.create my-ec2-config myinstance image=ami-1624987f size='t1.micro' ssh_username=ec2-user securitygroup=default delvol_on_destroy=True ''' client = _get_client() if isinstance(opts, dict): client.opts.update(opts) info = client.create(provider, names, **kwargs) return info
Create an instance using Salt Cloud CLI Example: .. code-block:: bash salt minionname cloud.create my-ec2-config myinstance image=ami-1624987f size='t1.micro' ssh_username=ec2-user securitygroup=default delvol_on_destroy=True
def _tilequeue_rawr_setup(cfg): """command to read from rawr queue and generate rawr tiles""" rawr_yaml = cfg.yml.get('rawr') assert rawr_yaml is not None, 'Missing rawr configuration in yaml' rawr_postgresql_yaml = rawr_yaml.get('postgresql') assert rawr_postgresql_yaml, 'Missing rawr postgresql config' from raw_tiles.formatter.msgpack import Msgpack from raw_tiles.gen import RawrGenerator from raw_tiles.source.conn import ConnectionContextManager from raw_tiles.source import parse_sources from raw_tiles.source import DEFAULT_SOURCES as DEFAULT_RAWR_SOURCES from tilequeue.rawr import RawrS3Sink from tilequeue.rawr import RawrStoreSink import boto3 # pass through the postgresql yaml config directly conn_ctx = ConnectionContextManager(rawr_postgresql_yaml) rawr_source_list = rawr_yaml.get('sources', DEFAULT_RAWR_SOURCES) assert isinstance(rawr_source_list, list), \ 'RAWR source list should be a list' assert len(rawr_source_list) > 0, \ 'RAWR source list should be non-empty' rawr_store = rawr_yaml.get('store') if rawr_store: store = make_store( rawr_store, credentials=cfg.subtree('aws credentials')) rawr_sink = RawrStoreSink(store) else: rawr_sink_yaml = rawr_yaml.get('sink') assert rawr_sink_yaml, 'Missing rawr sink config' sink_type = rawr_sink_yaml.get('type') assert sink_type, 'Missing rawr sink type' if sink_type == 's3': s3_cfg = rawr_sink_yaml.get('s3') assert s3_cfg, 'Missing s3 config' bucket = s3_cfg.get('bucket') assert bucket, 'Missing rawr sink bucket' sink_region = s3_cfg.get('region') assert sink_region, 'Missing rawr sink region' prefix = s3_cfg.get('prefix') assert prefix, 'Missing rawr sink prefix' extension = s3_cfg.get('extension') assert extension, 'Missing rawr sink extension' tags = s3_cfg.get('tags') from tilequeue.store import make_s3_tile_key_generator tile_key_gen = make_s3_tile_key_generator(s3_cfg) s3_client = boto3.client('s3', region_name=sink_region) rawr_sink = RawrS3Sink( s3_client, bucket, prefix, extension, tile_key_gen, tags) elif sink_type == 'none': from tilequeue.rawr import RawrNullSink rawr_sink = RawrNullSink() else: assert 0, 'Unknown rawr sink type %s' % sink_type rawr_source = parse_sources(rawr_source_list) rawr_formatter = Msgpack() rawr_gen = RawrGenerator(rawr_source, rawr_formatter, rawr_sink) return rawr_gen, conn_ctx
command to read from rawr queue and generate rawr tiles
def find_string_ids(self, substring, suffix_tree_id, limit=None): """Returns a set of IDs for strings that contain the given substring. """ # Find an edge for the substring. edge, ln = self.find_substring_edge(substring=substring, suffix_tree_id=suffix_tree_id) # If there isn't an edge, return an empty set. if edge is None: return set() # Get all the string IDs beneath the edge's destination node. string_ids = get_string_ids( node_id=edge.dest_node_id, node_repo=self.node_repo, node_child_collection_repo=self.node_child_collection_repo, stringid_collection_repo=self.stringid_collection_repo, length_until_end=edge.length + 1 - ln, limit=limit ) # Return a set of string IDs. return set(string_ids)
Returns a set of IDs for strings that contain the given substring.
def correct(tokens, term_freq): """ Correct a list of tokens, according to the term_freq """ log = [] output = [] for token in tokens: corrected = _correct(token, term_freq) if corrected != token: log.append((token, corrected)) output.append(corrected) return output, log
Correct a list of tokens, according to the term_freq
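A sketch of the intended contract, assuming the private _correct helper picks the highest-frequency candidate within a small edit distance (Norvig-style); the dictionary is invented:

term_freq = {'spelling': 10, 'errors': 7, 'words': 12}
output, log = correct(['speling', 'words'], term_freq)
print(output)  # ['spelling', 'words'] under that assumption
print(log)     # [('speling', 'spelling')] -- only changed tokens are logged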
def position_after_whitespace(body, start_position): # type: (str, int) -> int """Reads from body starting at start_position until it finds a non-whitespace or commented character, then returns the position of that character for lexing.""" body_length = len(body) position = start_position while position < body_length: code = char_code_at(body, position) if code in ignored_whitespace_characters: position += 1 elif code == 35: # #, skip comments position += 1 while position < body_length: code = char_code_at(body, position) if not ( code is not None and (code > 0x001F or code == 0x0009) and code not in (0x000A, 0x000D) ): break position += 1 else: break return position
Reads from body starting at start_position until it finds a non-whitespace or commented character, then returns the position of that character for lexing.
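For instance, assuming the module's char_code_at and ignored_whitespace_characters follow the usual GraphQL lexer conventions (BOM, tab, space, comma, and line terminators are ignored):

body = '   # a comment\n  query'
print(position_after_whitespace(body, 0))  # 17 -- the index of the 'q'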
def b_spline_basis(x, edge_knots, n_splines=20, spline_order=3, sparse=True,
                   periodic=True, verbose=True):
    """
    tool to generate b-spline basis using vectorized De Boor recursion
    the basis functions extrapolate linearly past the end-knots.

    Parameters
    ----------
    x : array-like, with ndims == 1.
    edge_knots : array-like containing locations of the 2 edge knots.
    n_splines : int. number of splines to generate. must be >= spline_order+1
                default: 20
    spline_order : int. order of spline basis to create
                   default: 3
    sparse : boolean. whether to return a sparse basis matrix or not.
             default: True
    periodic : boolean. whether to wrap the spline basis over the domain edges.
               default: True
    verbose : bool, default: True
              whether to print warnings

    Returns
    -------
    basis : sparse csc matrix or array containing b-spline basis functions
            with shape (len(x), n_splines)
    """
    if np.ravel(x).ndim != 1:
        raise ValueError('Data must be 1-D, but found {}'\
                         .format(np.ravel(x).ndim))

    if (n_splines < 1) or not isinstance(n_splines, numbers.Integral):
        raise ValueError('n_splines must be int >= 1')

    if (spline_order < 0) or not isinstance(spline_order, numbers.Integral):
        raise ValueError('spline_order must be int >= 0')

    if n_splines < spline_order + 1:
        raise ValueError('n_splines must be >= spline_order + 1. '\
                         'found: n_splines = {} and spline_order = {}'\
                         .format(n_splines, spline_order))

    if n_splines == 1 and verbose:
        warnings.warn('Requested 1 spline. This is equivalent to '\
                      'fitting an intercept', stacklevel=2)

    n_splines += spline_order * periodic

    # rescale edge_knots to [0,1], and generate boundary knots
    edge_knots = np.sort(deepcopy(edge_knots))
    offset = edge_knots[0]
    scale = edge_knots[-1] - edge_knots[0]
    if scale == 0:
        scale = 1
    boundary_knots = np.linspace(0, 1, 1 + n_splines - spline_order)
    diff = np.diff(boundary_knots[:2])[0]

    # rescale x as well
    x = (np.ravel(deepcopy(x)) - offset) / scale

    # wrap periodic values
    if periodic:
        x = x % (1 + 1e-9)

    # append 0 and 1 in order to get derivatives for extrapolation
    x = np.r_[x, 0., 1.]

    # determine extrapolation indices
    x_extrapolte_l = (x < 0)
    x_extrapolte_r = (x > 1)
    x_interpolate = ~(x_extrapolte_r + x_extrapolte_l)

    # formatting
    x = np.atleast_2d(x).T
    n = len(x)

    # augment knots
    aug = np.arange(1, spline_order + 1) * diff
    aug_knots = np.r_[-aug[::-1], boundary_knots, 1 + aug]
    aug_knots[-1] += 1e-9  # want last knot inclusive

    # prepare Haar Basis
    bases = (x >= aug_knots[:-1]).astype(int) * \
            (x < aug_knots[1:]).astype(int)
    bases[-1] = bases[-2][::-1]  # force symmetric bases at 0 and 1

    # do recursion from Hastie et al. vectorized
    maxi = len(aug_knots) - 1
    for m in range(2, spline_order + 2):
        maxi -= 1

        # left sub-basis
        num = (x - aug_knots[:maxi])
        num *= bases[:, :maxi]
        denom = aug_knots[m-1 : maxi+m-1] - aug_knots[:maxi]
        left = num/denom

        # right sub-basis
        num = (aug_knots[m : maxi+m] - x) * bases[:, 1:maxi+1]
        denom = aug_knots[m:maxi+m] - aug_knots[1 : maxi+1]
        right = num/denom

        # track previous bases and update
        prev_bases = bases[-2:]
        bases = left + right

    if periodic and spline_order > 0:
        # make spline domain periodic
        bases[:, :spline_order] = np.max([bases[:, :spline_order],
                                          bases[:, -spline_order:]],
                                         axis=0)
        # remove extra splines used only for ensuring correct domain
        bases = bases[:, :-spline_order]

    # extrapolate
    # since we have repeated end-knots, only the last 2 basis functions are
    # non-zero at the end-knots, and they have equal and opposite gradient.
    if (any(x_extrapolte_r) or any(x_extrapolte_l)) and spline_order > 0:
        bases[~x_interpolate] = 0.
denom = (aug_knots[spline_order:-1] - aug_knots[: -spline_order - 1]) left = prev_bases[:, :-1] / denom denom = (aug_knots[spline_order+1:] - aug_knots[1: -spline_order]) right = prev_bases[:, 1:] / denom grads = (spline_order) * (left - right) if any(x_extrapolte_l): val = grads[0] * x[x_extrapolte_l] + bases[-2] bases[x_extrapolte_l] = val if any(x_extrapolte_r): val = grads[1] * (x[x_extrapolte_r] - 1) + bases[-1] bases[x_extrapolte_r] = val # get rid of the added values at 0, and 1 bases = bases[:-2] if sparse: return sp.sparse.csc_matrix(bases) return bases
tool to generate b-spline basis using vectorized De Boor recursion
the basis functions extrapolate linearly past the end-knots.

Parameters
----------
x : array-like, with ndims == 1.
edge_knots : array-like containing locations of the 2 edge knots.
n_splines : int. number of splines to generate. must be >= spline_order+1
            default: 20
spline_order : int. order of spline basis to create
               default: 3
sparse : boolean. whether to return a sparse basis matrix or not.
         default: True
periodic : boolean. whether to wrap the spline basis over the domain edges.
           default: True
verbose : bool, default: True
          whether to print warnings

Returns
-------
basis : sparse csc matrix or array containing b-spline basis functions
        with shape (len(x), n_splines)
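A usage sketch, assuming the module's imports (numpy as np, scipy as sp, numbers, warnings, and deepcopy from copy):

import numpy as np

x = np.linspace(0, 1, 100)
B = b_spline_basis(x, edge_knots=np.r_[0.0, 1.0], n_splines=10,
                   spline_order=3, sparse=False, periodic=False)
print(B.shape)            # (100, 10)
print(B.sum(axis=1)[:3])  # rows sum to ~1 inside the knots (partition of unity)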
def _ensure_channel_connected(self, destination_id): """ Ensure we opened a channel to destination_id. """ if destination_id not in self._open_channels: self._open_channels.append(destination_id) self.send_message( destination_id, NS_CONNECTION, {MESSAGE_TYPE: TYPE_CONNECT, 'origin': {}, 'userAgent': 'PyChromecast', 'senderInfo': { 'sdkType': 2, 'version': '15.605.1.3', 'browserVersion': "44.0.2403.30", 'platform': 4, 'systemVersion': 'Macintosh; Intel Mac OS X10_10_3', 'connectionType': 1}}, no_add_request_id=True)
Ensure we opened a channel to destination_id.
def _read(self, ti, try_number, metadata=None):
    """
    Endpoint for streaming log.
    :param ti: task instance object
    :param try_number: try_number of the task instance
    :param metadata: log metadata, can be used for streaming log reading and
        auto-tailing.
    :return: a list of log documents and metadata.
    """
    if not metadata:
        metadata = {'offset': 0}
    if 'offset' not in metadata:
        metadata['offset'] = 0

    offset = metadata['offset']
    log_id = self._render_log_id(ti, try_number)

    logs = self.es_read(log_id, offset)

    next_offset = offset if not logs else logs[-1].offset

    metadata['offset'] = next_offset
    # end_of_log_mark may contain characters like '\n' which is needed to
    # have the log uploaded but will not be stored in elasticsearch.
    metadata['end_of_log'] = False if not logs \
        else logs[-1].message == self.end_of_log_mark.strip()

    cur_ts = pendulum.now()
    # Assume end of log after not receiving new log for 5 min,
    # as executor heartbeat is 1 min and there might be some
    # delay before Elasticsearch makes the log available.
    if 'last_log_timestamp' in metadata:
        last_log_ts = timezone.parse(metadata['last_log_timestamp'])
        if cur_ts.diff(last_log_ts).in_minutes() >= 5:
            metadata['end_of_log'] = True

    if offset != next_offset or 'last_log_timestamp' not in metadata:
        metadata['last_log_timestamp'] = str(cur_ts)

    message = '\n'.join([log.message for log in logs])

    return message, metadata
Endpoint for streaming log.
:param ti: task instance object
:param try_number: try_number of the task instance
:param metadata: log metadata, can be used for streaming log reading and
    auto-tailing.
:return: a list of log documents and metadata.
def remove(args): """Remove a file from the project's storage. The first part of the remote path is interpreted as the name of the storage provider. If there is no match the default (osfstorage) is used. """ osf = _setup_osf(args) if osf.username is None or osf.password is None: sys.exit('To remove a file you need to provide a username and' ' password.') project = osf.project(args.project) storage, remote_path = split_storage(args.target) store = project.storage(storage) for f in store.files: if norm_remote_path(f.path) == remote_path: f.remove()
Remove a file from the project's storage. The first part of the remote path is interpreted as the name of the storage provider. If there is no match the default (osfstorage) is used.
def parse_quantity(string):
    """
    Parse quantity allows to convert the value in the resources spec like:
    resources:
      requests:
        cpu: "100m"
        memory: "200Mi"
      limits:
        memory: "300Mi"

    :param string: str
    :return: float
    """
    number, unit = '', ''
    for char in string:
        if char.isdigit() or char == '.':
            number += char
        else:
            unit += char

    return float(number) * FACTORS.get(unit, 1)
Parse quantity allows to convert the value in the resources spec like:
resources:
  requests:
    cpu: "100m"
    memory: "200Mi"
  limits:
    memory: "300Mi"

:param string: str
:return: float
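For example, assuming FACTORS maps Kubernetes-style suffixes to multipliers (e.g. 'm' -> 1e-3, 'Ki' -> 2**10, 'Mi' -> 2**20):

print(parse_quantity('100m'))   # 0.1 (CPU cores)
print(parse_quantity('200Mi'))  # 209715200.0 (bytes)
print(parse_quantity('1.5'))    # 1.5 -- no suffix falls back to factor 1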
def print_loop(self, sf, sftag, f=sys.stdout, file_format="nmrstar", tw=3): """Print loop into a file or stdout. :param str sf: Saveframe name. :param str sftag: Saveframe tag, i.e. field name. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `nmrstar` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None` """ if file_format == "nmrstar": # First print the fields for field in self[sf][sftag][0]: print(u"{}_{}".format(tw * u" ", field), file=f) print(u"", file=f) # new line between fields and values # Then print the values for valuesdict in self[sf][sftag][1]: # need to escape value with quotes (i.e. u"'{}'".format()) if value consists of two or more words print(u"{}{}".format(tw * u" ", u" ".join([u"'{}'".format(value) if len(value.split()) > 1 else value for value in valuesdict.values()])), file=f) elif file_format == "json": print(json.dumps(self[sf][sftag], sort_keys=False, indent=4), file=f)
Print loop into a file or stdout. :param str sf: Saveframe name. :param str sftag: Saveframe tag, i.e. field name. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `nmrstar` or `json`. :param int tw: Tab width. :return: None :rtype: :py:obj:`None`