def set_doc_ids(self, doc_ids): """ Build xml documents from a list of document ids. Args: doc_ids -- A document id or a list of those. """ if isinstance(doc_ids, list): self.set_documents(dict.fromkeys(doc_ids)) else: self.set_documents({doc_ids: None})
Build xml documents from a list of document ids. Args: doc_ids -- A document id or a list of those.
def iterGet(self, objectType, *args, **coolArgs) : """Same as get, but returns the elements one by one; much more efficient for large outputs""" for e in self._makeLoadQuery(objectType, *args, **coolArgs).iterRun() : if issubclass(objectType, pyGenoRabaObjectWrapper) : yield objectType(wrapped_object_and_bag = (e, self.bagKey)) else : yield e
Same as get, but returns the elements one by one; much more efficient for large outputs
def bandit(self, choice_rewards): """ Multi-armed bandit method which chooses the arm for which the upper confidence bound (UCB) of expected reward is greatest. If there are multiple arms with the same UCB1 index, then one is chosen at random. An explanation is here: https://www.cs.bham.ac.uk/internal/courses/robotics/lectures/ucb1.pdf """ # use the larger of 1 and the total number of arm pulls total_pulls = max(1, sum(len(r) for r in choice_rewards.values())) def ucb1(choice): rewards = choice_rewards[choice] choice_pulls = max(len(rewards), 1) average_reward = np.nanmean(rewards) if len(rewards) else 0 error = np.sqrt(2.0 * np.log(total_pulls) / choice_pulls) return average_reward + error return max(shuffle(choice_rewards), key=ucb1)
Multi-armed bandit method which chooses the arm for which the upper confidence bound (UCB) of expected reward is greatest. If there are multiple arms with the same UCB1 index, then one is chosen at random. An explanation is here: https://www.cs.bham.ac.uk/internal/courses/robotics/lectures/ucb1.pdf
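For reference, a minimal self-contained sketch of the UCB1 index computed above, on made-up reward data (numpy only; the helper name here is illustrative and not part of the original class):

import numpy as np

def ucb1_scores(choice_rewards):
    # larger of 1 and the total number of arm pulls, as in the method above
    total_pulls = max(1, sum(len(r) for r in choice_rewards.values()))
    scores = {}
    for choice, rewards in choice_rewards.items():
        pulls = max(len(rewards), 1)
        mean = np.nanmean(rewards) if len(rewards) else 0.0
        bonus = np.sqrt(2.0 * np.log(total_pulls) / pulls)  # exploration term
        scores[choice] = mean + bonus
    return scores

print(ucb1_scores({'a': [0.9, 0.8], 'b': [0.4], 'c': []}))
# arms with few or no pulls receive a large exploration bonus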
def create_storage_policy(policy_name, policy_dict, service_instance=None): ''' Creates a storage policy. Supported capability types: scalar, set, range. policy_name Name of the policy to create. The value of the argument will override any existing name in ``policy_dict``. policy_dict Dictionary containing the changes to apply to the policy. (example in salt.states.pbm) service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.create_storage_policy policy_name='policy name' policy_dict="$policy_dict" ''' log.trace('create storage policy \'%s\', dict = %s', policy_name, policy_dict) profile_manager = salt.utils.pbm.get_profile_manager(service_instance) policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec() # Hardcode the storage profile resource type policy_create_spec.resourceType = pbm.profile.ResourceType( resourceType=pbm.profile.ResourceTypeEnum.STORAGE) # Set name argument policy_dict['name'] = policy_name log.trace('Setting policy values in policy_update_spec') _apply_policy_config(policy_create_spec, policy_dict) salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec) return {'create_storage_policy': True}
Creates a storage policy. Supported capability types: scalar, set, range. policy_name Name of the policy to create. The value of the argument will override any existing name in ``policy_dict``. policy_dict Dictionary containing the changes to apply to the policy. (example in salt.states.pbm) service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.create_storage_policy policy_name='policy name' policy_dict="$policy_dict"
def newDocPI(self, name, content): """Creation of a processing instruction element. """ ret = libxml2mod.xmlNewDocPI(self._o, name, content) if ret is None:raise treeError('xmlNewDocPI() failed') __tmp = xmlNode(_obj=ret) return __tmp
Creation of a processing instruction element.
def make_tables(grammar, precedence): """Generates the ACTION and GOTO tables for the grammar. Returns: action - dict[state][lookahead] = (action, ...) goto - dict[state][just_reduced] = new_state """ ACTION = {} GOTO = {} labels = {} def get_label(closure): if closure not in labels: labels[closure] = len(labels) return labels[closure] def resolve_shift_reduce(lookahead, s_action, r_action): s_assoc, s_level = precedence[lookahead] r_assoc, r_level = precedence[r_action[1]] if s_level < r_level: return r_action elif s_level == r_level and r_assoc == LEFT: return r_action else: return s_action initial, closures, goto = grammar.closures() for closure in closures: label = get_label(closure) for rule in closure: new_action, lookahead = None, rule.lookahead if not rule.at_end: symbol = rule.rhs[rule.pos] is_terminal = symbol in grammar.terminals has_goto = symbol in goto[closure] if is_terminal and has_goto: next_state = get_label(goto[closure][symbol]) new_action, lookahead = ('shift', next_state), symbol elif rule.production == grammar.start and rule.at_end: new_action = ('accept',) elif rule.at_end: new_action = ('reduce', rule.production) if new_action is None: continue prev_action = ACTION.get((label, lookahead)) if prev_action is None or prev_action == new_action: ACTION[label, lookahead] = new_action else: types = (prev_action[0], new_action[0]) if types == ('shift', 'reduce'): chosen = resolve_shift_reduce(lookahead, prev_action, new_action) elif types == ('reduce', 'shift'): chosen = resolve_shift_reduce(lookahead, new_action, prev_action) else: raise TableConflictError(prev_action, new_action) ACTION[label, lookahead] = chosen for symbol in grammar.nonterminals: if symbol in goto[closure]: GOTO[label, symbol] = get_label(goto[closure][symbol]) return get_label(initial), ACTION, GOTO
Generates the ACTION and GOTO tables for the grammar. Returns: action - dict[state][lookahead] = (action, ...) goto - dict[state][just_reduced] = new_state
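To make the table layout concrete, here is a hedged sketch of a shift-reduce driver over the returned tables; the `lhs`/`rhs` attributes on productions are assumptions about the grammar objects, not confirmed by the snippet:

def parse(start_state, ACTION, GOTO, tokens, end_marker='$'):
    stack = [start_state]
    stream = iter(list(tokens) + [end_marker])
    lookahead = next(stream)
    while True:
        action = ACTION[stack[-1], lookahead]
        if action[0] == 'accept':
            return True
        elif action[0] == 'shift':
            stack.append(action[1])          # push the next state
            lookahead = next(stream)
        else:                                # ('reduce', production)
            production = action[1]
            if production.rhs:               # pop one state per RHS symbol
                del stack[-len(production.rhs):]
            stack.append(GOTO[stack[-1], production.lhs])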
def on_data(self, ws, message, message_type, fin): """ Callback executed when message is received from the server. :param ws: Websocket client :param message: utf-8 string which we get from the server. :param message_type: Message type which is either ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY :param fin: continue flag. If 0, the data continues. """ try: if message_type == websocket.ABNF.OPCODE_TEXT: json_object = json.loads(message) if 'binary_streams' in json_object: self.callback.on_content_type(json_object['binary_streams'][0]['content_type']) elif 'error' in json_object: self.on_error(ws, json_object.get('error')) return else: self.callback.on_timing_information(json_object) except Exception: self.on_error(ws, 'Unable to parse received message.') if message_type == websocket.ABNF.OPCODE_BINARY: self.callback.on_audio_stream(message) self.callback.on_data(message)
Callback executed when message is received from the server. :param ws: Websocket client :param message: utf-8 string which we get from the server. :param message_type: Message type which is either ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY :param fin: continue flag. If 0, the data continues.
def extract(): """Extract melting points from patents.""" Paragraph.parsers = [CompoundParser(), ChemicalLabelParser(), MpParser()] Table.parsers = [] patents = [] for root, dirs, files in os.walk('../examples/mp/grants'): for filename in files: if not filename.endswith('.xml'): continue path = os.path.abspath(os.path.join(root, filename)) size = os.path.getsize(path) patents.append((path, filename, size)) patents = sorted(patents, key=lambda p: p[2]) for path, filename, size in patents: print(path) shutil.copyfile(path, '../examples/mp/used/%s' % filename) with open(path) as f: d = Document.from_file(f) if os.path.isfile('../examples/mp/results/%s.json' % filename): continue records = [r.serialize() for r in d.records if len(r.melting_points) == 1] with open('../examples/mp/results/%s.json' % filename, 'w') as fout: fout.write(json.dumps(records, ensure_ascii=False, indent=2).encode('utf8'))
Extract melting points from patents.
def from_expr(cls, expr): """Instantiate proto-expression from the given Expression""" return cls(expr.args, expr.kwargs, cls=expr.__class__)
Instantiate proto-expression from the given Expression
async def cluster_reset_all_nodes(self, soft=True): """ Send CLUSTER RESET to all nodes in the cluster. If 'soft' is True the 'SOFT' argument is sent, otherwise 'HARD'. """ option = 'SOFT' if soft else 'HARD' res = list() for node in await self.cluster_nodes(): res.append( await self.execute_command( 'CLUSTER RESET', option, node_id=node['id'] )) return res
Send CLUSTER RESET to all nodes in the cluster. If 'soft' is True the 'SOFT' argument is sent, otherwise 'HARD'.
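Assuming this method lives on an aredis-style async cluster client, usage would look roughly like this (host and port are placeholders):

import asyncio
from aredis import StrictRedisCluster  # assumed client library

async def main():
    client = StrictRedisCluster(startup_nodes=[{'host': '127.0.0.1', 'port': 7000}])
    print(await client.cluster_reset_all_nodes(soft=True))

asyncio.get_event_loop().run_until_complete(main())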
def reset_case(self): """ Returns the case to its original state. """ for bus in self.market.case.buses: bus.p_demand = self.pdemand[bus] for task in self.tasks: for g in task.env.generators: g.p = task.env._g0[g]["p"] g.p_max = task.env._g0[g]["p_max"] g.p_min = task.env._g0[g]["p_min"] g.q = task.env._g0[g]["q"] g.q_max = task.env._g0[g]["q_max"] g.q_min = task.env._g0[g]["q_min"] g.p_cost = task.env._g0[g]["p_cost"] g.pcost_model = task.env._g0[g]["pcost_model"] g.q_cost = task.env._g0[g]["q_cost"] g.qcost_model = task.env._g0[g]["qcost_model"] g.c_startup = task.env._g0[g]["startup"] g.c_shutdown = task.env._g0[g]["shutdown"]
Returns the case to its original state.
def validate_lv_districts(session, nw): '''Validate if the total load of a grid in a pkl file is what is expected from the LV districts Parameters ---------- session : sqlalchemy.orm.session.Session Database session nw: The network Returns ------- DataFrame compare_by_district DataFrame compare_by_loads ''' # config network internal variables nw._config = nw.import_config() nw._pf_config = nw.import_pf_config() nw._static_data = nw.import_static_data() nw._orm = nw.import_orm() # rescue peak load from input table lv_districts = [dist.id_db for mv in nw.mv_grid_districts() for la in mv.lv_load_areas() for dist in la.lv_grid_districts()] load_input = nw.list_lv_grid_districts(session, lv_districts) load_input = load_input.sum(axis=0).apply(lambda x: np.round(x, 3)) load_input.sort_index(inplace=True) load_input.index.names = ['id_db'] load_input['peak_load_retind']=load_input['peak_load_retail']+load_input['peak_load_industrial'] # search for lv_district in the grid lv_dist_idx = 0 lv_dist_dict = {} lv_load_idx = 0 lv_load_dict = {} for mv_district in nw.mv_grid_districts(): for LA in mv_district.lv_load_areas(): for lv_district in LA.lv_grid_districts(): lv_dist_idx += 1 lv_dist_dict[lv_dist_idx] = { 'id_db':lv_district.id_db, 'peak_load_residential':lv_district.peak_load_residential, 'peak_load_retail':lv_district.peak_load_retail, 'peak_load_industrial':lv_district.peak_load_industrial, 'peak_load_agricultural':lv_district.peak_load_agricultural, 'peak_load_retind': lv_district.peak_load_industrial + lv_district.peak_load_retail, } for node in lv_district.lv_grid.graph_nodes_sorted(): if isinstance(node,LVLoadDing0): lv_load_idx +=1 peak_load_agricultural = 0 peak_load_residential = 0 peak_load_retail = 0 peak_load_industrial = 0 peak_load_retind = 0 if 'agricultural' in node.consumption: tipo = 'agricultural' peak_load_agricultural = node.peak_load elif 'industrial' in node.consumption: if node.consumption['retail']==0: tipo = 'industrial' peak_load_industrial = node.peak_load elif node.consumption['industrial']==0: tipo = 'retail' peak_load_retail = node.peak_load else: tipo = 'ret_ind' peak_load_retind = node.peak_load elif 'residential' in node.consumption: tipo = 'residential' peak_load_residential = node.peak_load else: tipo = 'none' print(node.consumption) lv_load_dict[lv_load_idx] = { 'id_db':node.id_db, 'peak_load_residential':peak_load_residential, 'peak_load_retail':peak_load_retail, 'peak_load_industrial':peak_load_industrial, 'peak_load_agricultural':peak_load_agricultural, 'peak_load_retind':peak_load_retind, } for node in mv_district.mv_grid.graph_nodes_sorted(): if isinstance(node,LVLoadAreaCentreDing0): lv_load_idx +=1 lv_load_dict[lv_load_idx] = { 'id_db': node.id_db, 'peak_load_residential': node.lv_load_area.peak_load_residential, 'peak_load_retail': node.lv_load_area.peak_load_retail, 'peak_load_industrial': node.lv_load_area.peak_load_industrial, 'peak_load_agricultural': node.lv_load_area.peak_load_agricultural, 'peak_load_retind':0, } #compare by LV district load_effective_lv_distr = pd.DataFrame.from_dict(lv_dist_dict,orient='index').set_index('id_db').sum(axis=0).apply(lambda x: np.round(x,3)) load_effective_lv_distr.sort_index(inplace=True) compare_by_district = pd.concat([load_input,load_effective_lv_distr,load_input==load_effective_lv_distr],axis=1) compare_by_district.columns = ['table','ding0','equal?'] compare_by_district.index.names = ['sector'] #compare by LV Loads load_effective_lv_load = pd.DataFrame.from_dict(lv_load_dict,orient='index').set_index('id_db') load_effective_lv_load = load_effective_lv_load.sum(axis=0).apply(lambda x: np.round(x,3)) load_effective_lv_load.sort_index(inplace=True) load_effective_lv_load['peak_load_retind'] = load_effective_lv_load['peak_load_retail'] + \ load_effective_lv_load['peak_load_industrial'] + \ load_effective_lv_load['peak_load_retind'] compare_by_load = pd.concat([load_input,load_effective_lv_load,load_input==load_effective_lv_load],axis=1) compare_by_load.columns = ['table','ding0','equal?'] compare_by_load.index.names = ['sector'] return compare_by_district, compare_by_load
Validate if the total load of a grid in a pkl file is what is expected from the LV districts Parameters ---------- session : sqlalchemy.orm.session.Session Database session nw: The network Returns ------- DataFrame compare_by_district DataFrame compare_by_loads
def has_index(self, name): """ Returns whether this table has an Index with the given name. :param name: The index name :type name: str :rtype: bool """ name = self._normalize_identifier(name) return name in self._indexes
Returns whether this table has an Index with the given name. :param name: The index name :type name: str :rtype: bool
def start_range(self): """Similar to the junction range, but we don't need to check for leftmost or rightmost""" if len(self._exons) == 0: return None return GenomicRange(self._exons[0].chr, min([x.start for x in self._exons]),# must be part of junction max([x.start for x in self._exons]))
Similar to the junction range, but we don't need to check for leftmost or rightmost
def CNOT(control, target): """Produces a controlled-NOT (controlled-X) gate:: CNOT = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]] This gate applies to two qubit arguments to produce the controlled-not gate instruction. :param control: The control qubit. :param target: The target qubit. The target qubit has an X-gate applied to it if the control qubit is in the ``|1>`` state. :returns: A Gate object. """ return Gate(name="CNOT", params=[], qubits=[unpack_qubit(q) for q in (control, target)])
Produces a controlled-NOT (controlled-X) gate:: CNOT = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]] This gate applies to two qubit arguments to produce the controlled-not gate instruction. :param control: The control qubit. :param target: The target qubit. The target qubit has an X-gate applied to it if the control qubit is in the ``|1>`` state. :returns: A Gate object.
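Typical usage in recent pyQuil versions, preparing a Bell state with the standard Program/gates API:

from pyquil import Program
from pyquil.gates import H, CNOT

# H puts qubit 0 into superposition; CNOT then entangles qubit 1 with it.
p = Program(H(0), CNOT(0, 1))
print(p)
# H 0
# CNOT 0 1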
def delete(network): """libvirt network cleanup. @raise: RuntimeError. """ try: network.destroy() except libvirt.libvirtError as error: raise RuntimeError("Unable to destroy network: {}".format(error))
libvirt network cleanup. @raise: RuntimeError.
def get_and_check_tasks_for(context, task, msg_prefix=''): """Given a parent task, return the reason the parent task was spawned. ``.taskcluster.yml`` uses this to know whether to spawn an action, cron, or decision task definition. ``tasks_for`` must be a valid one defined in the context. Args: task (dict): the task definition. msg_prefix (str): the string prefix to use for an exception. Raises: (KeyError, ValueError): on failure to find a valid ``tasks_for``. Returns: str: the ``tasks_for`` """ tasks_for = task['extra']['tasks_for'] if tasks_for not in context.config['valid_tasks_for']: raise ValueError( '{}Unknown tasks_for: {}'.format(msg_prefix, tasks_for) ) return tasks_for
Given a parent task, return the reason the parent task was spawned. ``.taskcluster.yml`` uses this to know whether to spawn an action, cron, or decision task definition. ``tasks_for`` must be a valid one defined in the context. Args: task (dict): the task definition. msg_prefix (str): the string prefix to use for an exception. Raises: (KeyError, ValueError): on failure to find a valid ``tasks_for``. Returns: str: the ``tasks_for``
def attach(self, file): """Attaches the queried record with `file` and returns the response after validating it :param file: File to attach to the record :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :return: - The attachment record metadata """ try: result = self.get_one() if 'sys_id' not in result: raise NoResults() except MultipleResults: raise MultipleResults('Attaching a file to multiple records is not supported') except NoResults: raise NoResults('Attempted to attach file to a non-existing record') if not os.path.isfile(file): raise InvalidUsage("Attachment '%s' must be an existing regular file" % file) response = self.session.post( self._get_attachment_url('upload'), data={ 'table_name': self.table, 'table_sys_id': result['sys_id'], 'file_name': ntpath.basename(file) }, files={'file': open(file, 'rb')}, headers={'content-type': None} # Temporarily override header ) return self._get_content(response)
Attaches the queried record with `file` and returns the response after validating it :param file: File to attach to the record :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :return: - The attachment record metadata
def removeIndividual(self, individual): """ Removes the specified individual from this repository. """ q = models.Individual.delete().where( models.Individual.id == individual.getId()) q.execute()
Removes the specified individual from this repository.
def connected_channel(self): """ Returns the voice channel the player is connected to. """ if not self.channel_id: return None return self._lavalink.bot.get_channel(int(self.channel_id))
Returns the voice channel the player is connected to.
def fetch_stackexchange( dataset, test_set_fraction=0.2, min_training_interactions=1, data_home=None, indicator_features=True, tag_features=False, download_if_missing=True, ): """ Fetch a dataset from the `StackExchange network <http://stackexchange.com/>`_. The datasets contain users answering questions: an interaction is defined as a user answering a given question. The following datasets from the StackExchange network are available: - CrossValidated: From stats.stackexchange.com. Approximately 9000 users, 72000 questions, and 70000 answers. - StackOverflow: From stackoverflow.com. Approximately 1.3M users, 11M questions, and 18M answers. Parameters ---------- dataset: string, one of ('crossvalidated', 'stackoverflow') The part of the StackExchange network for which to fetch the dataset. test_set_fraction: float, optional The fraction of the dataset used for testing. Splitting into the train and test set is done in a time-based fashion: all interactions before a certain time are in the train set and all interactions after that time are in the test set. min_training_interactions: int, optional Only include users with this amount of interactions in the training set. data_home: path, optional Path to the directory in which the downloaded data should be placed. Defaults to ``~/lightfm_data/``. indicator_features: bool, optional Use an [n_items, n_items] identity matrix for item features. When True with tag_features, indicator and tag features are concatenated into a single feature matrix of shape [n_items, n_items + n_tags]. tag_features: bool, optional Use question tags as item features. download_if_missing: bool, optional Download the data if not present. Raises an IOError if False and data is missing. Notes ----- The return value is a dictionary containing the following keys: Returns ------- train: sp.coo_matrix of shape [n_users, n_items] Contains training set interactions. test: sp.coo_matrix of shape [n_users, n_items] Contains testing set interactions. item_features: sp.csr_matrix of shape [n_items, n_item_features] Contains item features. item_feature_labels: np.array of strings of shape [n_item_features,] Labels of item features. """ if not (indicator_features or tag_features): raise ValueError( "At least one of item_indicator_features " "or tag_features must be True" ) if dataset not in ("crossvalidated", "stackoverflow"): raise ValueError("Unknown dataset") if not (0.0 < test_set_fraction < 1.0): raise ValueError("Test set fraction must be between 0 and 1") urls = { "crossvalidated": ( "https://github.com/maciejkula/lightfm_datasets/releases/" "download/v0.1.0/stackexchange_crossvalidated.npz" ), "stackoverflow": ( "https://github.com/maciejkula/lightfm_datasets/releases/" "download/v0.1.0/stackexchange_stackoverflow.npz" ), } path = _common.get_data( data_home, urls[dataset], os.path.join("stackexchange", dataset), "data.npz", download_if_missing, ) data = np.load(path) interactions = sp.coo_matrix( ( data["interactions_data"], (data["interactions_row"], data["interactions_col"]), ), shape=data["interactions_shape"].flatten(), ) interactions.sum_duplicates() tag_features_mat = sp.coo_matrix( (data["features_data"], (data["features_row"], data["features_col"])), shape=data["features_shape"].flatten(), ) tag_labels = data["labels"] test_cutoff_index = int(len(interactions.data) * (1.0 - test_set_fraction)) test_cutoff_timestamp = np.sort(interactions.data)[test_cutoff_index] in_train = interactions.data < test_cutoff_timestamp in_test = np.logical_not(in_train) train = sp.coo_matrix( ( np.ones(in_train.sum(), dtype=np.float32), (interactions.row[in_train], interactions.col[in_train]), ), shape=interactions.shape, ) test = sp.coo_matrix( ( np.ones(in_test.sum(), dtype=np.float32), (interactions.row[in_test], interactions.col[in_test]), ), shape=interactions.shape, ) if min_training_interactions > 0: include = np.squeeze(np.array(train.getnnz(axis=1))) > min_training_interactions train = train.tocsr()[include].tocoo() test = test.tocsr()[include].tocoo() if indicator_features and not tag_features: features = sp.identity(train.shape[1], format="csr", dtype=np.float32) labels = np.array(["question_id:{}".format(x) for x in range(train.shape[1])]) elif not indicator_features and tag_features: features = tag_features_mat.tocsr() labels = tag_labels else: id_features = sp.identity(train.shape[1], format="csr", dtype=np.float32) features = sp.hstack([id_features, tag_features_mat]).tocsr() labels = np.concatenate( [ np.array(["question_id:{}".format(x) for x in range(train.shape[1])]), tag_labels, ] ) return { "train": train, "test": test, "item_features": features, "item_feature_labels": labels, }
Fetch a dataset from the `StackExchange network <http://stackexchange.com/>`_. The datasets contain users answering questions: an interaction is defined as a user answering a given question. The following datasets from the StackExchange network are available: - CrossValidated: From stats.stackexchange.com. Approximately 9000 users, 72000 questions, and 70000 answers. - StackOverflow: From stackoverflow.com. Approximately 1.3M users, 11M questions, and 18M answers. Parameters ---------- dataset: string, one of ('crossvalidated', 'stackoverflow') The part of the StackExchange network for which to fetch the dataset. test_set_fraction: float, optional The fraction of the dataset used for testing. Splitting into the train and test set is done in a time-based fashion: all interactions before a certain time are in the train set and all interactions after that time are in the test set. min_training_interactions: int, optional Only include users with this amount of interactions in the training set. data_home: path, optional Path to the directory in which the downloaded data should be placed. Defaults to ``~/lightfm_data/``. indicator_features: bool, optional Use an [n_items, n_items] identity matrix for item features. When True with tag_features, indicator and tag features are concatenated into a single feature matrix of shape [n_items, n_items + n_tags]. tag_features: bool, optional Use question tags as item features. download_if_missing: bool, optional Download the data if not present. Raises an IOError if False and data is missing. Notes ----- The return value is a dictionary containing the following keys: Returns ------- train: sp.coo_matrix of shape [n_users, n_items] Contains training set interactions. test: sp.coo_matrix of shape [n_users, n_items] Contains testing set interactions. item_features: sp.csr_matrix of shape [n_items, n_item_features] Contains item features. item_feature_labels: np.array of strings of shape [n_item_features,] Labels of item features.
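A short end-to-end sketch with the LightFM estimator; the loss and epoch count are illustrative choices, not prescribed by the function:

from lightfm import LightFM

data = fetch_stackexchange('crossvalidated', test_set_fraction=0.2,
                           indicator_features=True, tag_features=True)
model = LightFM(loss='warp')
model.fit(data['train'], item_features=data['item_features'], epochs=10)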
def _GetContents(self): """Read the directory, making sure we close the file if the format is bad.""" try: self._RealGetContents() except BadZipfile: if not self._filePassed: self.fp.close() self.fp = None raise
Read the directory, making sure we close the file if the format is bad.
def _client(self, host, port, unix_socket, auth): """Return a redis client for the configuration. :param str host: redis host :param int port: redis port :rtype: redis.Redis """ db = int(self.config['db']) timeout = int(self.config['timeout']) try: cli = redis.Redis(host=host, port=port, db=db, socket_timeout=timeout, password=auth, unix_socket_path=unix_socket) cli.ping() return cli except Exception as ex: self.log.error("RedisCollector: failed to connect to %s:%i. %s.", unix_socket or host, port, ex)
Return a redis client for the configuration. :param str host: redis host :param int port: redis port :rtype: redis.Redis
def _get_lib_modules(self, full): """Returns a list of the modules in the same folder as the one being wrapped for compilation as a linked library. :arg full: when True, all the code files in the source file's directory are considered as dependencies; otherwise only those explicitly needed are kept. """ #The only complication with the whole process is that we need to get the list of #dependencies for the current module. For full lib, we compile *all* the files in #the directory, otherwise only those that are explicitly required. result = [] if full: found = {} from os import path mypath = path.dirname(self.module.filepath) self.module.parent.scan_path(mypath, found) for codefile in found: self.module.parent.load_dependency(codefile.replace(".f90", ""), True, True, False) for modname, module in list(self.module.parent.modules.items()): if path.dirname(module.filepath).lower() == mypath.lower(): result.append(modname) else: result.extend(self.module.search_dependencies()) return self._process_module_needs(result)
Returns a list of the modules in the same folder as the one being wrapped for compilation as a linked library. :arg full: when True, all the code files in the source file's directory are considered as dependencies; otherwise only those explicitly needed are kept.
def forall(self, method): """ IT IS EXPECTED THE method ACCEPTS (value, coord, cube), WHERE value - VALUE FOUND AT ELEMENT coord - THE COORDINATES OF THE ELEMENT (PLEASE, READ ONLY) cube - THE WHOLE CUBE, FOR USE IN WINDOW FUNCTIONS """ for c in self._all_combos(): method(self[c], c, self.cube)
IT IS EXPECTED THE method ACCEPTS (value, coord, cube), WHERE value - VALUE FOUND AT ELEMENT coord - THE COORDINATES OF THE ELEMENT (PLEASE, READ ONLY) cube - THE WHOLE CUBE, FOR USE IN WINDOW FUNCTIONS
def parse_environment(fields, context, topics): """Resolve the be.yaml environment key Features: - Lists, e.g. ["/path1", "/path2"] - Environment variable references, via $ - Replacement field references, e.g. {key} - Topic references, e.g. {1} """ def _resolve_environment_lists(context): """Concatenate environment lists""" for key, value in context.copy().iteritems(): if isinstance(value, list): context[key] = os.pathsep.join(value) return context def _resolve_environment_references(fields, context): """Resolve $ occurrences by expansion Given a dictionary {"PATH": "$PATH;somevalue;{0}"} Return {"PATH": "value_of_PATH;somevalue;myproject"}, given that the first topic - {0} - is "myproject" Arguments: fields (dict): Environment from be.yaml context (dict): Source context """ def repl(match): key = pattern[match.start():match.end()].strip("$") return context.get(key) pat = re.compile(r"\$\w+", re.IGNORECASE) for key, pattern in fields.copy().iteritems(): fields[key] = pat.sub(repl, pattern) \ .strip(os.pathsep) # Remove superfluous separators return fields def _resolve_environment_fields(fields, context, topics): """Resolve {} occurrences Supports both positional and BE_-prefixed variables. Example: BE_MYKEY -> "{mykey}" from `BE_MYKEY` {1} -> "{mytask}" from `be in myproject mytask` Returns: Dictionary of resolved fields """ source_dict = replacement_fields_from_context(context) source_dict.update(dict((str(topics.index(topic)), topic) for topic in topics)) def repl(match): key = pattern[match.start():match.end()].strip("{}") try: return source_dict[key] except KeyError: echo("PROJECT ERROR: Unavailable reference \"%s\" " "in be.yaml" % key) sys.exit(PROJECT_ERROR) for key, pattern in fields.copy().iteritems(): fields[key] = re.sub(r"{[\d\w]+}", repl, pattern) return fields fields = _resolve_environment_lists(fields) fields = _resolve_environment_references(fields, context) fields = _resolve_environment_fields(fields, context, topics) return fields
Resolve the be.yaml environment key Features: - Lists, e.g. ["/path1", "/path2"] - Environment variable references, via $ - Replacement field references, e.g. {key} - Topic references, e.g. {1}
def matches(property_name, regex, *, present_optional=False, message=None): """Returns a Validation that checks a property against a regex.""" def check(val): """Checks that a value matches a scope-enclosed regex.""" if not val: return present_optional else: return True if regex.search(val) else False return Validation(check, property_name, message)
Returns a Validation that checks a property against a regex.
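Hypothetical usage with a precompiled pattern; the surrounding `Validation` machinery is assumed from context, and the property name and message are placeholders:

import re

email_re = re.compile(r'^[^@\s]+@[^@\s]+\.[^@\s]+$')
email_check = matches('email', email_re, present_optional=True,
                      message='email must be a valid address')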
def greet(event: str): """Greets appropriately (from http://blog.ketchum.com/how-to-write-10-common-holiday-greetings/) """ greetings = "Happy" if event == "Christmas": greetings = "Merry" if event == "Kwanzaa": greetings = "Joyous" if event == "wishes": greetings = "Warm" return "{greetings} {event}!".format(**locals())
Greets appropriately (from http://blog.ketchum.com/how-to-write-10-common-holiday-greetings/)
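Quick checks of the branching:

print(greet('Christmas'))  # Merry Christmas!
print(greet('Kwanzaa'))    # Joyous Kwanzaa!
print(greet('birthday'))   # Happy birthday!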
def set_xlimits_widgets(self, set_min=True, set_max=True): """Populate axis limits GUI with current plot values.""" xmin, xmax = self.tab_plot.ax.get_xlim() if set_min: self.w.x_lo.set_text('{0}'.format(xmin)) if set_max: self.w.x_hi.set_text('{0}'.format(xmax))
Populate axis limits GUI with current plot values.
def __has_language(self, bundleId, languageId): """Returns ``True`` if the bundle has the language, ``False`` otherwise """ return True if self.__get_language_data(bundleId=bundleId, languageId=languageId) \ else False
Returns ``True`` if the bundle has the language, ``False`` otherwise
def get_form_kwargs(self, **kwargs): ''' Pass along the request data to the form ''' kwargs = super(PrivateLessonStudentInfoView, self).get_form_kwargs(**kwargs) kwargs['request'] = self.request kwargs['payAtDoor'] = self.payAtDoor return kwargs
Pass along the request data to the form
def template_to_dict_find(item, debug=0): """ DEPRECATED: Returns infobox parsetree value using etree.find() Older template_to_dict() algorithm, uses etree.xpath() to "lookup" or find specific elements, but fails to include tail text in the order it is found, and does not _exclude_ <ext> tags (references, etc.). Compare to template_to_dict_iter(). """ if debug > 1: print("template_to_dict_find:") tmpl = item.find('value').find('template') if tmpl is not None: value = template_to_text(tmpl, debug) else: value = text_with_children(item.find('value'), debug) if debug: print(" find: %s" % value) return value
DEPRECATED: Returns infobox parsetree value using etree.find() Older template_to_dict() algorithm, uses etree.xpath() to "lookup" or find specific elements, but fails to include tail text in the order it is found, and does not _exclude_ <ext> tags (references, etc.). Compare to template_to_dict_iter().
def burstColumn(self, column, columnMatchingSegments, prevActiveCells, prevWinnerCells, learn): """ Activates all of the cells in an unpredicted active column, chooses a winner cell, and, if learning is turned on, learns on one segment, growing a new segment if necessary. @param column (int) Index of bursting column. @param columnMatchingSegments (iter) Matching segments in this column, or None if there aren't any. @param prevActiveCells (list) Active cells in `t-1`. @param prevWinnerCells (list) Winner cells in `t-1`. @param learn (bool) Whether or not learning is enabled. @return (tuple) Contains: `cells` (iter), `winnerCell` (int), """ start = self.cellsPerColumn * column # Strip out destroyed cells before passing along to base _burstColumn() cellsForColumn = [cellIdx for cellIdx in xrange(start, start + self.cellsPerColumn) if cellIdx not in self.deadCells] return self._burstColumn( self.connections, self._random, self.lastUsedIterationForSegment, column, columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn, self.numActivePotentialSynapsesForSegment, self.iteration, self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement, self.permanenceDecrement, self.maxSegmentsPerCell, self.maxSynapsesPerSegment, learn)
Activates all of the cells in an unpredicted active column, chooses a winner cell, and, if learning is turned on, learns on one segment, growing a new segment if necessary. @param column (int) Index of bursting column. @param columnMatchingSegments (iter) Matching segments in this column, or None if there aren't any. @param prevActiveCells (list) Active cells in `t-1`. @param prevWinnerCells (list) Winner cells in `t-1`. @param learn (bool) Whether or not learning is enabled. @return (tuple) Contains: `cells` (iter), `winnerCell` (int),
def validate(self, value): """Validate value.""" len_ = len(value) if self.minimum_value is not None and len_ < self.minimum_value: tpl = "Value '{val}' length is lower than allowed minimum '{min}'." raise ValidationError(tpl.format( val=value, min=self.minimum_value )) if self.maximum_value is not None and len_ > self.maximum_value: raise ValidationError( "Value '{val}' length is bigger than " "allowed maximum '{max}'.".format( val=value, max=self.maximum_value, ))
Validate value.
def _write(self, data): """ Note: print()-statements cause multiple write calls. (write('line') and write('\n')). Of course we don't want to call `run_in_terminal` for every individual call, because that's too expensive, and as long as the newline hasn't been written, the text itself is again overwritten by the rendering of the input command line. Therefore, we have a little buffer which holds the text until a newline is written to stdout. """ if '\n' in data: # When there is a newline in the data, write everything before the # newline, including the newline itself. before, after = data.rsplit('\n', 1) to_write = self._buffer + [before, '\n'] self._buffer = [after] def run(): for s in to_write: if self._raw: self._cli.output.write_raw(s) else: self._cli.output.write(s) self._do(run) else: # Otherwise, cache in buffer. self._buffer.append(data)
Note: print()-statements cause multiple write calls. (write('line') and write('\n')). Of course we don't want to call `run_in_terminal` for every individual call, because that's too expensive, and as long as the newline hasn't been written, the text itself is again overwritten by the rendering of the input command line. Therefore, we have a little buffer which holds the text until a newline is written to stdout.
def convertDay(self, day, prefix="", weekday=False): """Convert a datetime object representing a day into a human-ready string that can be read, spoken aloud, etc. Args: day (datetime.date): A datetime object to be converted into text. prefix (str): An optional argument that prefixes the converted string. For example, if prefix="in", you'd receive "in two days", rather than "two days", while the method would still return "tomorrow" (rather than "in tomorrow"). weekday (bool): An optional argument that returns "Monday, Oct. 1" if True, rather than "Oct. 1". Returns: A string representation of the input day, ignoring any time-related information. """ def sameDay(d1, d2): d = d1.day == d2.day m = d1.month == d2.month y = d1.year == d2.year return d and m and y tom = self.now + datetime.timedelta(days=1) if sameDay(day, self.now): return "today" elif sameDay(day, tom): return "tomorrow" if weekday: dayString = day.strftime("%A, %B %d") else: dayString = day.strftime("%B %d") # Ex) Remove '0' from 'August 03' if not int(dayString[-2]): dayString = dayString[:-2] + dayString[-1] return prefix + " " + dayString
Convert a datetime object representing a day into a human-ready string that can be read, spoken aloud, etc. Args: day (datetime.date): A datetime object to be converted into text. prefix (str): An optional argument that prefixes the converted string. For example, if prefix="in", you'd receive "in two days", rather than "two days", while the method would still return "tomorrow" (rather than "in tomorrow"). weekday (bool): An optional argument that returns "Monday, Oct. 1" if True, rather than "Oct. 1". Returns: A string representation of the input day, ignoring any time-related information.
def is_pinyin(s): """Check if *s* consists of valid Pinyin.""" re_pattern = ('(?:%(word)s|[ \t%(punctuation)s])+' % {'word': zhon.pinyin.word, 'punctuation': zhon.pinyin.punctuation}) return _is_pattern_match(re_pattern, s)
Check if *s* consists of valid Pinyin.
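Example checks (requires the zhon package that supplies the character classes):

print(is_pinyin('nǐ hǎo'))  # True — accented Pinyin plus whitespace
print(is_pinyin('你好'))     # False — hanzi is not Pinyin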
def _printDescription(self, hrlinetop=True): """generic method to print out a description""" if hrlinetop: self._print("----------------") NOTFOUND = "[not found]" if self.currentEntity: obj = self.currentEntity['object'] label = obj.bestLabel() or NOTFOUND description = obj.bestDescription() or NOTFOUND print(Style.BRIGHT + "OBJECT TYPE: " + Style.RESET_ALL + Fore.BLACK + uri2niceString(obj.rdftype) + Style.RESET_ALL) print(Style.BRIGHT + "URI : " + Style.RESET_ALL + Fore.GREEN + "<" + unicode(obj.uri) + ">" + Style.RESET_ALL) print(Style.BRIGHT + "TITLE : " + Style.RESET_ALL + Fore.BLACK + label + Style.RESET_ALL) print(Style.BRIGHT + "DESCRIPTION: " + Style.RESET_ALL + Fore.BLACK + description + Style.RESET_ALL) else: self._clear_screen() self._print("Graph: <" + self.current['fullpath'] + ">", 'TIP') self._print("----------------", "TIP") self._printStats(self.current['graph']) for obj in self.current['graph'].all_ontologies: print(Style.BRIGHT + "Ontology URI: " + Style.RESET_ALL + Fore.RED + "<%s>" % str(obj.uri) + Style.RESET_ALL) # self._print("==> Ontology URI: <%s>" % str(obj.uri), "IMPORTANT") # self._print("----------------", "TIP") label = obj.bestLabel() or NOTFOUND description = obj.bestDescription() or NOTFOUND print(Style.BRIGHT + "Title : " + Style.RESET_ALL + Fore.BLACK + label + Style.RESET_ALL) print(Style.BRIGHT + "Description : " + Style.RESET_ALL + Fore.BLACK + description + Style.RESET_ALL) self._print("----------------", "TIP")
generic method to print out a description
def _string_to_record_type(string): ''' Convert a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type
Convert a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType`
def addBarcodesToIdentifier(read, UMI, cell): '''extract the identifier from a read and append the UMI and cell barcode before the first space''' read_id = read.identifier.split(" ") if cell == "": read_id[0] = read_id[0] + "_" + UMI else: read_id[0] = read_id[0] + "_" + cell + "_" + UMI identifier = " ".join(read_id) return identifier
extract the identifier from a read and append the UMI and cell barcode before the first space
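A quick check with a stand-in read object — only the `identifier` attribute is used, so a minimal stub suffices:

class FakeRead(object):
    identifier = '@SRR001 1:N:0:ATCG'

print(addBarcodesToIdentifier(FakeRead(), UMI='ACGT', cell='TTAA'))
# @SRR001_TTAA_ACGT 1:N:0:ATCG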
def _get_ckptmgr_process(self): ''' Get the command to start the checkpoint manager process''' ckptmgr_main_class = 'org.apache.heron.ckptmgr.CheckpointManager' ckptmgr_ram_mb = self.checkpoint_manager_ram / (1024 * 1024) ckptmgr_cmd = [os.path.join(self.heron_java_home, "bin/java"), '-Xms%dM' % ckptmgr_ram_mb, '-Xmx%dM' % ckptmgr_ram_mb, '-XX:+PrintCommandLineFlags', '-verbosegc', '-XX:+PrintGCDetails', '-XX:+PrintGCTimeStamps', '-XX:+PrintGCDateStamps', '-XX:+PrintGCCause', '-XX:+UseGCLogFileRotation', '-XX:NumberOfGCLogFiles=5', '-XX:GCLogFileSize=100M', '-XX:+PrintPromotionFailure', '-XX:+PrintTenuringDistribution', '-XX:+PrintHeapAtGC', '-XX:+HeapDumpOnOutOfMemoryError', '-XX:+UseConcMarkSweepGC', '-XX:+UseConcMarkSweepGC', '-Xloggc:log-files/gc.ckptmgr.log', '-Djava.net.preferIPv4Stack=true', '-cp', self.checkpoint_manager_classpath, ckptmgr_main_class, '-t' + self.topology_name, '-i' + self.topology_id, '-c' + self.ckptmgr_ids[self.shard], '-p' + self.checkpoint_manager_port, '-f' + self.stateful_config_file, '-o' + self.override_config_file, '-g' + self.heron_internals_config_file] retval = {} retval[self.ckptmgr_ids[self.shard]] = Command(ckptmgr_cmd, self.shell_env) return retval
Get the command to start the checkpoint manager process
def db990(self, value=None): """ Corresponds to IDD Field `db990` Dry-bulb temperature corresponding to 90.0% annual cumulative frequency of occurrence (cold conditions) Args: value (float): value for IDD Field `db990` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `db990`'.format(value)) self._db990 = value
Corresponds to IDD Field `db990` Dry-bulb temperature corresponding to 90.0% annual cumulative frequency of occurrence (cold conditions) Args: value (float): value for IDD Field `db990` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def _readintbe(self, length, start): """Read bits and interpret as a big-endian signed int.""" if length % 8: raise InterpretError("Big-endian integers must be whole-byte. " "Length = {0} bits.", length) return self._readint(length, start)
Read bits and interpret as a big-endian signed int.
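For whole-byte data the same big-endian signed interpretation is available in the standard library, which makes a handy cross-check:

data = b'\xff\xfe'
print(int.from_bytes(data, byteorder='big', signed=True))  # -2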
def get_exception_information(self, index): """ @type index: int @param index: Index into the exception information block. @rtype: int @return: Exception information DWORD. """ if index < 0 or index >= win32.EXCEPTION_MAXIMUM_PARAMETERS: raise IndexError("Array index out of range: %s" % repr(index)) info = self.raw.u.Exception.ExceptionRecord.ExceptionInformation value = info[index] if value is None: value = 0 return value
@type index: int @param index: Index into the exception information block. @rtype: int @return: Exception information DWORD.
def interrupt(self, data=None): """ Interrupt handling. :param data: the data to respond with; if omitted, no response is sent :return: """ self.interrupted = True if data is not None: return self.write(data) else: return True
Interrupt handling. :param data: the data to respond with; if omitted, no response is sent :return:
def _create_destination(self, server_id, dest_url, owned): """ Create a listener destination instance in the Interop namespace of a WBEM server and return that instance. In order to catch any changes the server applies, the instance is retrieved again using the instance path returned by instance creation. Parameters: server_id (:term:`string`): The server ID of the WBEM server, returned by :meth:`~pywbem.WBEMSubscriptionManager.add_server`. dest_url (:term:`string`): URL of the listener that is used by the WBEM server to send any indications to. The URL scheme (e.g. http/https) determines whether the WBEM server uses HTTP or HTTPS for sending the indication. Host and port in the URL specify the target location to be used by the WBEM server. owned (:class:`py:bool`): Defines whether or not the created instance is *owned* by the subscription manager. Returns: :class:`~pywbem.CIMInstance`: The created instance, as retrieved from the server. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`. """ # Validate server_id server = self._get_server(server_id) # validate the URL by reconstructing it. Do not allow defaults host, port, ssl = parse_url(dest_url, allow_defaults=False) schema = 'https' if ssl else 'http' listener_url = '{0}://{1}:{2}'.format(schema, host, port) this_host = getfqdn() ownership = "owned" if owned else "permanent" dest_path = CIMInstanceName(DESTINATION_CLASSNAME, namespace=server.interop_ns) dest_inst = CIMInstance(DESTINATION_CLASSNAME) dest_inst.path = dest_path dest_inst['CreationClassName'] = DESTINATION_CLASSNAME dest_inst['SystemCreationClassName'] = SYSTEM_CREATION_CLASSNAME dest_inst['SystemName'] = this_host dest_inst['Name'] = _format( 'pywbemdestination:{0}:{1}:{2}', ownership, self._subscription_manager_id, uuid.uuid4()) dest_inst['Destination'] = listener_url if owned: for i, inst in enumerate(self._owned_destinations[server_id]): if inst.path == dest_path: # It already exists, now check its properties if inst != dest_inst: server.conn.ModifyInstance(dest_inst) dest_inst = server.conn.GetInstance(dest_path) self._owned_destinations[server_id][i] = dest_inst return dest_inst dest_path = server.conn.CreateInstance(dest_inst) dest_inst = server.conn.GetInstance(dest_path) self._owned_destinations[server_id].append(dest_inst) else: # Responsibility to ensure it does not exist yet is with the user dest_path = server.conn.CreateInstance(dest_inst) dest_inst = server.conn.GetInstance(dest_path) return dest_inst
Create a listener destination instance in the Interop namespace of a WBEM server and return that instance. In order to catch any changes the server applies, the instance is retrieved again using the instance path returned by instance creation. Parameters: server_id (:term:`string`): The server ID of the WBEM server, returned by :meth:`~pywbem.WBEMSubscriptionManager.add_server`. dest_url (:term:`string`): URL of the listener that is used by the WBEM server to send any indications to. The URL scheme (e.g. http/https) determines whether the WBEM server uses HTTP or HTTPS for sending the indication. Host and port in the URL specify the target location to be used by the WBEM server. owned (:class:`py:bool`): Defines whether or not the created instance is *owned* by the subscription manager. Returns: :class:`~pywbem.CIMInstance`: The created instance, as retrieved from the server. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`.
def absent(name, profile="splunk"): ''' Ensure a search is absent .. code-block:: yaml API Error Search: splunk_search.absent The following parameters are required: name This is the name of the search in splunk ''' ret = { 'name': name, 'changes': {}, 'result': True, 'comment': '{0} is absent.'.format(name) } target = __salt__['splunk_search.get'](name, profile=profile) if target: if __opts__['test']: ret = {} ret["name"] = name ret['comment'] = "Would delete {0}".format(name) ret['result'] = None return ret result = __salt__['splunk_search.delete'](name, profile=profile) if result: ret['comment'] = '{0} was deleted'.format(name) else: ret['comment'] = 'Failed to delete {0}'.format(name) ret['result'] = False return ret
Ensure a search is absent .. code-block:: yaml API Error Search: splunk_search.absent The following parameters are required: name This is the name of the search in splunk
def strip_tx_attenuation(self, idx): """strip(2 bytes) tx_attenuation :idx: int :return: int idx :return: int """ idx = Radiotap.align(idx, 2) tx_attenuation, = struct.unpack_from('<H', self._rtap, idx) return idx + 2, tx_attenuation
strip(2 bytes) tx_attenuation :idx: int :return: int idx :return: int
def GenCatchallState(self): """Generate string matching state rules. This sets up initial state handlers that cover both the 'INITIAL' state and the intermediate content between fields. The lexer acts on items with precedence: - continuation characters: use the fast forward state rules. - field separators: finalize processing the field. - quotation characters: use the quotation state rules. """ for c in self.comments: self._AddToken(".", c, "PushState,EndField", "COMMENT") for c in self.cont: self._AddToken(".", c, "PushState", "FWD") for t in self.term: self._AddToken(".", t, "EndEntry", None) for s in self.sep: self._AddToken(".", s, "EndField", None) for i, q in enumerate(self.quot): self._AddToken(".", q, "PushState", "%s_STRING" % i) self._AddToken(".", ".", "AddToField", None)
Generate string matching state rules. This sets up initial state handlers that cover both the 'INITIAL' state and the intermediate content between fields. The lexer acts on items with precedence: - continuation characters: use the fast forward state rules. - field separators: finalize processing the field. - quotation characters: use the quotation state rules.
def format(self, number, **kwargs): """ Format a given number. Format a number, with comma-separated thousands and custom precision/decimal places Localise by overriding the precision and thousand / decimal separators 2nd parameter `precision` can be an object matching `settings.number` Args: number (TYPE): Description precision (TYPE): Description thousand (TYPE): Description decimal (TYPE): Description Returns: name (TYPE): Description """ # Recursively format lists if check_type(number, 'list'): return [self.format(val, **kwargs) for val in number] # Clean up number number = self.parse(number) # Build options object from second param (if object) or all params, extending defaults options = dict(self.settings['number'], **kwargs) # Clean up precision precision = self._change_precision(options['precision']) negative = (lambda num: "-" if num < 0 else "")(number) base = str(int(float(self.to_fixed(abs(number) or 0, precision)))) mod = (lambda num: len(num) % 3 if len(num) > 3 else 0)(base) # Format the number: num = negative + (lambda n: base[0:n] + options['thousand'] if n else '')(mod) num += re.sub(r'(\d{3})(?=\d)', lambda m: m.group(1) + options['thousand'], base[mod:]) num += (lambda val: options[ 'decimal'] + self.to_fixed(abs(number), precision) .split('.')[1] if val else '')(precision) return num
Format a given number. Format a number, with comma-separated thousands and custom precision/decimal places Localise by overriding the precision and thousand / decimal separators 2nd parameter `precision` can be an object matching `settings.number` Args: number (TYPE): Description precision (TYPE): Description thousand (TYPE): Description decimal (TYPE): Description Returns: name (TYPE): Description
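A plain-Python sanity check of the intended output; the method above appears to port accounting.js and needs the regex and option-building fixes applied in the code:

value = 1234567.891
print('{:,.2f}'.format(value))  # 1,234,567.89
# swapping in custom separators afterwards:
print('{:,.2f}'.format(value).replace(',', ' ').replace('.', ','))  # 1 234 567,89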
def _write_to_file(self, fileinfo, filename): """Low-level function for writing text of editor to file. Args: fileinfo: FileInfo object associated to editor to be saved filename: str with filename to save to This is a low-level function that only saves the text to file in the correct encoding without doing any error handling. """ txt = to_text_string(fileinfo.editor.get_text_with_eol()) fileinfo.encoding = encoding.write(txt, filename, fileinfo.encoding)
Low-level function for writing text of editor to file. Args: fileinfo: FileInfo object associated to editor to be saved filename: str with filename to save to This is a low-level function that only saves the text to file in the correct encoding without doing any error handling.
def validate(self): """Checks whether this OmapiStartupMessage matches the implementation. @raises OmapiError: """ if self.implemented_protocol_version != self.protocol_version: raise OmapiError("protocol mismatch") if self.implemented_header_size != self.header_size: raise OmapiError("header size mismatch")
Checks whether this OmapiStartupMessage matches the implementation. @raises OmapiError:
def K_diaphragm_valve_Crane(D=None, fd=None, style=0): r'''Returns the loss coefficient for a diaphragm valve of either weir (`style` = 0) or straight-through (`style` = 1) as shown in [1]_. .. math:: K = K_1 = K_2 = N\cdot f_d For style 0 (weir), N = 149; for style 1 (straight through), N = 39. Parameters ---------- D : float, optional Diameter of the pipe section the valve is mounted in; the same as the line size [m] fd : float, optional Darcy friction factor calculated for the actual pipe flow in clean steel (roughness = 0.0018 inch) in the fully developed turbulent region; do not specify this to use the original Crane friction factor!, [-] style : int, optional Either 0 (weir type valve) or 1 (straight through valve) [-] Returns ------- K : float Loss coefficient with respect to the pipe inside diameter [-] Notes ----- This method is not valid in the laminar regime and the pressure drop will be underestimated in those conditions. Examples -------- >>> K_diaphragm_valve_Crane(D=.1, style=0) 2.4269804835982565 References ---------- .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009. ''' if D is None and fd is None: raise ValueError('Either `D` or `fd` must be specified') if fd is None: fd = ft_Crane(D) try: K = diaphragm_valve_Crane_coeffs[style]*fd except KeyError: raise KeyError('Accepted valve styles are 0 (weir) or 1 (straight through) only') return K
r'''Returns the loss coefficient for a diaphragm valve of either weir (`style` = 0) or straight-through (`style` = 1) as shown in [1]_. .. math:: K = K_1 = K_2 = N\cdot f_d For style 0 (weir), N = 149; for style 1 (straight through), N = 39. Parameters ---------- D : float, optional Diameter of the pipe section the valve is mounted in; the same as the line size [m] fd : float, optional Darcy friction factor calculated for the actual pipe flow in clean steel (roughness = 0.0018 inch) in the fully developed turbulent region; do not specify this to use the original Crane friction factor!, [-] style : int, optional Either 0 (weir type valve) or 1 (straight through valve) [-] Returns ------- K : float Loss coefficient with respect to the pipe inside diameter [-] Notes ----- This method is not valid in the laminar regime and the pressure drop will be underestimated in those conditions. Examples -------- >>> K_diaphragm_valve_Crane(D=.1, style=0) 2.4269804835982565 References ---------- .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009.
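A numeric cross-check of the docstring example: with style 0 the relation is K = 149*fd, so the Crane friction factor implied at D = 0.1 m can be recovered directly:

K = 2.4269804835982565
fd = K / 149.0
print(fd)          # ≈ 0.016288
print(149.0 * fd)  # recovers 2.4269804835982565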
def _get_spark_app_ids(self, running_apps, requests_config, tags): """ Traverses the Spark application master in YARN to get a Spark application ID. Return a dictionary of {app_id: (app_name, tracking_url)} for Spark applications """ spark_apps = {} for app_id, (app_name, tracking_url) in iteritems(running_apps): response = self._rest_request_to_json( tracking_url, SPARK_APPS_PATH, SPARK_SERVICE_CHECK, requests_config, tags ) for app in response: app_id = app.get('id') app_name = app.get('name') if app_id and app_name: spark_apps[app_id] = (app_name, tracking_url) return spark_apps
Traverses the Spark application master in YARN to get a Spark application ID. Return a dictionary of {app_id: (app_name, tracking_url)} for Spark applications
def get_house_conn_gen_load(graph, node): """ Get generation capacity/ peak load of neighboring house connected to main branch Parameters ---------- graph : :networkx:`NetworkX Graph Obj< >` Directed graph node : graph node Node of the main branch of LV grid Returns ------- :any:`list` A list containing two items # peak load of connected house branch # generation capacity of connected generators """ generation = 0 peak_load = 0 for cus_1 in graph.successors(node): for cus_2 in graph.successors(cus_1): if not isinstance(cus_2, list): cus_2 = [cus_2] generation += sum([gen.capacity for gen in cus_2 if isinstance(gen, GeneratorDing0)]) peak_load += sum([load.peak_load for load in cus_2 if isinstance(load, LVLoadDing0)]) return [peak_load, generation]
Get generation capacity/ peak load of neighboring house connected to main branch Parameters ---------- graph : :networkx:`NetworkX Graph Obj< >` Directed graph node : graph node Node of the main branch of LV grid Returns ------- :any:`list` A list containing two items # peak load of connected house branch # generation capacity of connected generators
def obj_assd(result, reference, voxelspacing=None, connectivity=1): """ Average symmetric surface distance. Computes the average symmetric surface distance (ASSD) between the binary objects in two images. Parameters ---------- result : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. reference : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. voxelspacing : float or sequence of floats, optional The voxelspacing in a distance unit i.e. spacing of elements along each dimension. If a sequence, must be of length equal to the input rank; if a single number, this is used for all axes. If not specified, a grid spacing of unity is implied. connectivity : int The neighbourhood/connectivity considered when determining what accounts for a distinct binary object as well as when determining the surface of the binary objects. This value is passed to `scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`. The decision on the connectivity is important, as it can influence the results strongly. If in doubt, leave it as it is. Returns ------- assd : float The average symmetric surface distance between all mutually existing distinct binary object(s) in ``result`` and ``reference``. The distance unit is the same as for the spacing of elements along each dimension, which is usually given in mm. See also -------- :func:`obj_asd` Notes ----- This is a real metric, obtained by calling and averaging >>> obj_asd(result, reference) and >>> obj_asd(reference, result) The binary images can therefore be supplied in any order. """ assd = numpy.mean( (obj_asd(result, reference, voxelspacing, connectivity), obj_asd(reference, result, voxelspacing, connectivity)) ) return assd
Average symmetric surface distance. Computes the average symmetric surface distance (ASSD) between the binary objects in two images. Parameters ---------- result : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. reference : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. voxelspacing : float or sequence of floats, optional The voxelspacing in a distance unit i.e. spacing of elements along each dimension. If a sequence, must be of length equal to the input rank; if a single number, this is used for all axes. If not specified, a grid spacing of unity is implied. connectivity : int The neighbourhood/connectivity considered when determining what accounts for a distinct binary object as well as when determining the surface of the binary objects. This value is passed to `scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`. The decision on the connectivity is important, as it can influence the results strongly. If in doubt, leave it as it is. Returns ------- assd : float The average symmetric surface distance between all mutually existing distinct binary object(s) in ``result`` and ``reference``. The distance unit is the same as for the spacing of elements along each dimension, which is usually given in mm. See also -------- :func:`obj_asd` Notes ----- This is a real metric, obtained by calling and averaging >>> obj_asd(result, reference) and >>> obj_asd(reference, result) The binary images can therefore be supplied in any order.
def _wait_non_ressources(self, callback): """This gets started as a thread; it waits for the data lock to be freed, then advertises itself to the SelectableSelector using the callback""" self.trigger = threading.Lock() self.was_ended = False self.trigger.acquire() self.trigger.acquire() if not self.was_ended: callback(self)
This gets started as a thread; it waits for the data lock to be freed, then advertises itself to the SelectableSelector using the callback
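The double-acquire idiom above is easy to miss: the first acquire() takes the lock, and the second one blocks the thread until some other thread calls release(). A minimal self-contained sketch of the same pattern:

import threading, time

gate = threading.Lock()
gate.acquire()                    # first acquire succeeds and holds the lock

def waiter():
    gate.acquire()                # second acquire blocks until release()
    print('lock freed, advertising now')

threading.Thread(target=waiter).start()
time.sleep(0.1)
gate.release()                    # wakes the waiting thread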
def get_service_display_name(name): """ Get the service display name for the given service name. @see: L{get_service} @type name: str @param name: Service unique name. You can get this value from the C{ServiceName} member of the service descriptors returned by L{get_services} or L{get_active_services}. @rtype: str @return: Service display name. """ with win32.OpenSCManager( dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE ) as hSCManager: return win32.GetServiceDisplayName(hSCManager, name)
Get the service display name for the given service name. @see: L{get_service} @type name: str @param name: Service unique name. You can get this value from the C{ServiceName} member of the service descriptors returned by L{get_services} or L{get_active_services}. @rtype: str @return: Service display name.
def convertLatLngToPixelXY(self, lat, lng, level): ''' returns the x and y values of the pixel corresponding to a latitude and longitude. ''' mapSize = self.getMapDimensionsByZoomLevel(level) lat = self.clipValue(lat, self.min_lat, self.max_lat) lng = self.clipValue(lng, self.min_lng, self.max_lng) x = (lng + 180) / 360 sinlat = math.sin(lat * math.pi / 180) y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi) pixelX = int(self.clipValue(x * mapSize + 0.5, 0, mapSize - 1)) pixelY = int(self.clipValue(y * mapSize + 0.5, 0, mapSize - 1)) return (pixelX, pixelY)
returns the x and y values of the pixel corresponding to a latitude and longitude.
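A self-contained sketch of the same Web Mercator math, assuming the conventional 256 * 2**level pixel map size (the original's getMapDimensionsByZoomLevel and clip bounds are not shown, so those are assumptions):

import math

def latlng_to_pixel(lat, lng, level, tile_size=256):
    # Map is tile_size * 2**level pixels on a side at this zoom level
    map_size = tile_size << level
    x = (lng + 180.0) / 360.0
    sinlat = math.sin(math.radians(lat))
    y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi)
    def clip(v):
        return min(max(v, 0), map_size - 1)
    return int(clip(x * map_size + 0.5)), int(clip(y * map_size + 0.5))

print(latlng_to_pixel(47.61, -122.33, 10))  # Seattle at zoom 10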
def teardown_socket(s): """Shuts down and closes a socket.""" try: s.shutdown(socket.SHUT_WR) except socket.error: pass finally: s.close()
Shuts down and closes a socket.
def frames( self, *, callers: Optional[Union[str, List[str]]] = None, callees: Optional[Union[str, List[str]]] = None, kind: Optional[TraceKind] = None, limit: Optional[int] = 10, ): """Display trace frames independent of the current issue. Parameters (all optional): callers: str or list[str] filter traces by this caller name callees: str or list[str] filter traces by this callee name kind: precondition|postcondition the type of trace frames to show limit: int (default: 10) how many trace frames to display (specify limit=None for all) Sample usage: frames callers="module.function", kind=postcondition String filters support LIKE wildcards (%, _) from SQL: % matches anything (like .* in regex) _ matches 1 character (like . in regex) """ with self.db.make_session() as session: query = ( session.query( TraceFrame.id, CallerText.contents.label("caller"), TraceFrame.caller_port, CalleeText.contents.label("callee"), TraceFrame.callee_port, ) .filter(TraceFrame.run_id == self.current_run_id) .join(CallerText, CallerText.id == TraceFrame.caller_id) .join(CalleeText, CalleeText.id == TraceFrame.callee_id) ) if callers is not None: query = self._add_list_or_string_filter_to_query( callers, query, CallerText.contents, "callers" ) if callees is not None: query = self._add_list_or_string_filter_to_query( callees, query, CalleeText.contents, "callees" ) if kind is not None: if kind not in {TraceKind.PRECONDITION, TraceKind.POSTCONDITION}: raise UserError( "Try 'frames kind=postcondition'" " or 'frames kind=precondition'." ) query = query.filter(TraceFrame.kind == kind) if limit is not None and not isinstance(limit, int): raise UserError("'limit' should be an int or None.") trace_frames = query.group_by(TraceFrame.id).order_by( CallerText.contents, CalleeText.contents ) total_trace_frames = trace_frames.count() limit = limit or total_trace_frames self._output_trace_frames( self._group_trace_frames(trace_frames, limit), limit, total_trace_frames )
Display trace frames independent of the current issue. Parameters (all optional): callers: str or list[str] filter traces by this caller name callees: str or list[str] filter traces by this callee name kind: precondition|postcondition the type of trace frames to show limit: int (default: 10) how many trace frames to display (specify limit=None for all) Sample usage: frames callers="module.function", kind=postcondition String filters support LIKE wildcards (%, _) from SQL: % matches anything (like .* in regex) _ matches 1 character (like . in regex)
def search_dashboard_entities(self, **kwargs):  # noqa: E501 """Search over a customer's non-deleted dashboards This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_dashboard_entities(async_req=True) >>> result = thread.get() :param async_req bool :param SortableSearchRequest body: :return: ResponseContainerPagedDashboard If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.search_dashboard_entities_with_http_info(**kwargs)  # noqa: E501 else: (data) = self.search_dashboard_entities_with_http_info(**kwargs)  # noqa: E501 return data
Search over a customer's non-deleted dashboards This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_dashboard_entities(async_req=True) >>> result = thread.get() :param async_req bool :param SortableSearchRequest body: :return: ResponseContainerPagedDashboard If the method is called asynchronously, returns the request thread.
async def insert_news(self, **params): """Inserts news for account Accepts: - event_type - cid - access_string (of buyer) - buyer_pubkey - buyer address - owner address - price - offer type - coin ID Returns: - dict with result """ logging.debug("\n\n [+] -- Setting news debugging. ") if params.get("message"): params = json.loads(params.get("message", "{}")) if not params: return {"error":400, "reason":"Missed required fields"} logging.debug(" *** Params") event_type = params.get("event_type") cid = params.get("cid") access_string = params.get("access_string") buyer_pubkey = params.get("buyer_pubkey") buyer_address = params.get("buyer_address") owneraddr = params.get("owneraddr") price = params.get("price") offer_type = int(params.get("offer_type", -1)) coinid = params.get("coinid", "").upper() # Strip the testnet suffix if present coinid = coinid.replace("TEST", "") logging.debug("\n ** Coinid") logging.debug(coinid) # Resolve the bridge endpoint for this coin if coinid in settings.bridges.keys(): self.account.blockchain.setendpoint(settings.bridges[coinid]) else: return {"error":400, "reason": "Invalid coin ID"} # The on-chain owner address is authoritative and overrides the parameter owneraddr = await self.account.blockchain.ownerbycid(cid=cid) # Get sellers account seller = await getaccountbywallet(wallet=owneraddr) if "error" in seller.keys(): return seller # Connect to news table news_collection = self.database[settings.NEWS] # Get sellers price self.account.blockchain.setendpoint(settings.bridges[coinid]) if offer_type == 1: seller_price = await self.account.blockchain.getwriteprice(cid=cid) elif offer_type == 0: seller_price = await self.account.blockchain.getreadprice(cid=cid) row = {"offer_type": self.account.ident_offer[offer_type], "buyer_address":buyer_address, "cid":cid, "access_string":access_string, "buyer_pubkey": buyer_pubkey, "seller_price": seller_price, "buyer_price": price, "account_id": seller["id"], "event_type": event_type, "coinid":coinid} logging.debug("\n ** Inserting row") logging.debug(row) # Update counter inside accounts table database = client[settings.DBNAME] collection = database[settings.ACCOUNTS] await collection.find_one_and_update( {"id": int(seller["id"])}, {"$inc": {"news_count": 1}}) # Insert data to news table await news_collection.insert_one(row) logging.debug("\n ** Fresh news") fresh = await news_collection.find_one({"buyer_address":buyer_address, "cid":cid}) logging.debug(fresh) return {"result":"ok"}
Inserts news for account Accepts: - event_type - cid - access_string (of buyer) - buyer_pubkey - buyer address - owner address - price - offer type - coin ID Returns: - dict with result
def visit_named_list(self, _, children): """Manage a list, represented by a ``.resources.List`` instance. This list is populated with data from the result of the ``FILTERS``. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. - 1: for ``LIST``: a ``List`` resource Example ------- >>> DataQLParser(r'foo(1)[name]', default_rule='NAMED_LIST').data <List[foo] .foo(1)> <Field[name] /> </List[foo]> """ filters, resource = children resource.name = filters[0].name resource.filters = filters return resource
Manage a list, represented by a ``.resources.List`` instance. This list is populated with data from the result of the ``FILTERS``. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. - 1: for ``LIST``: a ``List`` resource Example ------- >>> DataQLParser(r'foo(1)[name]', default_rule='NAMED_LIST').data <List[foo] .foo(1)> <Field[name] /> </List[foo]>
def is_empty(self): '''Returns True if all titleInfo subfields are unset or empty; returns False if any subfield is non-empty.''' return not bool(self.title or self.subtitle or self.part_number or self.part_name or self.non_sort or self.type)
Returns True if all titleInfo subfields are unset or empty; returns False if any subfield is non-empty.
def output(data, **kwargs): # pylint: disable=unused-argument ''' Read in the dict structure generated by the salt key API methods and print the structure. ''' color = salt.utils.color.get_colors( __opts__.get('color'), __opts__.get('color_theme')) strip_colors = __opts__.get('strip_colors', True) ident = 0 if __opts__.get('__multi_key'): ident = 4 if __opts__['transport'] in ('zeromq', 'tcp'): acc = 'minions' pend = 'minions_pre' den = 'minions_denied' rej = 'minions_rejected' cmap = {pend: color['RED'], acc: color['GREEN'], den: color['MAGENTA'], rej: color['BLUE'], 'local': color['MAGENTA']} trans = {pend: u'{0}{1}Unaccepted Keys:{2}'.format( ' ' * ident, color['LIGHT_RED'], color['ENDC']), acc: u'{0}{1}Accepted Keys:{2}'.format( ' ' * ident, color['LIGHT_GREEN'], color['ENDC']), den: u'{0}{1}Denied Keys:{2}'.format( ' ' * ident, color['LIGHT_MAGENTA'], color['ENDC']), rej: u'{0}{1}Rejected Keys:{2}'.format( ' ' * ident, color['LIGHT_BLUE'], color['ENDC']), 'local': u'{0}{1}Local Keys:{2}'.format( ' ' * ident, color['LIGHT_MAGENTA'], color['ENDC'])} else: acc = 'accepted' pend = 'pending' rej = 'rejected' cmap = {pend: color['RED'], acc: color['GREEN'], rej: color['BLUE'], 'local': color['MAGENTA']} trans = {pend: u'{0}{1}Unaccepted Keys:{2}'.format( ' ' * ident, color['LIGHT_RED'], color['ENDC']), acc: u'{0}{1}Accepted Keys:{2}'.format( ' ' * ident, color['LIGHT_GREEN'], color['ENDC']), rej: u'{0}{1}Rejected Keys:{2}'.format( ' ' * ident, color['LIGHT_BLUE'], color['ENDC']), 'local': u'{0}{1}Local Keys:{2}'.format( ' ' * ident, color['LIGHT_MAGENTA'], color['ENDC'])} ret = '' for status in sorted(data): ret += u'{0}\n'.format(trans[status]) for key in sorted(data[status]): key = salt.utils.data.decode(key) skey = salt.output.strip_esc_sequence(key) if strip_colors else key if isinstance(data[status], list): ret += u'{0}{1}{2}{3}\n'.format( ' ' * ident, cmap[status], skey, color['ENDC']) if isinstance(data[status], dict): ret += u'{0}{1}{2}: {3}{4}\n'.format( ' ' * ident, cmap[status], skey, data[status][key], color['ENDC']) return ret
Read in the dict structure generated by the salt key API methods and print the structure.
def use_plenary_grade_entry_view(self): """Pass through to provider GradeEntryLookupSession.use_plenary_grade_entry_view""" self._object_views['grade_entry'] = PLENARY # self._get_provider_session('grade_entry_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_plenary_grade_entry_view() except AttributeError: pass
Pass through to provider GradeEntryLookupSession.use_plenary_grade_entry_view
def avl_new_top(t1, t2, top, direction=0): """ if direction == 0: (t1, t2) is (left, right) if direction == 1: (t1, t2) is (right, left) """ top.parent = None assert top.parent is None, str(top.parent.value) top.set_child(direction, t1) top.set_child(1 - direction, t2) top.balance = max(height(t1), height(t2)) + 1 return top
if direction == 0: (t1, t2) is (left, right) if direction == 1: (t1, t2) is (right, left)
def terminate_processes(pid_list): """Terminate a list of processes by sending to each of them a SIGTERM signal, pre-emptively checking if its PID might have been reused. Parameters ---------- pid_list : list A list of process identifiers identifying active processes. """ for proc in psutil.process_iter(): if proc.pid in pid_list: proc.terminate()
Terminate a list of processes by sending to each of them a SIGTERM signal, pre-emptively checking if its PID might have been reused. Parameters ---------- pid_list : list A list of process identifiers identifying active processes.
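A hypothetical usage sketch of the function above; it assumes psutil is installed and a POSIX 'sleep' binary is available:

import subprocess

workers = [subprocess.Popen(['sleep', '60']) for _ in range(2)]
terminate_processes([w.pid for w in workers])
for w in workers:
    w.wait()  # reap the children; return codes reflect SIGTERM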
def to_(self, off_pts): """Reverse of :meth:`from_`.""" off_pts = np.asarray(off_pts, dtype=float)  # np.float is removed in modern NumPy has_z = (off_pts.shape[-1] > 2) # scale according to current settings scale_pt = [self.viewer._org_scale_x, self.viewer._org_scale_y] if has_z: scale_pt.append(self.viewer._org_scale_z) off_pts = np.multiply(off_pts, scale_pt) return off_pts
Reverse of :meth:`from_`.
def refactor_string(self, data, name): """Refactor a given input string. Args: data: a string holding the code to be refactored. name: a human-readable name for use in error/log messages. Returns: An AST corresponding to the refactored input stream; None if there were errors during the parse. """ features = _detect_future_features(data) if "print_function" in features: self.driver.grammar = pygram.python_grammar_no_print_statement try: tree = self.driver.parse_string(data) except Exception as err: self.log_error("Can't parse %s: %s: %s", name, err.__class__.__name__, err) return finally: self.driver.grammar = self.grammar tree.future_features = features self.log_debug("Refactoring %s", name) self.refactor_tree(tree, name) return tree
Refactor a given input string. Args: data: a string holding the code to be refactored. name: a human-readable name for use in error/log messages. Returns: An AST corresponding to the refactored input stream; None if there were errors during the parse.
def select_delay_factor(self, delay_factor): """ Choose the greater of delay_factor or self.global_delay_factor (default). In fast_cli choose the lesser of delay_factor of self.global_delay_factor. :param delay_factor: See __init__: global_delay_factor :type delay_factor: int """ if self.fast_cli: if delay_factor <= self.global_delay_factor: return delay_factor else: return self.global_delay_factor else: if delay_factor >= self.global_delay_factor: return delay_factor else: return self.global_delay_factor
Choose the greater of delay_factor or self.global_delay_factor (default). In fast_cli choose the lesser of delay_factor of self.global_delay_factor. :param delay_factor: See __init__: global_delay_factor :type delay_factor: int
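The branching above reduces to min/max; an equivalent standalone sketch makes the selection rule obvious:

def pick_delay_factor(delay_factor, global_delay_factor, fast_cli):
    # fast_cli prefers the smaller (faster) factor; normal mode the larger (safer)
    return min(delay_factor, global_delay_factor) if fast_cli \
        else max(delay_factor, global_delay_factor)

assert pick_delay_factor(2, 1, fast_cli=False) == 2
assert pick_delay_factor(2, 1, fast_cli=True) == 1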
def validate(self): """Validate that the GremlinFoldedContextField is correctly representable.""" if not isinstance(self.fold_scope_location, FoldScopeLocation): raise TypeError(u'Expected FoldScopeLocation fold_scope_location, got: {} {}'.format( type(self.fold_scope_location), self.fold_scope_location)) allowed_block_types = (GremlinFoldedFilter, GremlinFoldedTraverse, Backtrack) for block in self.folded_ir_blocks: if not isinstance(block, allowed_block_types): raise AssertionError( u'Found invalid block of type {} in folded_ir_blocks: {} ' u'Allowed types are {}.' .format(type(block), self.folded_ir_blocks, allowed_block_types)) if not isinstance(self.field_type, GraphQLList): raise ValueError(u'Invalid value of "field_type", expected a list type but got: ' u'{}'.format(self.field_type)) inner_type = strip_non_null_from_type(self.field_type.of_type) if isinstance(inner_type, GraphQLList): raise GraphQLCompilationError( u'Outputting list-valued fields in a @fold context is currently ' u'not supported: {} {}'.format(self.fold_scope_location, self.field_type.of_type))
Validate that the GremlinFoldedContextField is correctly representable.
def create_server(initialize=True): """Create a server""" with provider() as p: host_string = p.create_server() if initialize: env.host_string = host_string initialize_server()
Create a server
def create_script_fact(self): """ appends the CREATE TABLE, index etc to self.ddl_text """ self.ddl_text += '---------------------------------------------\n' self.ddl_text += '-- CREATE Fact Table - ' + self.fact_table + '\n' self.ddl_text += '---------------------------------------------\n' self.ddl_text += 'DROP TABLE ' + self.fact_table + ' CASCADE CONSTRAINTS;\n' self.ddl_text += 'CREATE TABLE ' + self.fact_table + ' (\n' self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in self.col_list]) self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n' self.ddl_text += ');\n'
appends the CREATE TABLE, index etc to self.ddl_text
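A standalone sketch of the DDL string this method assembles, with hypothetical table and column names substituted for the instance attributes:

col_list = ['customer', 'product']          # hypothetical columns
fact_table, date_updated_col = 'F_SALES', 'UPDATE_DATE'
ddl = 'DROP TABLE ' + fact_table + ' CASCADE CONSTRAINTS;\n'
ddl += 'CREATE TABLE ' + fact_table + ' (\n'
ddl += ' '.join(col + ' VARCHAR2(200), \n' for col in col_list)
ddl += ' ' + date_updated_col + ' DATE \n);\n'
print(ddl)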
def result(self, *args, **kwargs): """ Build the SQL query """ prettify = kwargs.get('pretty', False) sql = 'CREATE %s %s' % (self._type, self._class) if prettify: sql += '\n' else: sql += ' ' if self._type.lower() == 'edge': sql += " FROM %s TO %s " % (self._from, self._to) if self._cluster: sql += 'CLUSTER %s' % self._cluster if prettify: sql += '\n' else: sql += ' ' if self.data: sql += 'CONTENT ' + json.dumps(self.data) return sql
Build the SQL query
def get_bonds(input_group): """Utility function to get indices (in pairs) of the bonds.""" out_list = [] for i in range(len(input_group.bond_order_list)): out_list.append((input_group.bond_atom_list[i * 2], input_group.bond_atom_list[i * 2 + 1],)) return out_list
Utility function to get indices (in pairs) of the bonds.
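A hypothetical usage with a mock mmtf-style group, showing how bond i spans flattened indices 2*i and 2*i + 1 in bond_atom_list:

from types import SimpleNamespace

group = SimpleNamespace(bond_order_list=[1, 2],       # two bonds
                        bond_atom_list=[0, 1, 1, 2])  # flattened index pairs
print(get_bonds(group))  # [(0, 1), (1, 2)]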
def format_file_node(import_graph, node, indent): """Prettyprint nodes based on their provenance.""" f = import_graph.provenance[node] if isinstance(f, resolve.Direct): out = '+ ' + f.short_path elif isinstance(f, resolve.Local): out = ' ' + f.short_path elif isinstance(f, resolve.System): out = ':: ' + f.short_path elif isinstance(f, resolve.Builtin): out = '(%s)' % f.module_name else: out = '%r' % node return ' '*indent + out
Prettyprint nodes based on their provenance.
def merge_lists(*args): """Merge an arbitrary number of lists into a single list and dedupe it Args: *args: Two or more lists Returns: A deduped merged list of all the provided lists as a single list """ out = {} for contacts in filter(None, args): for contact in contacts: out[contact.value] = contact return list(out.values())
Merge an arbitrary number of lists into a single list and dedupe it Args: *args: Two or more lists Returns: A deduped merged list of all the provided lists as a single list
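A hypothetical usage showing the dedupe-by-.value behavior, with entries from later lists winning on collisions and None arguments filtered out:

from collections import namedtuple

Contact = namedtuple('Contact', 'value name')
a = [Contact('a@x.io', 'Ann')]
b = [Contact('a@x.io', 'Anna'), Contact('b@x.io', 'Bob')]
merged = merge_lists(a, b, None)       # None entries are skipped
print(sorted(c.name for c in merged))  # ['Anna', 'Bob'] - later list wins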
def check_existens_of_staging_tag_in_remote_repo(): """ This method checks whether the given tag exists as a staging tag in the remote repository. The intention is that every tag which should be deployed to a production environment has to be deployed to a staging environment first. """ staging_tag = Git.create_git_version_tag(APISettings.GIT_STAGING_PRE_TAG) command_git = 'git ls-remote -t' command_awk = 'awk \'{print $2}\'' command_cut_1 = 'cut -d \'/\' -f 3' command_cut_2 = 'cut -d \'^\' -f 1' command_sort = 'sort -b -t . -k 1,1nr -k 2,2nr -k 3,3r -k 4,4r -k 5,5r' command_uniq = 'uniq' command = command_git + ' | ' + command_awk + ' | ' + command_cut_1 + ' | ' + \ command_cut_2 + ' | ' + command_sort + ' | ' + command_uniq list_of_tags = str(check_output(command, shell=True)) if staging_tag in list_of_tags: return True return False
This method checks whether the given tag exists as a staging tag in the remote repository. The intention is that every tag which should be deployed to a production environment has to be deployed to a staging environment first.
def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None: ''' Bind the buffer to a uniform block. Args: binding (int): The uniform block binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all. ''' self.mglo.bind_to_uniform_block(binding, offset, size)
Bind the buffer to a uniform block. Args: binding (int): The uniform block binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
def p_BIT_ix(p): """ asm : bitop expr COMMA reg8_I | bitop pexpr COMMA reg8_I """ bit = p[2].eval() if bit < 0 or bit > 7: error(p.lineno(3), 'Invalid bit position %i. Must be in [0..7]' % bit) p[0] = None return p[0] = Asm(p.lineno(3), '%s %i,%s' % (p[1], bit, p[4][0]), p[4][1])
asm : bitop expr COMMA reg8_I | bitop pexpr COMMA reg8_I
def create_chunker(self, chunk_size): """Create a chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with a specific, expected chunk size. Args: chunk_size (int): (Expected) target chunk size. Returns: BaseChunker: A chunker object. """ rolling_hash = _rabinkarprh.RabinKarpHash(self.window_size, self._seed) rolling_hash.set_threshold(1.0 / chunk_size) return RabinKarpCDC._Chunker(rolling_hash)
Create a chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with a specific, expected chunk size. Args: chunk_size (int): (Expected) target chunk size. Returns: BaseChunker: A chunker object.
def specify_data_set(self, x_input, y_input): """ Define input to ACE. Parameters ---------- x_input : list list of iterables, one for each independent variable y_input : array the dependent observations """ self.x = x_input self.y = y_input
Define input to ACE. Parameters ---------- x_input : list list of iterables, one for each independent variable y_input : array the dependent observations
def _edge_opposite_point(self, tri, i): """ Given a triangle, return the edge that is opposite point i. Vertexes are returned in the same orientation as in tri. """ ind = tri.index(i) return (tri[(ind+1) % 3], tri[(ind+2) % 3])
Given a triangle, return the edge that is opposite point i. Vertexes are returned in the same orientation as in tri.
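A quick worked example of the index arithmetic, inlined so no class instance is needed: in triangle (5, 9, 2), the edge opposite vertex 9 is (2, 5), preserving orientation:

tri = (5, 9, 2)
ind = tri.index(9)                               # ind == 1
print((tri[(ind + 1) % 3], tri[(ind + 2) % 3]))  # (2, 5)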
def vperp(a, b): """ Find the component of a vector that is perpendicular to a second vector. All vectors are 3-dimensional. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vperp_c.html :param a: The vector whose orthogonal component is sought. :type a: 3-Element Array of floats :param b: The vector used as the orthogonal reference. :type b: 3-Element Array of floats :return: The component of a orthogonal to b. :rtype: 3-Element Array of floats """ a = stypes.toDoubleVector(a) b = stypes.toDoubleVector(b) vout = stypes.emptyDoubleVector(3) libspice.vperp_c(a, b, vout) return stypes.cVectorToPython(vout)
Find the component of a vector that is perpendicular to a second vector. All vectors are 3-dimensional. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vperp_c.html :param a: The vector whose orthogonal component is sought. :type a: 3-Element Array of floats :param b: The vector used as the orthogonal reference. :type b: 3-Element Array of floats :return: The component of a orthogonal to b. :rtype: 3-Element Array of floats
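The same decomposition in plain NumPy, as a cross-check of what the SPICE call computes: the perpendicular component is a minus the projection of a onto b:

import numpy as np

def vperp_np(a, b):
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    # Subtract the component of a along b
    return a - (np.dot(a, b) / np.dot(b, b)) * b

print(vperp_np([6.0, 6.0, 6.0], [2.0, 0.0, 0.0]))  # [0. 6. 6.]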
def aveknt(t, k): """Compute the running average of `k` successive elements of `t`. Return the averaged array. Parameters: t: Python list or rank-1 array k: int, >= 2, how many successive elements to average Returns: rank-1 array, averaged data. If k > len(t), returns a zero-length array. Caveat: This is slightly different from MATLAB's aveknt, which returns the running average of `k`-1 successive elements of ``t[1:-1]`` (and the empty vector if ``len(t) - 2 < k - 1``). """ t = np.atleast_1d(t) if t.ndim > 1: raise ValueError("t must be a list or a rank-1 array") n = t.shape[0] u = max(0, n - (k-1)) # number of elements in the output array out = np.empty( (u,), dtype=t.dtype ) for j in range(u): out[j] = sum( t[j:(j+k)] ) / k return out
Compute the running average of `k` successive elements of `t`. Return the averaged array. Parameters: t: Python list or rank-1 array k: int, >= 2, how many successive elements to average Returns: rank-1 array, averaged data. If k > len(t), returns a zero-length array. Caveat: This is slightly different from MATLAB's aveknt, which returns the running average of `k`-1 successive elements of ``t[1:-1]`` (and the empty vector if ``len(t) - 2 < k - 1``).
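A vectorized equivalent using a convolution, handy for cross-checking the loop above; it reproduces the same output shape, including the zero-length result when k exceeds len(t):

import numpy as np

def aveknt_conv(t, k):
    t = np.atleast_1d(t).astype(float)
    if t.shape[0] < k:
        return np.empty((0,))
    # 'valid' mode yields exactly len(t) - k + 1 window averages
    return np.convolve(t, np.ones(k) / k, mode='valid')

print(aveknt_conv([0, 1, 2, 3, 4], 3))  # [1. 2. 3.]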
def hs_mux(sel, ls_hsi, hso): """ [Many-to-one] Multiplexes a list of input handshake interfaces sel - (i) selects an input handshake interface to be connected to the output ls_hsi - (i) list of input handshake tuples (ready, valid) hso - (o) output handshake tuple (ready, valid) """ N = len(ls_hsi) ls_hsi_rdy, ls_hsi_vld = zip(*ls_hsi) ls_hsi_rdy, ls_hsi_vld = list(ls_hsi_rdy), list(ls_hsi_vld) hso_rdy, hso_vld = hso @always_comb def _hsmux(): hso_vld.next = 0 for i in range(N): ls_hsi_rdy[i].next = 0 if i == sel: hso_vld.next = ls_hsi_vld[i] ls_hsi_rdy[i].next = hso_rdy return _hsmux
[Many-to-one] Multiplexes a list of input handshake interfaces sel - (i) selects an input handshake interface to be connected to the output ls_hsi - (i) list of input handshake tuples (ready, valid) hso - (o) output handshake tuple (ready, valid)
def get_info(self): """ Query the GenePattern server for metadata regarding this job and assign that metadata to the properties on this GPJob object. Including: * Task Name * LSID * User ID * Job Number * Status * Date Submitted * URL of Log Files * URL of Output Files * Number of Output Files """ request = urllib.request.Request(self.server_data.url + "/rest/v1/jobs/" + self.uri) if self.server_data.authorization_header() is not None: request.add_header('Authorization', self.server_data.authorization_header()) request.add_header('User-Agent', 'GenePatternRest') response = urllib.request.urlopen(request) self.json = response.read().decode('utf-8') self.info = json.loads(self.json) self.load_info()
Query the GenePattern server for metadata regarding this job and assign that metadata to the properties on this GPJob object. Including: * Task Name * LSID * User ID * Job Number * Status * Date Submitted * URL of Log Files * URL of Output Files * Number of Output Files
def DeleteMessageHandlerRequests(self, requests, cursor=None): """Deletes a list of message handler requests from the database.""" query = "DELETE FROM message_handler_requests WHERE request_id IN ({})" request_ids = set([r.request_id for r in requests]) query = query.format(",".join(["%s"] * len(request_ids))) cursor.execute(query, request_ids)
Deletes a list of message handler requests from the database.
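The IN-clause expansion pattern in isolation: one placeholder per id, so the driver does the quoting. A sketch using sqlite3's ? style instead of MySQL's %s:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE reqs (request_id INTEGER)')
conn.executemany('INSERT INTO reqs VALUES (?)', [(1,), (2,), (3,)])
ids = {1, 3}
query = 'DELETE FROM reqs WHERE request_id IN ({})'.format(','.join('?' * len(ids)))
conn.execute(query, tuple(ids))
print(conn.execute('SELECT request_id FROM reqs').fetchall())  # [(2,)]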
def favorites_add(photo_id): """Add a photo to the user's favorites.""" method = 'flickr.favorites.add' _dopost(method, auth=True, photo_id=photo_id) return True
Add a photo to the user's favorites.
def GetFormatSpecification(cls): """Retrieves the format specification. Returns: FormatSpecification: format specification. """ format_specification = specification.FormatSpecification(cls.NAME) format_specification.AddNewSignature(b'SCCA', offset=4) format_specification.AddNewSignature(b'MAM\x04', offset=0) return format_specification
Retrieves the format specification. Returns: FormatSpecification: format specification.
def get_gc_book(self): """ Returns the GnuCash db session """ if not self.gc_book: gc_db = self.config.get(ConfigKeys.gnucash_book_path) if not gc_db: raise AttributeError("GnuCash book path not configured.") # check if this is the abs file exists if not os.path.isabs(gc_db): gc_db = resource_filename( Requirement.parse("Asset-Allocation"), gc_db) if not os.path.exists(gc_db): raise ValueError(f"Invalid GnuCash book path {gc_db}") self.gc_book = open_book(gc_db, open_if_lock=True) return self.gc_book
Returns the GnuCash db session
def execute_prebuild_script(self): """ Parse and execute the prebuild_script from the zappa_settings. """ (pb_mod_path, pb_func) = self.prebuild_script.rsplit('.', 1) try: # Prefer prebuild script in working directory if pb_mod_path.count('.') >= 1: # Prebuild script func is nested in a folder (mod_folder_path, mod_name) = pb_mod_path.rsplit('.', 1) mod_folder_path_fragments = mod_folder_path.split('.') working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments) else: mod_name = pb_mod_path working_dir = os.getcwd() working_dir_importer = pkgutil.get_importer(working_dir) module_ = working_dir_importer.find_module(mod_name).load_module(mod_name) except (ImportError, AttributeError): try: # Prebuild func might be in virtualenv module_ = importlib.import_module(pb_mod_path) except ImportError: # pragma: no cover raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style( "import prebuild script ", bold=True) + 'module: "{pb_mod_path}"'.format( pb_mod_path=click.style(pb_mod_path, bold=True))) if not hasattr(module_, pb_func): # pragma: no cover raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style( "find prebuild script ", bold=True) + 'function: "{pb_func}" '.format( pb_func=click.style(pb_func, bold=True)) + 'in module "{pb_mod_path}"'.format( pb_mod_path=pb_mod_path)) prebuild_function = getattr(module_, pb_func) prebuild_function()
Parse and execute the prebuild_script from the zappa_settings.
def genl_ctrl_resolve_grp(sk, family_name, grp_name): """Resolve Generic Netlink family group name. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L471 Looks up the family object and resolves the group name to the numeric group identifier. Positional arguments: sk -- Generic Netlink socket (nl_sock class instance). family_name -- name of Generic Netlink family (bytes). grp_name -- name of group to resolve (bytes). Returns: The numeric group identifier or a negative error code. """ family = genl_ctrl_probe_by_name(sk, family_name) if family is None: return -NLE_OBJ_NOTFOUND return genl_ctrl_grp_by_name(family, grp_name)
Resolve Generic Netlink family group name. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L471 Looks up the family object and resolves the group name to the numeric group identifier. Positional arguments: sk -- Generic Netlink socket (nl_sock class instance). family_name -- name of Generic Netlink family (bytes). grp_name -- name of group to resolve (bytes). Returns: The numeric group identifier or a negative error code.
def replace_termcodes(self, string, from_part=False, do_lt=True, special=True): r"""Replace any terminal code strings by byte sequences. The returned sequences are Nvim's internal representation of keys, for example: <esc> -> '\x1b' <cr> -> '\r' <c-l> -> '\x0c' <up> -> '\x80ku' The returned sequences can be used as input to `feedkeys`. """ return self.request('nvim_replace_termcodes', string, from_part, do_lt, special)
r"""Replace any terminal code strings by byte sequences. The returned sequences are Nvim's internal representation of keys, for example: <esc> -> '\x1b' <cr> -> '\r' <c-l> -> '\x0c' <up> -> '\x80ku' The returned sequences can be used as input to `feedkeys`.
def list_nodes_min(call=None): ''' Return a list of the VMs that are on the provider. Only a list of VM names and their state is returned. This is the minimum amount of information needed to check for existing VMs. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt-cloud -f list_nodes_min my-linode-config salt-cloud --function list_nodes_min my-linode-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_min function must be called with -f or --function.' ) ret = {} nodes = _query('linode', 'list')['DATA'] for node in nodes: name = node['LABEL'] this_node = { 'id': six.text_type(node['LINODEID']), 'state': _get_status_descr_by_id(int(node['STATUS'])) } ret[name] = this_node return ret
Return a list of the VMs that are on the provider. Only a list of VM names and their state is returned. This is the minimum amount of information needed to check for existing VMs. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt-cloud -f list_nodes_min my-linode-config salt-cloud --function list_nodes_min my-linode-config
def get_all_current_trains(self, train_type=None, direction=None): """Returns all trains that are due to start in the next 10 minutes @param train_type: ['mainline', 'suburban', 'dart'] """ params = None if train_type: url = self.api_base_url + 'getCurrentTrainsXML_WithTrainType' params = { 'TrainType': STATION_TYPE_TO_CODE_DICT[train_type] } else: url = self.api_base_url + 'getCurrentTrainsXML' response = requests.get( url, params=params, timeout=10) if response.status_code != 200: return [] trains = self._parse_all_train_data(response.content) if direction is not None: return self._prune_trains(trains, direction=direction) return trains
Returns all trains that are due to start in the next 10 minutes @param train_type: ['mainline', 'suburban', 'dart']
def create_role(self, role_name, mount_point='approle', **kwargs): """POST /auth/<mount_point>/role/<role name> :param role_name: :type role_name: :param mount_point: :type mount_point: :param kwargs: :type kwargs: :return: :rtype: """ return self._adapter.post('/v1/auth/{0}/role/{1}'.format(mount_point, role_name), json=kwargs)
POST /auth/<mount_point>/role/<role name> :param role_name: :type role_name: :param mount_point: :type mount_point: :param kwargs: :type kwargs: :return: :rtype:
async def getRecentErrors(self, *args, **kwargs): """ Look up the most recent errors in the provisioner across all worker types Return a list of recent errors encountered This method gives output: ``v1/errors.json#`` This method is ``experimental`` """ return await self._makeApiCall(self.funcinfo["getRecentErrors"], *args, **kwargs)
Look up the most recent errors in the provisioner across all worker types Return a list of recent errors encountered This method gives output: ``v1/errors.json#`` This method is ``experimental``