code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def process_edge_flow(self, source, sink, i, j, algo, q):
    """
    API: process_edge_flow(self, source, sink, i, j, algo, q)
    Description:
        Used by max_flow_preflowpush() method. Processes a single arc
        during a preflow-push step.
    Input:
        source: Source node name of flow graph.
        sink: Sink node name of flow graph.
        i: Source node in the processed edge (tail of arc).
        j: Sink node in the processed edge (head of arc).
    Post:
        The 'flow' attribute of the pushed edge and the 'excess'
        attributes of nodes i and j may get updated.
    Return:
        Returns False if no push is possible (non-admissible arc or zero
        residual capacity), True otherwise.
    """
    # A push is only admissible along an arc that descends exactly one
    # level in the distance labelling.
    if self.get_node_attr(i, 'distance') != self.get_node_attr(j, 'distance') + 1:
        return False
    # Determine whether (i, j) is a forward arc or the reverse of a stored
    # arc; 'sign' converts the stored flow into the (i, j) direction.
    forward = (i, j) in self.edge_attr
    edge = (i, j) if forward else (j, i)
    capacity = self.get_edge_attr(i, j, 'capacity') if forward else 0
    sign = 1 if forward else -1
    directed_flow = sign * self.edge_attr[edge]['flow']
    residual = capacity - directed_flow
    if residual == 0:
        return False
    surplus_i = self.get_node_attr(i, 'excess')
    surplus_j = self.get_node_attr(j, 'excess')
    # Push as much of i's excess as the residual capacity allows.
    amount = min(surplus_i, residual)
    self.edge_attr[edge]['flow'] = sign * (directed_flow + amount)
    self.set_node_attr(i, 'excess', surplus_i - amount)
    self.set_node_attr(j, 'excess', surplus_j + amount)
    return True
def function[process_edge_flow, parameter[self, source, sink, i, j, algo, q]]: constant[ API: process_edge_flow(self, source, sink, i, j, algo, q) Description: Used by by max_flow_preflowpush() method. Processes edges along prefolow push. Input: source: Source node name of flow graph. sink: Sink node name of flow graph. i: Source node in the processed edge (tail of arc). j: Sink node in the processed edge (head of arc). Post: The 'flow' and 'excess' attributes of nodes may get updated. Return: Returns False if residual capacity is 0, True otherwise. ] if compare[call[name[self].get_node_attr, parameter[name[i], constant[distance]]] not_equal[!=] binary_operation[call[name[self].get_node_attr, parameter[name[j], constant[distance]]] + constant[1]]] begin[:] return[constant[False]] if compare[tuple[[<ast.Name object at 0x7da1b054b5b0>, <ast.Name object at 0x7da1b054b5e0>]] in name[self].edge_attr] begin[:] variable[edge] assign[=] tuple[[<ast.Name object at 0x7da1b0549bd0>, <ast.Name object at 0x7da1b0549ba0>]] variable[capacity] assign[=] call[name[self].get_edge_attr, parameter[name[i], name[j], constant[capacity]]] variable[mult] assign[=] constant[1] variable[flow] assign[=] binary_operation[name[mult] * call[call[name[self].edge_attr][name[edge]]][constant[flow]]] variable[residual_capacity] assign[=] binary_operation[name[capacity] - name[flow]] if compare[name[residual_capacity] equal[==] constant[0]] begin[:] return[constant[False]] variable[excess_i] assign[=] call[name[self].get_node_attr, parameter[name[i], constant[excess]]] variable[excess_j] assign[=] call[name[self].get_node_attr, parameter[name[j], constant[excess]]] variable[push_amount] assign[=] call[name[min], parameter[name[excess_i], name[residual_capacity]]] call[call[name[self].edge_attr][name[edge]]][constant[flow]] assign[=] binary_operation[name[mult] * binary_operation[name[flow] + name[push_amount]]] call[name[self].set_node_attr, parameter[name[i], constant[excess], 
binary_operation[name[excess_i] - name[push_amount]]]] call[name[self].set_node_attr, parameter[name[j], constant[excess], binary_operation[name[excess_j] + name[push_amount]]]] return[constant[True]]
keyword[def] identifier[process_edge_flow] ( identifier[self] , identifier[source] , identifier[sink] , identifier[i] , identifier[j] , identifier[algo] , identifier[q] ): literal[string] keyword[if] ( identifier[self] . identifier[get_node_attr] ( identifier[i] , literal[string] )!= identifier[self] . identifier[get_node_attr] ( identifier[j] , literal[string] )+ literal[int] ): keyword[return] keyword[False] keyword[if] ( identifier[i] , identifier[j] ) keyword[in] identifier[self] . identifier[edge_attr] : identifier[edge] =( identifier[i] , identifier[j] ) identifier[capacity] = identifier[self] . identifier[get_edge_attr] ( identifier[i] , identifier[j] , literal[string] ) identifier[mult] = literal[int] keyword[else] : identifier[edge] =( identifier[j] , identifier[i] ) identifier[capacity] = literal[int] identifier[mult] =- literal[int] identifier[flow] = identifier[mult] * identifier[self] . identifier[edge_attr] [ identifier[edge] ][ literal[string] ] identifier[residual_capacity] = identifier[capacity] - identifier[flow] keyword[if] identifier[residual_capacity] == literal[int] : keyword[return] keyword[False] identifier[excess_i] = identifier[self] . identifier[get_node_attr] ( identifier[i] , literal[string] ) identifier[excess_j] = identifier[self] . identifier[get_node_attr] ( identifier[j] , literal[string] ) identifier[push_amount] = identifier[min] ( identifier[excess_i] , identifier[residual_capacity] ) identifier[self] . identifier[edge_attr] [ identifier[edge] ][ literal[string] ]= identifier[mult] *( identifier[flow] + identifier[push_amount] ) identifier[self] . identifier[set_node_attr] ( identifier[i] , literal[string] , identifier[excess_i] - identifier[push_amount] ) identifier[self] . identifier[set_node_attr] ( identifier[j] , literal[string] , identifier[excess_j] + identifier[push_amount] ) keyword[return] keyword[True]
def process_edge_flow(self, source, sink, i, j, algo, q): """ API: process_edge_flow(self, source, sink, i, j, algo, q) Description: Used by by max_flow_preflowpush() method. Processes edges along prefolow push. Input: source: Source node name of flow graph. sink: Sink node name of flow graph. i: Source node in the processed edge (tail of arc). j: Sink node in the processed edge (head of arc). Post: The 'flow' and 'excess' attributes of nodes may get updated. Return: Returns False if residual capacity is 0, True otherwise. """ if self.get_node_attr(i, 'distance') != self.get_node_attr(j, 'distance') + 1: return False # depends on [control=['if'], data=[]] if (i, j) in self.edge_attr: edge = (i, j) capacity = self.get_edge_attr(i, j, 'capacity') mult = 1 # depends on [control=['if'], data=[]] else: edge = (j, i) capacity = 0 mult = -1 flow = mult * self.edge_attr[edge]['flow'] residual_capacity = capacity - flow if residual_capacity == 0: return False # depends on [control=['if'], data=[]] excess_i = self.get_node_attr(i, 'excess') excess_j = self.get_node_attr(j, 'excess') push_amount = min(excess_i, residual_capacity) self.edge_attr[edge]['flow'] = mult * (flow + push_amount) self.set_node_attr(i, 'excess', excess_i - push_amount) self.set_node_attr(j, 'excess', excess_j + push_amount) return True
def reinit(self):
    """Use carefully to reset the episode count to 0."""
    # Open a fresh TCP connection for this one exchange.
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((self.server, self.port))
    self._hello(conn)
    msg = "<Init>" + self._get_token() + "</Init>"
    comms.send_message(conn, msg.encode())
    reply = comms.recv_message(conn)
    conn.close()
    # Reply is a single network-order unsigned int: non-zero means success.
    (status,) = struct.unpack('!I', reply)
    return status != 0
def function[reinit, parameter[self]]: constant[Use carefully to reset the episode count to 0.] variable[sock] assign[=] call[name[socket].socket, parameter[name[socket].AF_INET, name[socket].SOCK_STREAM]] call[name[sock].connect, parameter[tuple[[<ast.Attribute object at 0x7da1b1b11d20>, <ast.Attribute object at 0x7da1b1b10b80>]]]] call[name[self]._hello, parameter[name[sock]]] call[name[comms].send_message, parameter[name[sock], call[binary_operation[binary_operation[constant[<Init>] + call[name[self]._get_token, parameter[]]] + constant[</Init>]].encode, parameter[]]]] variable[reply] assign[=] call[name[comms].recv_message, parameter[name[sock]]] call[name[sock].close, parameter[]] <ast.Tuple object at 0x7da1b1a49960> assign[=] call[name[struct].unpack, parameter[constant[!I], name[reply]]] return[compare[name[ok] not_equal[!=] constant[0]]]
keyword[def] identifier[reinit] ( identifier[self] ): literal[string] identifier[sock] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_STREAM] ) identifier[sock] . identifier[connect] (( identifier[self] . identifier[server] , identifier[self] . identifier[port] )) identifier[self] . identifier[_hello] ( identifier[sock] ) identifier[comms] . identifier[send_message] ( identifier[sock] ,( literal[string] + identifier[self] . identifier[_get_token] ()+ literal[string] ). identifier[encode] ()) identifier[reply] = identifier[comms] . identifier[recv_message] ( identifier[sock] ) identifier[sock] . identifier[close] () identifier[ok] ,= identifier[struct] . identifier[unpack] ( literal[string] , identifier[reply] ) keyword[return] identifier[ok] != literal[int]
def reinit(self): """Use carefully to reset the episode count to 0.""" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.server, self.port)) self._hello(sock) comms.send_message(sock, ('<Init>' + self._get_token() + '</Init>').encode()) reply = comms.recv_message(sock) sock.close() (ok,) = struct.unpack('!I', reply) return ok != 0
def _graphic(self): """ Adds the correct graphic options depending of the OS """ if sys.platform.startswith("win"): return [] if len(os.environ.get("DISPLAY", "")) > 0: return [] if "-nographic" not in self._options: return ["-nographic"] return []
def function[_graphic, parameter[self]]: constant[ Adds the correct graphic options depending of the OS ] if call[name[sys].platform.startswith, parameter[constant[win]]] begin[:] return[list[[]]] if compare[call[name[len], parameter[call[name[os].environ.get, parameter[constant[DISPLAY], constant[]]]]] greater[>] constant[0]] begin[:] return[list[[]]] if compare[constant[-nographic] <ast.NotIn object at 0x7da2590d7190> name[self]._options] begin[:] return[list[[<ast.Constant object at 0x7da2044c1120>]]] return[list[[]]]
keyword[def] identifier[_graphic] ( identifier[self] ): literal[string] keyword[if] identifier[sys] . identifier[platform] . identifier[startswith] ( literal[string] ): keyword[return] [] keyword[if] identifier[len] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ))> literal[int] : keyword[return] [] keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_options] : keyword[return] [ literal[string] ] keyword[return] []
def _graphic(self): """ Adds the correct graphic options depending of the OS """ if sys.platform.startswith('win'): return [] # depends on [control=['if'], data=[]] if len(os.environ.get('DISPLAY', '')) > 0: return [] # depends on [control=['if'], data=[]] if '-nographic' not in self._options: return ['-nographic'] # depends on [control=['if'], data=[]] return []
def take_snapshot(self, name, wait=True):
    """
    Take a snapshot of this droplet (must be powered off)

    Parameters
    ----------
    name: str
        Name of the snapshot
    wait: bool, default True
        Whether to block until the pending action is completed
    """
    # Thin wrapper: delegate to the generic droplet action endpoint.
    action_type = 'snapshot'
    return self._action(action_type, name=name, wait=wait)
def function[take_snapshot, parameter[self, name, wait]]: constant[ Take a snapshot of this droplet (must be powered off) Parameters ---------- name: str Name of the snapshot wait: bool, default True Whether to block until the pending action is completed ] return[call[name[self]._action, parameter[constant[snapshot]]]]
keyword[def] identifier[take_snapshot] ( identifier[self] , identifier[name] , identifier[wait] = keyword[True] ): literal[string] keyword[return] identifier[self] . identifier[_action] ( literal[string] , identifier[name] = identifier[name] , identifier[wait] = identifier[wait] )
def take_snapshot(self, name, wait=True): """ Take a snapshot of this droplet (must be powered off) Parameters ---------- name: str Name of the snapshot wait: bool, default True Whether to block until the pending action is completed """ return self._action('snapshot', name=name, wait=wait)
def _apply_advanced_config(config_spec, advanced_config, vm_extra_config=None):
    '''
    Sets configuration parameters for the vm

    config_spec
        vm.ConfigSpec object

    advanced_config
        config key value pairs

    vm_extra_config
        Virtual machine vm_ref.config.extraConfig object
    '''
    log.trace('Configuring advanced configuration '
              'parameters %s', advanced_config)
    if isinstance(advanced_config, str):
        raise salt.exceptions.ArgumentValueError(
            'The specified \'advanced_configs\' configuration '
            'option cannot be parsed, please check the parameters')
    for key, value in six.iteritems(advanced_config):
        # BUG FIX: previously the append lived in the `else` branch of
        # `if vm_extra_config:`, so whenever extra config existed NOTHING was
        # ever appended, and the inner `continue` only skipped iterations of
        # the duplicate-scan loop itself. Now: skip a key only when it is
        # already present with the same value, otherwise append it.
        if vm_extra_config and any(
                option.key == key and option.value == str(value)
                for option in vm_extra_config):
            continue
        option = vim.option.OptionValue(key=key, value=value)
        config_spec.extraConfig.append(option)
def function[_apply_advanced_config, parameter[config_spec, advanced_config, vm_extra_config]]: constant[ Sets configuration parameters for the vm config_spec vm.ConfigSpec object advanced_config config key value pairs vm_extra_config Virtual machine vm_ref.config.extraConfig object ] call[name[log].trace, parameter[constant[Configuring advanced configuration parameters %s], name[advanced_config]]] if call[name[isinstance], parameter[name[advanced_config], name[str]]] begin[:] <ast.Raise object at 0x7da2047eb2b0> for taget[tuple[[<ast.Name object at 0x7da2047ea170>, <ast.Name object at 0x7da2047eb700>]]] in starred[call[name[six].iteritems, parameter[name[advanced_config]]]] begin[:] if name[vm_extra_config] begin[:] for taget[name[option]] in starred[name[vm_extra_config]] begin[:] if <ast.BoolOp object at 0x7da2047eb460> begin[:] continue
keyword[def] identifier[_apply_advanced_config] ( identifier[config_spec] , identifier[advanced_config] , identifier[vm_extra_config] = keyword[None] ): literal[string] identifier[log] . identifier[trace] ( literal[string] literal[string] , identifier[advanced_config] ) keyword[if] identifier[isinstance] ( identifier[advanced_config] , identifier[str] ): keyword[raise] identifier[salt] . identifier[exceptions] . identifier[ArgumentValueError] ( literal[string] literal[string] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[six] . identifier[iteritems] ( identifier[advanced_config] ): keyword[if] identifier[vm_extra_config] : keyword[for] identifier[option] keyword[in] identifier[vm_extra_config] : keyword[if] identifier[option] . identifier[key] == identifier[key] keyword[and] identifier[option] . identifier[value] == identifier[str] ( identifier[value] ): keyword[continue] keyword[else] : identifier[option] = identifier[vim] . identifier[option] . identifier[OptionValue] ( identifier[key] = identifier[key] , identifier[value] = identifier[value] ) identifier[config_spec] . identifier[extraConfig] . identifier[append] ( identifier[option] )
def _apply_advanced_config(config_spec, advanced_config, vm_extra_config=None): """ Sets configuration parameters for the vm config_spec vm.ConfigSpec object advanced_config config key value pairs vm_extra_config Virtual machine vm_ref.config.extraConfig object """ log.trace('Configuring advanced configuration parameters %s', advanced_config) if isinstance(advanced_config, str): raise salt.exceptions.ArgumentValueError("The specified 'advanced_configs' configuration option cannot be parsed, please check the parameters") # depends on [control=['if'], data=[]] for (key, value) in six.iteritems(advanced_config): if vm_extra_config: for option in vm_extra_config: if option.key == key and option.value == str(value): continue # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['option']] # depends on [control=['if'], data=[]] else: option = vim.option.OptionValue(key=key, value=value) config_spec.extraConfig.append(option) # depends on [control=['for'], data=[]]
def get_consensus(block):
    """Calculate a simple consensus sequence for the block."""
    from collections import Counter
    # Keep only aligned characters (drop lowercase insert states) from each
    # row, then walk the alignment column by column.
    aligned_rows = [[ch for ch in row['seq'] if not ch.islower()]
                    for row in block['sequences']]
    consensus = []
    for column in zip(*aligned_rows):
        # Majority character of the column; a majority gap becomes 'X'.
        top_char = Counter(column).most_common()[0][0]
        consensus.append('X' if top_char == '-' else top_char)
    assert len(consensus) == block['query_length']
    return {
        'index': 1,
        'id': 'consensus',
        'description': '',
        'dbxrefs': {},
        'phylum': '',
        'taxchar': '',
        'head_len': None,
        'tail_len': None,
        'head_seq': '',
        'tail_seq': '',
        'length': block['query_length'],
        'seq': ''.join(consensus),
    }
def function[get_consensus, parameter[block]]: constant[Calculate a simple consensus sequence for the block.] from relative_module[collections] import module[Counter] variable[columns] assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da18fe90eb0>]] variable[cons_chars] assign[=] <ast.ListComp object at 0x7da18fe90fa0> variable[cons_chars] assign[=] <ast.ListComp object at 0x7da18fe90b50> assert[compare[call[name[len], parameter[name[cons_chars]]] equal[==] call[name[block]][constant[query_length]]]] variable[cons_sequence] assign[=] dictionary[[<ast.Constant object at 0x7da18fe93c40>, <ast.Constant object at 0x7da18fe900d0>, <ast.Constant object at 0x7da18fe92320>, <ast.Constant object at 0x7da18fe90880>, <ast.Constant object at 0x7da18fe90d30>, <ast.Constant object at 0x7da18fe90bb0>, <ast.Constant object at 0x7da18fe92dd0>, <ast.Constant object at 0x7da18fe90f70>, <ast.Constant object at 0x7da18fe93fa0>, <ast.Constant object at 0x7da18fe937f0>, <ast.Constant object at 0x7da18fe93af0>, <ast.Constant object at 0x7da18fe939a0>], [<ast.Constant object at 0x7da18fe91f90>, <ast.Constant object at 0x7da2047e8cd0>, <ast.Constant object at 0x7da2047eaa40>, <ast.Dict object at 0x7da2047eb880>, <ast.Constant object at 0x7da2047e90f0>, <ast.Constant object at 0x7da2047ebb50>, <ast.Constant object at 0x7da2047e8af0>, <ast.Constant object at 0x7da2047e9ae0>, <ast.Constant object at 0x7da2047e84c0>, <ast.Constant object at 0x7da2047eb1c0>, <ast.Subscript object at 0x7da2047e8880>, <ast.Call object at 0x7da2047eaa70>]] return[name[cons_sequence]]
keyword[def] identifier[get_consensus] ( identifier[block] ): literal[string] keyword[from] identifier[collections] keyword[import] identifier[Counter] identifier[columns] = identifier[zip] (*[[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[row] [ literal[string] ] keyword[if] keyword[not] identifier[c] . identifier[islower] ()] keyword[for] identifier[row] keyword[in] identifier[block] [ literal[string] ]]) identifier[cons_chars] =[ identifier[Counter] ( identifier[col] ). identifier[most_common] ()[ literal[int] ][ literal[int] ] keyword[for] identifier[col] keyword[in] identifier[columns] ] identifier[cons_chars] =[ identifier[c] keyword[if] identifier[c] != literal[string] keyword[else] literal[string] keyword[for] identifier[c] keyword[in] identifier[cons_chars] ] keyword[assert] identifier[len] ( identifier[cons_chars] )== identifier[block] [ literal[string] ] identifier[cons_sequence] ={ literal[string] : literal[int] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :{}, literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[block] [ literal[string] ], literal[string] : literal[string] . identifier[join] ( identifier[cons_chars] ), } keyword[return] identifier[cons_sequence]
def get_consensus(block): """Calculate a simple consensus sequence for the block.""" from collections import Counter # Take aligned (non-insert) chars from all rows; transpose columns = zip(*[[c for c in row['seq'] if not c.islower()] for row in block['sequences']]) cons_chars = [Counter(col).most_common()[0][0] for col in columns] cons_chars = [c if c != '-' else 'X' for c in cons_chars] assert len(cons_chars) == block['query_length'] cons_sequence = {'index': 1, 'id': 'consensus', 'description': '', 'dbxrefs': {}, 'phylum': '', 'taxchar': '', 'head_len': None, 'tail_len': None, 'head_seq': '', 'tail_seq': '', 'length': block['query_length'], 'seq': ''.join(cons_chars)} return cons_sequence
def _correlation_computation(self, task):
    """Use BLAS API to do correlation computation (matrix multiplication).

    Parameters
    ----------
    task: tuple (start_voxel_id, num_processed_voxels)
        depicting the voxels assigned to compute

    Returns
    -------
    corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
        the correlation values of all subjects in all epochs
        for the assigned values, in row-major
        corr[i, e, s + j] = corr[j, e, s + i]
    """
    # Wall-clock timing of the whole computation, for the debug log below.
    time1 = time.time()
    s = task[0]
    nEpochs = len(self.raw_data)
    logger.debug(
        'start to compute the correlation: #epochs: %d, '
        '#processed voxels: %d, #total voxels to compute against: %d' %
        (nEpochs, task[1], self.num_voxels2)
    )
    # Output buffer: one row of num_voxels2 correlations per (assigned
    # voxel, epoch) pair, float32, C (row-major) order.
    corr = np.zeros((task[1], nEpochs, self.num_voxels2),
                    np.float32,
                    order='C')
    count = 0
    for i in range(len(self.raw_data)):
        mat = self.raw_data[i]
        # Correlate against the second dataset when one was provided,
        # otherwise compute correlation within the same epoch matrix.
        mat2 = self.raw_data2[i] if self.raw_data2 is not None else mat
        no_trans = 'N'
        trans = 'T'
        # NOTE(review): 15 positional arguments whose meaning is defined by
        # the wrapped BLAS kernel (presumably a sgemm-style layout:
        # transposes, dims m/n/k, alpha, A/lda, offset, B/ldb, beta, C/ldc,
        # epoch index) — confirm against the blas module before reordering.
        blas.compute_self_corr_for_voxel_sel(no_trans, trans,
                                             self.num_voxels2, task[1],
                                             mat.shape[0],
                                             1.0, mat2,
                                             self.num_voxels2, s,
                                             mat, self.num_voxels,
                                             0.0, corr,
                                             self.num_voxels2 * nEpochs,
                                             count)
        count += 1
    time2 = time.time()
    logger.debug(
        'correlation computation for %d voxels, takes %.2f s' %
        (task[1], (time2 - time1))
    )
    return corr
def function[_correlation_computation, parameter[self, task]]: constant[Use BLAS API to do correlation computation (matrix multiplication). Parameters ---------- task: tuple (start_voxel_id, num_processed_voxels) depicting the voxels assigned to compute Returns ------- corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels] the correlation values of all subjects in all epochs for the assigned values, in row-major corr[i, e, s + j] = corr[j, e, s + i] ] variable[time1] assign[=] call[name[time].time, parameter[]] variable[s] assign[=] call[name[task]][constant[0]] variable[nEpochs] assign[=] call[name[len], parameter[name[self].raw_data]] call[name[logger].debug, parameter[binary_operation[constant[start to compute the correlation: #epochs: %d, #processed voxels: %d, #total voxels to compute against: %d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0730f10>, <ast.Subscript object at 0x7da1b0730dc0>, <ast.Attribute object at 0x7da1b07325c0>]]]]] variable[corr] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Subscript object at 0x7da1b0732d10>, <ast.Name object at 0x7da1b0732ce0>, <ast.Attribute object at 0x7da1b0732a70>]], name[np].float32]] variable[count] assign[=] constant[0] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].raw_data]]]]] begin[:] variable[mat] assign[=] call[name[self].raw_data][name[i]] variable[mat2] assign[=] <ast.IfExp object at 0x7da2045676a0> variable[no_trans] assign[=] constant[N] variable[trans] assign[=] constant[T] call[name[blas].compute_self_corr_for_voxel_sel, parameter[name[no_trans], name[trans], name[self].num_voxels2, call[name[task]][constant[1]], call[name[mat].shape][constant[0]], constant[1.0], name[mat2], name[self].num_voxels2, name[s], name[mat], name[self].num_voxels, constant[0.0], name[corr], binary_operation[name[self].num_voxels2 * name[nEpochs]], name[count]]] <ast.AugAssign object at 0x7da204565b40> variable[time2] assign[=] 
call[name[time].time, parameter[]] call[name[logger].debug, parameter[binary_operation[constant[correlation computation for %d voxels, takes %.2f s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da204566b30>, <ast.BinOp object at 0x7da204565540>]]]]] return[name[corr]]
keyword[def] identifier[_correlation_computation] ( identifier[self] , identifier[task] ): literal[string] identifier[time1] = identifier[time] . identifier[time] () identifier[s] = identifier[task] [ literal[int] ] identifier[nEpochs] = identifier[len] ( identifier[self] . identifier[raw_data] ) identifier[logger] . identifier[debug] ( literal[string] literal[string] % ( identifier[nEpochs] , identifier[task] [ literal[int] ], identifier[self] . identifier[num_voxels2] ) ) identifier[corr] = identifier[np] . identifier[zeros] (( identifier[task] [ literal[int] ], identifier[nEpochs] , identifier[self] . identifier[num_voxels2] ), identifier[np] . identifier[float32] , identifier[order] = literal[string] ) identifier[count] = literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[raw_data] )): identifier[mat] = identifier[self] . identifier[raw_data] [ identifier[i] ] identifier[mat2] = identifier[self] . identifier[raw_data2] [ identifier[i] ] keyword[if] identifier[self] . identifier[raw_data2] keyword[is] keyword[not] keyword[None] keyword[else] identifier[mat] identifier[no_trans] = literal[string] identifier[trans] = literal[string] identifier[blas] . identifier[compute_self_corr_for_voxel_sel] ( identifier[no_trans] , identifier[trans] , identifier[self] . identifier[num_voxels2] , identifier[task] [ literal[int] ], identifier[mat] . identifier[shape] [ literal[int] ], literal[int] , identifier[mat2] , identifier[self] . identifier[num_voxels2] , identifier[s] , identifier[mat] , identifier[self] . identifier[num_voxels] , literal[int] , identifier[corr] , identifier[self] . identifier[num_voxels2] * identifier[nEpochs] , identifier[count] ) identifier[count] += literal[int] identifier[time2] = identifier[time] . identifier[time] () identifier[logger] . 
identifier[debug] ( literal[string] % ( identifier[task] [ literal[int] ],( identifier[time2] - identifier[time1] )) ) keyword[return] identifier[corr]
def _correlation_computation(self, task): """Use BLAS API to do correlation computation (matrix multiplication). Parameters ---------- task: tuple (start_voxel_id, num_processed_voxels) depicting the voxels assigned to compute Returns ------- corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels] the correlation values of all subjects in all epochs for the assigned values, in row-major corr[i, e, s + j] = corr[j, e, s + i] """ time1 = time.time() s = task[0] nEpochs = len(self.raw_data) logger.debug('start to compute the correlation: #epochs: %d, #processed voxels: %d, #total voxels to compute against: %d' % (nEpochs, task[1], self.num_voxels2)) corr = np.zeros((task[1], nEpochs, self.num_voxels2), np.float32, order='C') count = 0 for i in range(len(self.raw_data)): mat = self.raw_data[i] mat2 = self.raw_data2[i] if self.raw_data2 is not None else mat no_trans = 'N' trans = 'T' blas.compute_self_corr_for_voxel_sel(no_trans, trans, self.num_voxels2, task[1], mat.shape[0], 1.0, mat2, self.num_voxels2, s, mat, self.num_voxels, 0.0, corr, self.num_voxels2 * nEpochs, count) count += 1 # depends on [control=['for'], data=['i']] time2 = time.time() logger.debug('correlation computation for %d voxels, takes %.2f s' % (task[1], time2 - time1)) return corr
def _load_char(self, char):
    """Build and store a glyph corresponding to an individual character

    Parameters
    ----------
    char : str
        A single character to be represented.
    """
    assert isinstance(char, string_types) and len(char) == 1
    assert char not in self._glyphs
    # Render the glyph bitmap from the font into the glyph dict.
    _load_glyph(self._font, char, self._glyphs)
    glyph = self._glyphs[char]
    bitmap = glyph['bitmap']
    # Pad the bitmap by self._spread on every side.
    pad = self._spread
    padded = np.zeros((bitmap.shape[0] + 2 * pad,
                       bitmap.shape[1] + 2 * pad), np.uint8)
    padded[pad:-pad, pad:-pad] = bitmap
    # Downscale to atlas resolution and reserve a region with a one-pixel
    # border on each side.
    height = padded.shape[0] // self.ratio
    width = padded.shape[1] // self.ratio
    region = self._atlas.get_free_region(width + 2, height + 2)
    if region is None:
        raise RuntimeError('Cannot store glyph')
    x, y, w, h = region
    x += 1
    y += 1
    w -= 2
    h -= 2
    self._renderer.render_to_texture(padded, self._atlas, (x, y), (w, h))
    # Normalized texture coordinates (u0, v0, u1, v1) of the stored glyph.
    atlas_h = float(self._atlas.shape[0])
    atlas_w = float(self._atlas.shape[1])
    texcoords = (x / atlas_w, y / atlas_h,
                 (x + w) / atlas_w, (y + h) / atlas_h)
    glyph.update(dict(size=(w, h), texcoords=texcoords))
def function[_load_char, parameter[self, char]]: constant[Build and store a glyph corresponding to an individual character Parameters ---------- char : str A single character to be represented. ] assert[<ast.BoolOp object at 0x7da1b0e49d50>] assert[compare[name[char] <ast.NotIn object at 0x7da2590d7190> name[self]._glyphs]] call[name[_load_glyph], parameter[name[self]._font, name[char], name[self]._glyphs]] variable[glyph] assign[=] call[name[self]._glyphs][name[char]] variable[bitmap] assign[=] call[name[glyph]][constant[bitmap]] variable[data] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da1b0ff8340>, <ast.BinOp object at 0x7da1b0ff8a90>]], name[np].uint8]] call[name[data]][tuple[[<ast.Slice object at 0x7da1b0ff82e0>, <ast.Slice object at 0x7da1b0ff9060>]]] assign[=] name[bitmap] variable[height] assign[=] binary_operation[call[name[data].shape][constant[0]] <ast.FloorDiv object at 0x7da2590d6bc0> name[self].ratio] variable[width] assign[=] binary_operation[call[name[data].shape][constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> name[self].ratio] variable[region] assign[=] call[name[self]._atlas.get_free_region, parameter[binary_operation[name[width] + constant[2]], binary_operation[name[height] + constant[2]]]] if compare[name[region] is constant[None]] begin[:] <ast.Raise object at 0x7da1b0ea04f0> <ast.Tuple object at 0x7da1b0ea00a0> assign[=] name[region] <ast.Tuple object at 0x7da1b0ea1630> assign[=] tuple[[<ast.BinOp object at 0x7da1b0ea0ee0>, <ast.BinOp object at 0x7da1b0ea0160>, <ast.BinOp object at 0x7da1b0ea06a0>, <ast.BinOp object at 0x7da1b0ea0310>]] call[name[self]._renderer.render_to_texture, parameter[name[data], name[self]._atlas, tuple[[<ast.Name object at 0x7da1b0ea0e80>, <ast.Name object at 0x7da1b0ea28c0>]], tuple[[<ast.Name object at 0x7da1b0ea1720>, <ast.Name object at 0x7da1b0ea0940>]]]] variable[u0] assign[=] binary_operation[name[x] / call[name[float], parameter[call[name[self]._atlas.shape][constant[1]]]]] 
variable[v0] assign[=] binary_operation[name[y] / call[name[float], parameter[call[name[self]._atlas.shape][constant[0]]]]] variable[u1] assign[=] binary_operation[binary_operation[name[x] + name[w]] / call[name[float], parameter[call[name[self]._atlas.shape][constant[1]]]]] variable[v1] assign[=] binary_operation[binary_operation[name[y] + name[h]] / call[name[float], parameter[call[name[self]._atlas.shape][constant[0]]]]] variable[texcoords] assign[=] tuple[[<ast.Name object at 0x7da1b0ea1330>, <ast.Name object at 0x7da1b0ea1060>, <ast.Name object at 0x7da1b0ea0190>, <ast.Name object at 0x7da1b0ea1270>]] call[name[glyph].update, parameter[call[name[dict], parameter[]]]]
keyword[def] identifier[_load_char] ( identifier[self] , identifier[char] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[char] , identifier[string_types] ) keyword[and] identifier[len] ( identifier[char] )== literal[int] keyword[assert] identifier[char] keyword[not] keyword[in] identifier[self] . identifier[_glyphs] identifier[_load_glyph] ( identifier[self] . identifier[_font] , identifier[char] , identifier[self] . identifier[_glyphs] ) identifier[glyph] = identifier[self] . identifier[_glyphs] [ identifier[char] ] identifier[bitmap] = identifier[glyph] [ literal[string] ] identifier[data] = identifier[np] . identifier[zeros] (( identifier[bitmap] . identifier[shape] [ literal[int] ]+ literal[int] * identifier[self] . identifier[_spread] , identifier[bitmap] . identifier[shape] [ literal[int] ]+ literal[int] * identifier[self] . identifier[_spread] ), identifier[np] . identifier[uint8] ) identifier[data] [ identifier[self] . identifier[_spread] :- identifier[self] . identifier[_spread] , identifier[self] . identifier[_spread] :- identifier[self] . identifier[_spread] ]= identifier[bitmap] identifier[height] = identifier[data] . identifier[shape] [ literal[int] ]// identifier[self] . identifier[ratio] identifier[width] = identifier[data] . identifier[shape] [ literal[int] ]// identifier[self] . identifier[ratio] identifier[region] = identifier[self] . identifier[_atlas] . identifier[get_free_region] ( identifier[width] + literal[int] , identifier[height] + literal[int] ) keyword[if] identifier[region] keyword[is] keyword[None] : keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[x] , identifier[y] , identifier[w] , identifier[h] = identifier[region] identifier[x] , identifier[y] , identifier[w] , identifier[h] = identifier[x] + literal[int] , identifier[y] + literal[int] , identifier[w] - literal[int] , identifier[h] - literal[int] identifier[self] . identifier[_renderer] . 
identifier[render_to_texture] ( identifier[data] , identifier[self] . identifier[_atlas] ,( identifier[x] , identifier[y] ),( identifier[w] , identifier[h] )) identifier[u0] = identifier[x] / identifier[float] ( identifier[self] . identifier[_atlas] . identifier[shape] [ literal[int] ]) identifier[v0] = identifier[y] / identifier[float] ( identifier[self] . identifier[_atlas] . identifier[shape] [ literal[int] ]) identifier[u1] =( identifier[x] + identifier[w] )/ identifier[float] ( identifier[self] . identifier[_atlas] . identifier[shape] [ literal[int] ]) identifier[v1] =( identifier[y] + identifier[h] )/ identifier[float] ( identifier[self] . identifier[_atlas] . identifier[shape] [ literal[int] ]) identifier[texcoords] =( identifier[u0] , identifier[v0] , identifier[u1] , identifier[v1] ) identifier[glyph] . identifier[update] ( identifier[dict] ( identifier[size] =( identifier[w] , identifier[h] ), identifier[texcoords] = identifier[texcoords] ))
def _load_char(self, char): """Build and store a glyph corresponding to an individual character Parameters ---------- char : str A single character to be represented. """ assert isinstance(char, string_types) and len(char) == 1 assert char not in self._glyphs # load new glyph data from font _load_glyph(self._font, char, self._glyphs) # put new glyph into the texture glyph = self._glyphs[char] bitmap = glyph['bitmap'] # convert to padded array data = np.zeros((bitmap.shape[0] + 2 * self._spread, bitmap.shape[1] + 2 * self._spread), np.uint8) data[self._spread:-self._spread, self._spread:-self._spread] = bitmap # Store, while scaling down to proper size height = data.shape[0] // self.ratio width = data.shape[1] // self.ratio region = self._atlas.get_free_region(width + 2, height + 2) if region is None: raise RuntimeError('Cannot store glyph') # depends on [control=['if'], data=[]] (x, y, w, h) = region (x, y, w, h) = (x + 1, y + 1, w - 2, h - 2) self._renderer.render_to_texture(data, self._atlas, (x, y), (w, h)) u0 = x / float(self._atlas.shape[1]) v0 = y / float(self._atlas.shape[0]) u1 = (x + w) / float(self._atlas.shape[1]) v1 = (y + h) / float(self._atlas.shape[0]) texcoords = (u0, v0, u1, v1) glyph.update(dict(size=(w, h), texcoords=texcoords))
def encode(cls, value): """Encodes a value into bencoded bytes. :param value: Python object to be encoded (str, int, list, dict). :param str val_encoding: Encoding used by strings in a given object. :rtype: bytes """ val_encoding = 'utf-8' def encode_str(v): try: v_enc = encode(v, val_encoding) except UnicodeDecodeError: if PY3: raise else: # Suppose bytestring v_enc = v prefix = encode('%s:' % len(v_enc), val_encoding) return prefix + v_enc def encode_(val): if isinstance(val, str_type): result = encode_str(val) elif isinstance(val, int_types): result = encode(('i%se' % val), val_encoding) elif isinstance(val, (list, set, tuple)): result = encode('l', val_encoding) for item in val: result += encode_(item) result += encode('e', val_encoding) elif isinstance(val, dict): result = encode('d', val_encoding) # Dictionaries are expected to be sorted by key. for k, v in OrderedDict(sorted(val.items(), key=itemgetter(0))).items(): result += (encode_str(k) + encode_(v)) result += encode('e', val_encoding) elif isinstance(val, byte_types): result = encode('%s:' % len(val), val_encoding) result += val else: raise BencodeEncodingError('Unable to encode `%s` %s' % (type(val), val)) return result return encode_(value)
def function[encode, parameter[cls, value]]: constant[Encodes a value into bencoded bytes. :param value: Python object to be encoded (str, int, list, dict). :param str val_encoding: Encoding used by strings in a given object. :rtype: bytes ] variable[val_encoding] assign[=] constant[utf-8] def function[encode_str, parameter[v]]: <ast.Try object at 0x7da1b04986d0> variable[prefix] assign[=] call[name[encode], parameter[binary_operation[constant[%s:] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[v_enc]]]], name[val_encoding]]] return[binary_operation[name[prefix] + name[v_enc]]] def function[encode_, parameter[val]]: if call[name[isinstance], parameter[name[val], name[str_type]]] begin[:] variable[result] assign[=] call[name[encode_str], parameter[name[val]]] return[name[result]] return[call[name[encode_], parameter[name[value]]]]
keyword[def] identifier[encode] ( identifier[cls] , identifier[value] ): literal[string] identifier[val_encoding] = literal[string] keyword[def] identifier[encode_str] ( identifier[v] ): keyword[try] : identifier[v_enc] = identifier[encode] ( identifier[v] , identifier[val_encoding] ) keyword[except] identifier[UnicodeDecodeError] : keyword[if] identifier[PY3] : keyword[raise] keyword[else] : identifier[v_enc] = identifier[v] identifier[prefix] = identifier[encode] ( literal[string] % identifier[len] ( identifier[v_enc] ), identifier[val_encoding] ) keyword[return] identifier[prefix] + identifier[v_enc] keyword[def] identifier[encode_] ( identifier[val] ): keyword[if] identifier[isinstance] ( identifier[val] , identifier[str_type] ): identifier[result] = identifier[encode_str] ( identifier[val] ) keyword[elif] identifier[isinstance] ( identifier[val] , identifier[int_types] ): identifier[result] = identifier[encode] (( literal[string] % identifier[val] ), identifier[val_encoding] ) keyword[elif] identifier[isinstance] ( identifier[val] ,( identifier[list] , identifier[set] , identifier[tuple] )): identifier[result] = identifier[encode] ( literal[string] , identifier[val_encoding] ) keyword[for] identifier[item] keyword[in] identifier[val] : identifier[result] += identifier[encode_] ( identifier[item] ) identifier[result] += identifier[encode] ( literal[string] , identifier[val_encoding] ) keyword[elif] identifier[isinstance] ( identifier[val] , identifier[dict] ): identifier[result] = identifier[encode] ( literal[string] , identifier[val_encoding] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[OrderedDict] ( identifier[sorted] ( identifier[val] . identifier[items] (), identifier[key] = identifier[itemgetter] ( literal[int] ))). 
identifier[items] (): identifier[result] +=( identifier[encode_str] ( identifier[k] )+ identifier[encode_] ( identifier[v] )) identifier[result] += identifier[encode] ( literal[string] , identifier[val_encoding] ) keyword[elif] identifier[isinstance] ( identifier[val] , identifier[byte_types] ): identifier[result] = identifier[encode] ( literal[string] % identifier[len] ( identifier[val] ), identifier[val_encoding] ) identifier[result] += identifier[val] keyword[else] : keyword[raise] identifier[BencodeEncodingError] ( literal[string] %( identifier[type] ( identifier[val] ), identifier[val] )) keyword[return] identifier[result] keyword[return] identifier[encode_] ( identifier[value] )
def encode(cls, value): """Encodes a value into bencoded bytes. :param value: Python object to be encoded (str, int, list, dict). :param str val_encoding: Encoding used by strings in a given object. :rtype: bytes """ val_encoding = 'utf-8' def encode_str(v): try: v_enc = encode(v, val_encoding) # depends on [control=['try'], data=[]] except UnicodeDecodeError: if PY3: raise # depends on [control=['if'], data=[]] else: # Suppose bytestring v_enc = v # depends on [control=['except'], data=[]] prefix = encode('%s:' % len(v_enc), val_encoding) return prefix + v_enc def encode_(val): if isinstance(val, str_type): result = encode_str(val) # depends on [control=['if'], data=[]] elif isinstance(val, int_types): result = encode('i%se' % val, val_encoding) # depends on [control=['if'], data=[]] elif isinstance(val, (list, set, tuple)): result = encode('l', val_encoding) for item in val: result += encode_(item) # depends on [control=['for'], data=['item']] result += encode('e', val_encoding) # depends on [control=['if'], data=[]] elif isinstance(val, dict): result = encode('d', val_encoding) # Dictionaries are expected to be sorted by key. for (k, v) in OrderedDict(sorted(val.items(), key=itemgetter(0))).items(): result += encode_str(k) + encode_(v) # depends on [control=['for'], data=[]] result += encode('e', val_encoding) # depends on [control=['if'], data=[]] elif isinstance(val, byte_types): result = encode('%s:' % len(val), val_encoding) result += val # depends on [control=['if'], data=[]] else: raise BencodeEncodingError('Unable to encode `%s` %s' % (type(val), val)) return result return encode_(value)
def dump_config(self): """Pretty print the configuration dict to stdout.""" yaml_content = self.get_merged_config() print('YAML Configuration\n%s\n' % yaml_content.read()) try: self.load() print('Python Configuration\n%s\n' % pretty(self.yamldocs)) except ConfigError: sys.stderr.write( 'config parse error. try running with --logfile=/dev/tty\n') raise
def function[dump_config, parameter[self]]: constant[Pretty print the configuration dict to stdout.] variable[yaml_content] assign[=] call[name[self].get_merged_config, parameter[]] call[name[print], parameter[binary_operation[constant[YAML Configuration %s ] <ast.Mod object at 0x7da2590d6920> call[name[yaml_content].read, parameter[]]]]] <ast.Try object at 0x7da18f722fe0>
keyword[def] identifier[dump_config] ( identifier[self] ): literal[string] identifier[yaml_content] = identifier[self] . identifier[get_merged_config] () identifier[print] ( literal[string] % identifier[yaml_content] . identifier[read] ()) keyword[try] : identifier[self] . identifier[load] () identifier[print] ( literal[string] % identifier[pretty] ( identifier[self] . identifier[yamldocs] )) keyword[except] identifier[ConfigError] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] ) keyword[raise]
def dump_config(self): """Pretty print the configuration dict to stdout.""" yaml_content = self.get_merged_config() print('YAML Configuration\n%s\n' % yaml_content.read()) try: self.load() print('Python Configuration\n%s\n' % pretty(self.yamldocs)) # depends on [control=['try'], data=[]] except ConfigError: sys.stderr.write('config parse error. try running with --logfile=/dev/tty\n') raise # depends on [control=['except'], data=[]]
def dump(self): """Writes the changes to the configuration file.""" try: import yaml cfg_file = self._cfg_file() cfg_dir, __ = os.path.split(cfg_file) os.makedirs(cfg_dir, exist_ok=True) with open(cfg_file, 'w') as f: yaml.dump(self, f) except ImportError as err: raise RuntimeError( 'Cannot dump the configuration settings to file. You need to ' 'install the necessary dependencies (pyyaml, appdirs).' ) from err
def function[dump, parameter[self]]: constant[Writes the changes to the configuration file.] <ast.Try object at 0x7da1b121b760>
keyword[def] identifier[dump] ( identifier[self] ): literal[string] keyword[try] : keyword[import] identifier[yaml] identifier[cfg_file] = identifier[self] . identifier[_cfg_file] () identifier[cfg_dir] , identifier[__] = identifier[os] . identifier[path] . identifier[split] ( identifier[cfg_file] ) identifier[os] . identifier[makedirs] ( identifier[cfg_dir] , identifier[exist_ok] = keyword[True] ) keyword[with] identifier[open] ( identifier[cfg_file] , literal[string] ) keyword[as] identifier[f] : identifier[yaml] . identifier[dump] ( identifier[self] , identifier[f] ) keyword[except] identifier[ImportError] keyword[as] identifier[err] : keyword[raise] identifier[RuntimeError] ( literal[string] literal[string] ) keyword[from] identifier[err]
def dump(self): """Writes the changes to the configuration file.""" try: import yaml cfg_file = self._cfg_file() (cfg_dir, __) = os.path.split(cfg_file) os.makedirs(cfg_dir, exist_ok=True) with open(cfg_file, 'w') as f: yaml.dump(self, f) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]] except ImportError as err: raise RuntimeError('Cannot dump the configuration settings to file. You need to install the necessary dependencies (pyyaml, appdirs).') from err # depends on [control=['except'], data=['err']]
def initial_refcounts(self, initial_terms): """ Calculate initial refcounts for execution of this graph. Parameters ---------- initial_terms : iterable[Term] An iterable of terms that were pre-computed before graph execution. Each node starts with a refcount equal to its outdegree, and output nodes get one extra reference to ensure that they're still in the graph at the end of execution. """ refcounts = self.graph.out_degree() for t in self.outputs.values(): refcounts[t] += 1 for t in initial_terms: self._decref_dependencies_recursive(t, refcounts, set()) return refcounts
def function[initial_refcounts, parameter[self, initial_terms]]: constant[ Calculate initial refcounts for execution of this graph. Parameters ---------- initial_terms : iterable[Term] An iterable of terms that were pre-computed before graph execution. Each node starts with a refcount equal to its outdegree, and output nodes get one extra reference to ensure that they're still in the graph at the end of execution. ] variable[refcounts] assign[=] call[name[self].graph.out_degree, parameter[]] for taget[name[t]] in starred[call[name[self].outputs.values, parameter[]]] begin[:] <ast.AugAssign object at 0x7da1b2040550> for taget[name[t]] in starred[name[initial_terms]] begin[:] call[name[self]._decref_dependencies_recursive, parameter[name[t], name[refcounts], call[name[set], parameter[]]]] return[name[refcounts]]
keyword[def] identifier[initial_refcounts] ( identifier[self] , identifier[initial_terms] ): literal[string] identifier[refcounts] = identifier[self] . identifier[graph] . identifier[out_degree] () keyword[for] identifier[t] keyword[in] identifier[self] . identifier[outputs] . identifier[values] (): identifier[refcounts] [ identifier[t] ]+= literal[int] keyword[for] identifier[t] keyword[in] identifier[initial_terms] : identifier[self] . identifier[_decref_dependencies_recursive] ( identifier[t] , identifier[refcounts] , identifier[set] ()) keyword[return] identifier[refcounts]
def initial_refcounts(self, initial_terms): """ Calculate initial refcounts for execution of this graph. Parameters ---------- initial_terms : iterable[Term] An iterable of terms that were pre-computed before graph execution. Each node starts with a refcount equal to its outdegree, and output nodes get one extra reference to ensure that they're still in the graph at the end of execution. """ refcounts = self.graph.out_degree() for t in self.outputs.values(): refcounts[t] += 1 # depends on [control=['for'], data=['t']] for t in initial_terms: self._decref_dependencies_recursive(t, refcounts, set()) # depends on [control=['for'], data=['t']] return refcounts
def _filter_data_frame(df, data_col, filter_col, filter_str=None): """Return a filtered data frame as a dictionary.""" if filter_str is not None: relevant_cols = data_col + [filter_col] df.dropna(inplace=True, subset=relevant_cols) row_filter = df[filter_col].str.contains(filter_str, case=False) data_list = df[row_filter][data_col].to_dict() else: data_list = df[data_col].to_dict() return data_list
def function[_filter_data_frame, parameter[df, data_col, filter_col, filter_str]]: constant[Return a filtered data frame as a dictionary.] if compare[name[filter_str] is_not constant[None]] begin[:] variable[relevant_cols] assign[=] binary_operation[name[data_col] + list[[<ast.Name object at 0x7da20c993400>]]] call[name[df].dropna, parameter[]] variable[row_filter] assign[=] call[call[name[df]][name[filter_col]].str.contains, parameter[name[filter_str]]] variable[data_list] assign[=] call[call[call[name[df]][name[row_filter]]][name[data_col]].to_dict, parameter[]] return[name[data_list]]
keyword[def] identifier[_filter_data_frame] ( identifier[df] , identifier[data_col] , identifier[filter_col] , identifier[filter_str] = keyword[None] ): literal[string] keyword[if] identifier[filter_str] keyword[is] keyword[not] keyword[None] : identifier[relevant_cols] = identifier[data_col] +[ identifier[filter_col] ] identifier[df] . identifier[dropna] ( identifier[inplace] = keyword[True] , identifier[subset] = identifier[relevant_cols] ) identifier[row_filter] = identifier[df] [ identifier[filter_col] ]. identifier[str] . identifier[contains] ( identifier[filter_str] , identifier[case] = keyword[False] ) identifier[data_list] = identifier[df] [ identifier[row_filter] ][ identifier[data_col] ]. identifier[to_dict] () keyword[else] : identifier[data_list] = identifier[df] [ identifier[data_col] ]. identifier[to_dict] () keyword[return] identifier[data_list]
def _filter_data_frame(df, data_col, filter_col, filter_str=None): """Return a filtered data frame as a dictionary.""" if filter_str is not None: relevant_cols = data_col + [filter_col] df.dropna(inplace=True, subset=relevant_cols) row_filter = df[filter_col].str.contains(filter_str, case=False) data_list = df[row_filter][data_col].to_dict() # depends on [control=['if'], data=['filter_str']] else: data_list = df[data_col].to_dict() return data_list
def dskgtl(keywrd): """ Retrieve the value of a specified DSK tolerance or margin parameter. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskgtl_c.html :param keywrd: Code specifying parameter to retrieve. :type keywrd: int :return: Value of parameter. :rtype: float """ keywrd = ctypes.c_int(keywrd) dpval = ctypes.c_double(0) libspice.dskgtl_c(keywrd, ctypes.byref(dpval)) return dpval.value
def function[dskgtl, parameter[keywrd]]: constant[ Retrieve the value of a specified DSK tolerance or margin parameter. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskgtl_c.html :param keywrd: Code specifying parameter to retrieve. :type keywrd: int :return: Value of parameter. :rtype: float ] variable[keywrd] assign[=] call[name[ctypes].c_int, parameter[name[keywrd]]] variable[dpval] assign[=] call[name[ctypes].c_double, parameter[constant[0]]] call[name[libspice].dskgtl_c, parameter[name[keywrd], call[name[ctypes].byref, parameter[name[dpval]]]]] return[name[dpval].value]
keyword[def] identifier[dskgtl] ( identifier[keywrd] ): literal[string] identifier[keywrd] = identifier[ctypes] . identifier[c_int] ( identifier[keywrd] ) identifier[dpval] = identifier[ctypes] . identifier[c_double] ( literal[int] ) identifier[libspice] . identifier[dskgtl_c] ( identifier[keywrd] , identifier[ctypes] . identifier[byref] ( identifier[dpval] )) keyword[return] identifier[dpval] . identifier[value]
def dskgtl(keywrd): """ Retrieve the value of a specified DSK tolerance or margin parameter. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskgtl_c.html :param keywrd: Code specifying parameter to retrieve. :type keywrd: int :return: Value of parameter. :rtype: float """ keywrd = ctypes.c_int(keywrd) dpval = ctypes.c_double(0) libspice.dskgtl_c(keywrd, ctypes.byref(dpval)) return dpval.value
def get_host(name): """ Prints the public dns name of `name`, if it exists. :param name: The instance name. :type name: ``str`` """ f = {'instance-state-name': 'running', 'tag:Name': name} ec2 = boto.connect_ec2(region=get_region()) rs = ec2.get_all_instances(filters=f) if len(rs) == 0: raise Exception('Host "%s" not found' % name) print(rs[0].instances[0].public_dns_name)
def function[get_host, parameter[name]]: constant[ Prints the public dns name of `name`, if it exists. :param name: The instance name. :type name: ``str`` ] variable[f] assign[=] dictionary[[<ast.Constant object at 0x7da18fe93a30>, <ast.Constant object at 0x7da18fe92410>], [<ast.Constant object at 0x7da18fe919f0>, <ast.Name object at 0x7da18fe91f90>]] variable[ec2] assign[=] call[name[boto].connect_ec2, parameter[]] variable[rs] assign[=] call[name[ec2].get_all_instances, parameter[]] if compare[call[name[len], parameter[name[rs]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da18fe90250> call[name[print], parameter[call[call[name[rs]][constant[0]].instances][constant[0]].public_dns_name]]
keyword[def] identifier[get_host] ( identifier[name] ): literal[string] identifier[f] ={ literal[string] : literal[string] , literal[string] : identifier[name] } identifier[ec2] = identifier[boto] . identifier[connect_ec2] ( identifier[region] = identifier[get_region] ()) identifier[rs] = identifier[ec2] . identifier[get_all_instances] ( identifier[filters] = identifier[f] ) keyword[if] identifier[len] ( identifier[rs] )== literal[int] : keyword[raise] identifier[Exception] ( literal[string] % identifier[name] ) identifier[print] ( identifier[rs] [ literal[int] ]. identifier[instances] [ literal[int] ]. identifier[public_dns_name] )
def get_host(name): """ Prints the public dns name of `name`, if it exists. :param name: The instance name. :type name: ``str`` """ f = {'instance-state-name': 'running', 'tag:Name': name} ec2 = boto.connect_ec2(region=get_region()) rs = ec2.get_all_instances(filters=f) if len(rs) == 0: raise Exception('Host "%s" not found' % name) # depends on [control=['if'], data=[]] print(rs[0].instances[0].public_dns_name)
def merge_lists(*args): """Merge an arbitrary number of lists into a single list and dedupe it Args: *args: Two or more lists Returns: A deduped merged list of all the provided lists as a single list """ out = {} for contacts in filter(None, args): for contact in contacts: out[contact.value] = contact return list(out.values())
def function[merge_lists, parameter[]]: constant[Merge an arbitrary number of lists into a single list and dedupe it Args: *args: Two or more lists Returns: A deduped merged list of all the provided lists as a single list ] variable[out] assign[=] dictionary[[], []] for taget[name[contacts]] in starred[call[name[filter], parameter[constant[None], name[args]]]] begin[:] for taget[name[contact]] in starred[name[contacts]] begin[:] call[name[out]][name[contact].value] assign[=] name[contact] return[call[name[list], parameter[call[name[out].values, parameter[]]]]]
keyword[def] identifier[merge_lists] (* identifier[args] ): literal[string] identifier[out] ={} keyword[for] identifier[contacts] keyword[in] identifier[filter] ( keyword[None] , identifier[args] ): keyword[for] identifier[contact] keyword[in] identifier[contacts] : identifier[out] [ identifier[contact] . identifier[value] ]= identifier[contact] keyword[return] identifier[list] ( identifier[out] . identifier[values] ())
def merge_lists(*args): """Merge an arbitrary number of lists into a single list and dedupe it Args: *args: Two or more lists Returns: A deduped merged list of all the provided lists as a single list """ out = {} for contacts in filter(None, args): for contact in contacts: out[contact.value] = contact # depends on [control=['for'], data=['contact']] # depends on [control=['for'], data=['contacts']] return list(out.values())
def do(self, x_orig): """Transform the unknowns to preconditioned coordinates This method also transforms the gradient to original coordinates """ if self.scales is None: return x_orig else: return np.dot(self.rotation.transpose(), x_orig)*self.scales
def function[do, parameter[self, x_orig]]: constant[Transform the unknowns to preconditioned coordinates This method also transforms the gradient to original coordinates ] if compare[name[self].scales is constant[None]] begin[:] return[name[x_orig]]
keyword[def] identifier[do] ( identifier[self] , identifier[x_orig] ): literal[string] keyword[if] identifier[self] . identifier[scales] keyword[is] keyword[None] : keyword[return] identifier[x_orig] keyword[else] : keyword[return] identifier[np] . identifier[dot] ( identifier[self] . identifier[rotation] . identifier[transpose] (), identifier[x_orig] )* identifier[self] . identifier[scales]
def do(self, x_orig): """Transform the unknowns to preconditioned coordinates This method also transforms the gradient to original coordinates """ if self.scales is None: return x_orig # depends on [control=['if'], data=[]] else: return np.dot(self.rotation.transpose(), x_orig) * self.scales
def set_storage(self, storage): """Set storage backend for downloader For full list of storage backend supported, please see :mod:`storage`. Args: storage (dict or BaseStorage): storage backend configuration or instance """ if isinstance(storage, BaseStorage): self.storage = storage elif isinstance(storage, dict): if 'backend' not in storage and 'root_dir' in storage: storage['backend'] = 'FileSystem' try: backend_cls = getattr(storage_package, storage['backend']) except AttributeError: try: backend_cls = import_module(storage['backend']) except ImportError: self.logger.error('cannot find backend module %s', storage['backend']) sys.exit() kwargs = storage.copy() del kwargs['backend'] self.storage = backend_cls(**kwargs) else: raise TypeError('"storage" must be a storage object or dict')
def function[set_storage, parameter[self, storage]]: constant[Set storage backend for downloader For full list of storage backend supported, please see :mod:`storage`. Args: storage (dict or BaseStorage): storage backend configuration or instance ] if call[name[isinstance], parameter[name[storage], name[BaseStorage]]] begin[:] name[self].storage assign[=] name[storage]
keyword[def] identifier[set_storage] ( identifier[self] , identifier[storage] ): literal[string] keyword[if] identifier[isinstance] ( identifier[storage] , identifier[BaseStorage] ): identifier[self] . identifier[storage] = identifier[storage] keyword[elif] identifier[isinstance] ( identifier[storage] , identifier[dict] ): keyword[if] literal[string] keyword[not] keyword[in] identifier[storage] keyword[and] literal[string] keyword[in] identifier[storage] : identifier[storage] [ literal[string] ]= literal[string] keyword[try] : identifier[backend_cls] = identifier[getattr] ( identifier[storage_package] , identifier[storage] [ literal[string] ]) keyword[except] identifier[AttributeError] : keyword[try] : identifier[backend_cls] = identifier[import_module] ( identifier[storage] [ literal[string] ]) keyword[except] identifier[ImportError] : identifier[self] . identifier[logger] . identifier[error] ( literal[string] , identifier[storage] [ literal[string] ]) identifier[sys] . identifier[exit] () identifier[kwargs] = identifier[storage] . identifier[copy] () keyword[del] identifier[kwargs] [ literal[string] ] identifier[self] . identifier[storage] = identifier[backend_cls] (** identifier[kwargs] ) keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] )
def set_storage(self, storage): """Set storage backend for downloader For full list of storage backend supported, please see :mod:`storage`. Args: storage (dict or BaseStorage): storage backend configuration or instance """ if isinstance(storage, BaseStorage): self.storage = storage # depends on [control=['if'], data=[]] elif isinstance(storage, dict): if 'backend' not in storage and 'root_dir' in storage: storage['backend'] = 'FileSystem' # depends on [control=['if'], data=[]] try: backend_cls = getattr(storage_package, storage['backend']) # depends on [control=['try'], data=[]] except AttributeError: try: backend_cls = import_module(storage['backend']) # depends on [control=['try'], data=[]] except ImportError: self.logger.error('cannot find backend module %s', storage['backend']) sys.exit() # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] kwargs = storage.copy() del kwargs['backend'] self.storage = backend_cls(**kwargs) # depends on [control=['if'], data=[]] else: raise TypeError('"storage" must be a storage object or dict')
def _extract_docs_raises(self): """Extract raises description from docstring. The internal computed raises list is composed by tuples (raise, description). """ if self.dst.style['in'] == 'numpydoc': data = '\n'.join([d.rstrip().replace(self.docs['out']['spaces'], '', 1) for d in self.docs['in']['raw'].splitlines()]) self.docs['in']['raises'] += self.dst.numpydoc.get_raise_list(data) if self.dst.style['in'] == 'google': data = '\n'.join([d.rstrip().replace(self.docs['out']['spaces'], '', 1) for d in self.docs['in']['raw'].splitlines()]) self.docs['in']['raises'] += self.dst.googledoc.get_raise_list(data) elif self.dst.style['in'] == 'groups': self._extract_groupstyle_docs_raises() elif self.dst.style['in'] in ['javadoc', 'reST']: self._extract_tagstyle_docs_raises()
def function[_extract_docs_raises, parameter[self]]: constant[Extract raises description from docstring. The internal computed raises list is composed by tuples (raise, description). ] if compare[call[name[self].dst.style][constant[in]] equal[==] constant[numpydoc]] begin[:] variable[data] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b11aaad0>]] <ast.AugAssign object at 0x7da1b11aa890> if compare[call[name[self].dst.style][constant[in]] equal[==] constant[google]] begin[:] variable[data] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b11aab30>]] <ast.AugAssign object at 0x7da1b11a89d0>
keyword[def] identifier[_extract_docs_raises] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[dst] . identifier[style] [ literal[string] ]== literal[string] : identifier[data] = literal[string] . identifier[join] ([ identifier[d] . identifier[rstrip] (). identifier[replace] ( identifier[self] . identifier[docs] [ literal[string] ][ literal[string] ], literal[string] , literal[int] ) keyword[for] identifier[d] keyword[in] identifier[self] . identifier[docs] [ literal[string] ][ literal[string] ]. identifier[splitlines] ()]) identifier[self] . identifier[docs] [ literal[string] ][ literal[string] ]+= identifier[self] . identifier[dst] . identifier[numpydoc] . identifier[get_raise_list] ( identifier[data] ) keyword[if] identifier[self] . identifier[dst] . identifier[style] [ literal[string] ]== literal[string] : identifier[data] = literal[string] . identifier[join] ([ identifier[d] . identifier[rstrip] (). identifier[replace] ( identifier[self] . identifier[docs] [ literal[string] ][ literal[string] ], literal[string] , literal[int] ) keyword[for] identifier[d] keyword[in] identifier[self] . identifier[docs] [ literal[string] ][ literal[string] ]. identifier[splitlines] ()]) identifier[self] . identifier[docs] [ literal[string] ][ literal[string] ]+= identifier[self] . identifier[dst] . identifier[googledoc] . identifier[get_raise_list] ( identifier[data] ) keyword[elif] identifier[self] . identifier[dst] . identifier[style] [ literal[string] ]== literal[string] : identifier[self] . identifier[_extract_groupstyle_docs_raises] () keyword[elif] identifier[self] . identifier[dst] . identifier[style] [ literal[string] ] keyword[in] [ literal[string] , literal[string] ]: identifier[self] . identifier[_extract_tagstyle_docs_raises] ()
def _extract_docs_raises(self): """Extract raises description from docstring. The internal computed raises list is composed by tuples (raise, description). """ if self.dst.style['in'] == 'numpydoc': data = '\n'.join([d.rstrip().replace(self.docs['out']['spaces'], '', 1) for d in self.docs['in']['raw'].splitlines()]) self.docs['in']['raises'] += self.dst.numpydoc.get_raise_list(data) # depends on [control=['if'], data=[]] if self.dst.style['in'] == 'google': data = '\n'.join([d.rstrip().replace(self.docs['out']['spaces'], '', 1) for d in self.docs['in']['raw'].splitlines()]) self.docs['in']['raises'] += self.dst.googledoc.get_raise_list(data) # depends on [control=['if'], data=[]] elif self.dst.style['in'] == 'groups': self._extract_groupstyle_docs_raises() # depends on [control=['if'], data=[]] elif self.dst.style['in'] in ['javadoc', 'reST']: self._extract_tagstyle_docs_raises() # depends on [control=['if'], data=[]]
def in_casapy (helper, vis=None, figfile=None): """This function is run inside the weirdo casapy IPython environment! A strange set of modules is available, and the `pwkit.environments.casa.scripting` system sets up a very particular environment to allow encapsulated scripting. """ if vis is None: raise ValueError ('vis') helper.casans.plotants (vis=vis, figfile=figfile)
def function[in_casapy, parameter[helper, vis, figfile]]: constant[This function is run inside the weirdo casapy IPython environment! A strange set of modules is available, and the `pwkit.environments.casa.scripting` system sets up a very particular environment to allow encapsulated scripting. ] if compare[name[vis] is constant[None]] begin[:] <ast.Raise object at 0x7da1b27b88b0> call[name[helper].casans.plotants, parameter[]]
keyword[def] identifier[in_casapy] ( identifier[helper] , identifier[vis] = keyword[None] , identifier[figfile] = keyword[None] ): literal[string] keyword[if] identifier[vis] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[helper] . identifier[casans] . identifier[plotants] ( identifier[vis] = identifier[vis] , identifier[figfile] = identifier[figfile] )
def in_casapy(helper, vis=None, figfile=None): """This function is run inside the weirdo casapy IPython environment! A strange set of modules is available, and the `pwkit.environments.casa.scripting` system sets up a very particular environment to allow encapsulated scripting. """ if vis is None: raise ValueError('vis') # depends on [control=['if'], data=[]] helper.casans.plotants(vis=vis, figfile=figfile)
def _compute_edges(self): """Compute the edges of the current surface. Returns: Tuple[~curve.Curve, ~curve.Curve, ~curve.Curve]: The edges of the surface. """ nodes1, nodes2, nodes3 = _surface_helpers.compute_edge_nodes( self._nodes, self._degree ) edge1 = _curve_mod.Curve(nodes1, self._degree, _copy=False) edge2 = _curve_mod.Curve(nodes2, self._degree, _copy=False) edge3 = _curve_mod.Curve(nodes3, self._degree, _copy=False) return edge1, edge2, edge3
def function[_compute_edges, parameter[self]]: constant[Compute the edges of the current surface. Returns: Tuple[~curve.Curve, ~curve.Curve, ~curve.Curve]: The edges of the surface. ] <ast.Tuple object at 0x7da204620a60> assign[=] call[name[_surface_helpers].compute_edge_nodes, parameter[name[self]._nodes, name[self]._degree]] variable[edge1] assign[=] call[name[_curve_mod].Curve, parameter[name[nodes1], name[self]._degree]] variable[edge2] assign[=] call[name[_curve_mod].Curve, parameter[name[nodes2], name[self]._degree]] variable[edge3] assign[=] call[name[_curve_mod].Curve, parameter[name[nodes3], name[self]._degree]] return[tuple[[<ast.Name object at 0x7da1b184ac20>, <ast.Name object at 0x7da1b184b9d0>, <ast.Name object at 0x7da1b1848e80>]]]
keyword[def] identifier[_compute_edges] ( identifier[self] ): literal[string] identifier[nodes1] , identifier[nodes2] , identifier[nodes3] = identifier[_surface_helpers] . identifier[compute_edge_nodes] ( identifier[self] . identifier[_nodes] , identifier[self] . identifier[_degree] ) identifier[edge1] = identifier[_curve_mod] . identifier[Curve] ( identifier[nodes1] , identifier[self] . identifier[_degree] , identifier[_copy] = keyword[False] ) identifier[edge2] = identifier[_curve_mod] . identifier[Curve] ( identifier[nodes2] , identifier[self] . identifier[_degree] , identifier[_copy] = keyword[False] ) identifier[edge3] = identifier[_curve_mod] . identifier[Curve] ( identifier[nodes3] , identifier[self] . identifier[_degree] , identifier[_copy] = keyword[False] ) keyword[return] identifier[edge1] , identifier[edge2] , identifier[edge3]
def _compute_edges(self): """Compute the edges of the current surface. Returns: Tuple[~curve.Curve, ~curve.Curve, ~curve.Curve]: The edges of the surface. """ (nodes1, nodes2, nodes3) = _surface_helpers.compute_edge_nodes(self._nodes, self._degree) edge1 = _curve_mod.Curve(nodes1, self._degree, _copy=False) edge2 = _curve_mod.Curve(nodes2, self._degree, _copy=False) edge3 = _curve_mod.Curve(nodes3, self._degree, _copy=False) return (edge1, edge2, edge3)
def minimum(lhs, rhs): """Returns element-wise minimum of the input arrays with broadcasting. Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First array to be compared. rhs : scalar or mxnet.ndarray.array Second array to be compared. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise minimum of the input arrays. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> mx.nd.minimum(x, 2).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.minimum(x, y).asnumpy() array([[ 0., 0., 0.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.minimum(z, y).asnumpy() array([[ 0., 0.], [ 0., 1.]], dtype=float32) """ # pylint: disable= no-member, protected-access return _ufunc_helper( lhs, rhs, op.broadcast_minimum, lambda x, y: x if x < y else y, _internal._minimum_scalar, None)
def function[minimum, parameter[lhs, rhs]]: constant[Returns element-wise minimum of the input arrays with broadcasting. Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First array to be compared. rhs : scalar or mxnet.ndarray.array Second array to be compared. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise minimum of the input arrays. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> mx.nd.minimum(x, 2).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.minimum(x, y).asnumpy() array([[ 0., 0., 0.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.minimum(z, y).asnumpy() array([[ 0., 0.], [ 0., 1.]], dtype=float32) ] return[call[name[_ufunc_helper], parameter[name[lhs], name[rhs], name[op].broadcast_minimum, <ast.Lambda object at 0x7da1b200a9b0>, name[_internal]._minimum_scalar, constant[None]]]]
keyword[def] identifier[minimum] ( identifier[lhs] , identifier[rhs] ): literal[string] keyword[return] identifier[_ufunc_helper] ( identifier[lhs] , identifier[rhs] , identifier[op] . identifier[broadcast_minimum] , keyword[lambda] identifier[x] , identifier[y] : identifier[x] keyword[if] identifier[x] < identifier[y] keyword[else] identifier[y] , identifier[_internal] . identifier[_minimum_scalar] , keyword[None] )
def minimum(lhs, rhs): """Returns element-wise minimum of the input arrays with broadcasting. Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First array to be compared. rhs : scalar or mxnet.ndarray.array Second array to be compared. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise minimum of the input arrays. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> mx.nd.minimum(x, 2).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.minimum(x, y).asnumpy() array([[ 0., 0., 0.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.minimum(z, y).asnumpy() array([[ 0., 0.], [ 0., 1.]], dtype=float32) """ # pylint: disable= no-member, protected-access return _ufunc_helper(lhs, rhs, op.broadcast_minimum, lambda x, y: x if x < y else y, _internal._minimum_scalar, None)
def expire(self, current_time=None): """Expire any old entries `current_time` Optional time to be used to clean up queue (can be used in unit tests) """ if not self._queue: return if current_time is None: current_time = time() while self._queue: # Get top most item top = self._queue[0] # Early exit if item was not promoted and its expiration time # is greater than now. if top.promoted is None and top.expiry_date > current_time: break # Pop item from the stack top = heappop(self._queue) need_reschedule = (top.promoted is not None and top.promoted > current_time) # Give chance to reschedule if not need_reschedule: top.promoted = None top.on_delete(False) need_reschedule = (top.promoted is not None and top.promoted > current_time) # If item is promoted and expiration time somewhere in future # just reschedule it if need_reschedule: top.expiry_date = top.promoted top.promoted = None heappush(self._queue, top) else: del self._items[top.session_id]
def function[expire, parameter[self, current_time]]: constant[Expire any old entries `current_time` Optional time to be used to clean up queue (can be used in unit tests) ] if <ast.UnaryOp object at 0x7da1b0bf4fa0> begin[:] return[None] if compare[name[current_time] is constant[None]] begin[:] variable[current_time] assign[=] call[name[time], parameter[]] while name[self]._queue begin[:] variable[top] assign[=] call[name[self]._queue][constant[0]] if <ast.BoolOp object at 0x7da1b0bf6650> begin[:] break variable[top] assign[=] call[name[heappop], parameter[name[self]._queue]] variable[need_reschedule] assign[=] <ast.BoolOp object at 0x7da1b0bf6e00> if <ast.UnaryOp object at 0x7da1b0be2320> begin[:] name[top].promoted assign[=] constant[None] call[name[top].on_delete, parameter[constant[False]]] variable[need_reschedule] assign[=] <ast.BoolOp object at 0x7da1b0be0e80> if name[need_reschedule] begin[:] name[top].expiry_date assign[=] name[top].promoted name[top].promoted assign[=] constant[None] call[name[heappush], parameter[name[self]._queue, name[top]]]
keyword[def] identifier[expire] ( identifier[self] , identifier[current_time] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_queue] : keyword[return] keyword[if] identifier[current_time] keyword[is] keyword[None] : identifier[current_time] = identifier[time] () keyword[while] identifier[self] . identifier[_queue] : identifier[top] = identifier[self] . identifier[_queue] [ literal[int] ] keyword[if] identifier[top] . identifier[promoted] keyword[is] keyword[None] keyword[and] identifier[top] . identifier[expiry_date] > identifier[current_time] : keyword[break] identifier[top] = identifier[heappop] ( identifier[self] . identifier[_queue] ) identifier[need_reschedule] =( identifier[top] . identifier[promoted] keyword[is] keyword[not] keyword[None] keyword[and] identifier[top] . identifier[promoted] > identifier[current_time] ) keyword[if] keyword[not] identifier[need_reschedule] : identifier[top] . identifier[promoted] = keyword[None] identifier[top] . identifier[on_delete] ( keyword[False] ) identifier[need_reschedule] =( identifier[top] . identifier[promoted] keyword[is] keyword[not] keyword[None] keyword[and] identifier[top] . identifier[promoted] > identifier[current_time] ) keyword[if] identifier[need_reschedule] : identifier[top] . identifier[expiry_date] = identifier[top] . identifier[promoted] identifier[top] . identifier[promoted] = keyword[None] identifier[heappush] ( identifier[self] . identifier[_queue] , identifier[top] ) keyword[else] : keyword[del] identifier[self] . identifier[_items] [ identifier[top] . identifier[session_id] ]
def expire(self, current_time=None): """Expire any old entries `current_time` Optional time to be used to clean up queue (can be used in unit tests) """ if not self._queue: return # depends on [control=['if'], data=[]] if current_time is None: current_time = time() # depends on [control=['if'], data=['current_time']] while self._queue: # Get top most item top = self._queue[0] # Early exit if item was not promoted and its expiration time # is greater than now. if top.promoted is None and top.expiry_date > current_time: break # depends on [control=['if'], data=[]] # Pop item from the stack top = heappop(self._queue) need_reschedule = top.promoted is not None and top.promoted > current_time # Give chance to reschedule if not need_reschedule: top.promoted = None top.on_delete(False) need_reschedule = top.promoted is not None and top.promoted > current_time # depends on [control=['if'], data=[]] # If item is promoted and expiration time somewhere in future # just reschedule it if need_reschedule: top.expiry_date = top.promoted top.promoted = None heappush(self._queue, top) # depends on [control=['if'], data=[]] else: del self._items[top.session_id] # depends on [control=['while'], data=[]]
def edge_detect(image, size): """ Applies a Sobel filter to the given image. :param image: An image as a list of (R,G,B) values :param size: The size of the image as a tuple (width, height) :return: An array of the Sobel gradient value of each image pixel This value roughly corresponds to how much of an "edge" a pixel is. """ # TODO get edge data for boundaries width, height = size edge_data = [0] * len(image) gray_scale_img = list(map(luma, image)) for y in range(1, height - 1): for x in range(1, width - 1): idx = coords_to_index((x, y), width) a, b, c = gray_scale_img[idx - 1 - width: idx + 2 - width] d, e, f = gray_scale_img[idx - 1: idx + 2] g, h, i = gray_scale_img[idx - 1 + width: idx + 2 + width] g_x = -a - 2 * d - d + c + 2 * f + i g_y = -a - 2 * b - c + g + 2 * h + i g = sqrt(g_x * g_x + g_y * g_y) edge_data[idx] = g if idx % 200000 == 0: logger.info("Edge detection done for %d / %d pixels... (%2.2f%%)" % (idx, width * height, 100 * idx / float(width * height))) return edge_data
def function[edge_detect, parameter[image, size]]: constant[ Applies a Sobel filter to the given image. :param image: An image as a list of (R,G,B) values :param size: The size of the image as a tuple (width, height) :return: An array of the Sobel gradient value of each image pixel This value roughly corresponds to how much of an "edge" a pixel is. ] <ast.Tuple object at 0x7da1b1507e20> assign[=] name[size] variable[edge_data] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b1507f10>]] * call[name[len], parameter[name[image]]]] variable[gray_scale_img] assign[=] call[name[list], parameter[call[name[map], parameter[name[luma], name[image]]]]] for taget[name[y]] in starred[call[name[range], parameter[constant[1], binary_operation[name[height] - constant[1]]]]] begin[:] for taget[name[x]] in starred[call[name[range], parameter[constant[1], binary_operation[name[width] - constant[1]]]]] begin[:] variable[idx] assign[=] call[name[coords_to_index], parameter[tuple[[<ast.Name object at 0x7da1b15ea9e0>, <ast.Name object at 0x7da1b15ea8c0>]], name[width]]] <ast.Tuple object at 0x7da1b15e9b70> assign[=] call[name[gray_scale_img]][<ast.Slice object at 0x7da1b15e8c70>] <ast.Tuple object at 0x7da1b15e8a00> assign[=] call[name[gray_scale_img]][<ast.Slice object at 0x7da1b15ebdf0>] <ast.Tuple object at 0x7da1b15ebdc0> assign[=] call[name[gray_scale_img]][<ast.Slice object at 0x7da1b15ea6e0>] variable[g_x] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b135b250> - binary_operation[constant[2] * name[d]]] - name[d]] + name[c]] + binary_operation[constant[2] * name[f]]] + name[i]] variable[g_y] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b1359e70> - binary_operation[constant[2] * name[b]]] - name[c]] + name[g]] + binary_operation[constant[2] * name[h]]] + name[i]] variable[g] assign[=] call[name[sqrt], 
parameter[binary_operation[binary_operation[name[g_x] * name[g_x]] + binary_operation[name[g_y] * name[g_y]]]]] call[name[edge_data]][name[idx]] assign[=] name[g] if compare[binary_operation[name[idx] <ast.Mod object at 0x7da2590d6920> constant[200000]] equal[==] constant[0]] begin[:] call[name[logger].info, parameter[binary_operation[constant[Edge detection done for %d / %d pixels... (%2.2f%%)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b135a920>, <ast.BinOp object at 0x7da1b1359060>, <ast.BinOp object at 0x7da1b1358520>]]]]] return[name[edge_data]]
keyword[def] identifier[edge_detect] ( identifier[image] , identifier[size] ): literal[string] identifier[width] , identifier[height] = identifier[size] identifier[edge_data] =[ literal[int] ]* identifier[len] ( identifier[image] ) identifier[gray_scale_img] = identifier[list] ( identifier[map] ( identifier[luma] , identifier[image] )) keyword[for] identifier[y] keyword[in] identifier[range] ( literal[int] , identifier[height] - literal[int] ): keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , identifier[width] - literal[int] ): identifier[idx] = identifier[coords_to_index] (( identifier[x] , identifier[y] ), identifier[width] ) identifier[a] , identifier[b] , identifier[c] = identifier[gray_scale_img] [ identifier[idx] - literal[int] - identifier[width] : identifier[idx] + literal[int] - identifier[width] ] identifier[d] , identifier[e] , identifier[f] = identifier[gray_scale_img] [ identifier[idx] - literal[int] : identifier[idx] + literal[int] ] identifier[g] , identifier[h] , identifier[i] = identifier[gray_scale_img] [ identifier[idx] - literal[int] + identifier[width] : identifier[idx] + literal[int] + identifier[width] ] identifier[g_x] =- identifier[a] - literal[int] * identifier[d] - identifier[d] + identifier[c] + literal[int] * identifier[f] + identifier[i] identifier[g_y] =- identifier[a] - literal[int] * identifier[b] - identifier[c] + identifier[g] + literal[int] * identifier[h] + identifier[i] identifier[g] = identifier[sqrt] ( identifier[g_x] * identifier[g_x] + identifier[g_y] * identifier[g_y] ) identifier[edge_data] [ identifier[idx] ]= identifier[g] keyword[if] identifier[idx] % literal[int] == literal[int] : identifier[logger] . identifier[info] ( literal[string] % ( identifier[idx] , identifier[width] * identifier[height] , literal[int] * identifier[idx] / identifier[float] ( identifier[width] * identifier[height] ))) keyword[return] identifier[edge_data]
def edge_detect(image, size): """ Applies a Sobel filter to the given image. :param image: An image as a list of (R,G,B) values :param size: The size of the image as a tuple (width, height) :return: An array of the Sobel gradient value of each image pixel This value roughly corresponds to how much of an "edge" a pixel is. """ # TODO get edge data for boundaries (width, height) = size edge_data = [0] * len(image) gray_scale_img = list(map(luma, image)) for y in range(1, height - 1): for x in range(1, width - 1): idx = coords_to_index((x, y), width) (a, b, c) = gray_scale_img[idx - 1 - width:idx + 2 - width] (d, e, f) = gray_scale_img[idx - 1:idx + 2] (g, h, i) = gray_scale_img[idx - 1 + width:idx + 2 + width] g_x = -a - 2 * d - d + c + 2 * f + i g_y = -a - 2 * b - c + g + 2 * h + i g = sqrt(g_x * g_x + g_y * g_y) edge_data[idx] = g if idx % 200000 == 0: logger.info('Edge detection done for %d / %d pixels... (%2.2f%%)' % (idx, width * height, 100 * idx / float(width * height))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['y']] return edge_data
def reconcile(self, server): """ Reconcile this collection with the server. """ if not self.challenge.exists(server): raise Exception('Challenge does not exist on server') existing = MapRouletteTaskCollection.from_server(server, self.challenge) same = [] new = [] changed = [] deleted = [] # reconcile the new tasks with the existing tasks: for task in self.tasks: # if the task exists on the server... if task.identifier in [existing_task.identifier for existing_task in existing.tasks]: # and they are equal... if task == existing.get_by_identifier(task.identifier): # add to 'same' list same.append(task) # if they are not equal, add to 'changed' list else: changed.append(task) # if the task does not exist on the server, add to 'new' list else: new.append(task) # next, check for tasks on the server that don't exist in the new collection... for task in existing.tasks: if task.identifier not in [task.identifier for task in self.tasks]: # ... and add those to the 'deleted' list. deleted.append(task) # update the server with new, changed, and deleted tasks if new: newCollection = MapRouletteTaskCollection(self.challenge, tasks=new) newCollection.create(server) if changed: changedCollection = MapRouletteTaskCollection(self.challenge, tasks=changed) changedCollection.update(server) if deleted: deletedCollection = MapRouletteTaskCollection(self.challenge, tasks=deleted) for task in deletedCollection.tasks: task.status = 'deleted' deletedCollection.update(server) # return same, new, changed and deleted tasks return {'same': same, 'new': new, 'changed': changed, 'deleted': deleted}
def function[reconcile, parameter[self, server]]: constant[ Reconcile this collection with the server. ] if <ast.UnaryOp object at 0x7da20c6c4430> begin[:] <ast.Raise object at 0x7da20c6c78e0> variable[existing] assign[=] call[name[MapRouletteTaskCollection].from_server, parameter[name[server], name[self].challenge]] variable[same] assign[=] list[[]] variable[new] assign[=] list[[]] variable[changed] assign[=] list[[]] variable[deleted] assign[=] list[[]] for taget[name[task]] in starred[name[self].tasks] begin[:] if compare[name[task].identifier in <ast.ListComp object at 0x7da20c6c42e0>] begin[:] if compare[name[task] equal[==] call[name[existing].get_by_identifier, parameter[name[task].identifier]]] begin[:] call[name[same].append, parameter[name[task]]] for taget[name[task]] in starred[name[existing].tasks] begin[:] if compare[name[task].identifier <ast.NotIn object at 0x7da2590d7190> <ast.ListComp object at 0x7da20c6c5450>] begin[:] call[name[deleted].append, parameter[name[task]]] if name[new] begin[:] variable[newCollection] assign[=] call[name[MapRouletteTaskCollection], parameter[name[self].challenge]] call[name[newCollection].create, parameter[name[server]]] if name[changed] begin[:] variable[changedCollection] assign[=] call[name[MapRouletteTaskCollection], parameter[name[self].challenge]] call[name[changedCollection].update, parameter[name[server]]] if name[deleted] begin[:] variable[deletedCollection] assign[=] call[name[MapRouletteTaskCollection], parameter[name[self].challenge]] for taget[name[task]] in starred[name[deletedCollection].tasks] begin[:] name[task].status assign[=] constant[deleted] call[name[deletedCollection].update, parameter[name[server]]] return[dictionary[[<ast.Constant object at 0x7da20c6c7970>, <ast.Constant object at 0x7da20c6c48e0>, <ast.Constant object at 0x7da20c6c7c10>, <ast.Constant object at 0x7da20c6c4f10>], [<ast.Name object at 0x7da20c6c6b60>, <ast.Name object at 0x7da20c6c4e80>, <ast.Name object at 0x7da20c6c7160>, 
<ast.Name object at 0x7da20c6c4880>]]]
keyword[def] identifier[reconcile] ( identifier[self] , identifier[server] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[challenge] . identifier[exists] ( identifier[server] ): keyword[raise] identifier[Exception] ( literal[string] ) identifier[existing] = identifier[MapRouletteTaskCollection] . identifier[from_server] ( identifier[server] , identifier[self] . identifier[challenge] ) identifier[same] =[] identifier[new] =[] identifier[changed] =[] identifier[deleted] =[] keyword[for] identifier[task] keyword[in] identifier[self] . identifier[tasks] : keyword[if] identifier[task] . identifier[identifier] keyword[in] [ identifier[existing_task] . identifier[identifier] keyword[for] identifier[existing_task] keyword[in] identifier[existing] . identifier[tasks] ]: keyword[if] identifier[task] == identifier[existing] . identifier[get_by_identifier] ( identifier[task] . identifier[identifier] ): identifier[same] . identifier[append] ( identifier[task] ) keyword[else] : identifier[changed] . identifier[append] ( identifier[task] ) keyword[else] : identifier[new] . identifier[append] ( identifier[task] ) keyword[for] identifier[task] keyword[in] identifier[existing] . identifier[tasks] : keyword[if] identifier[task] . identifier[identifier] keyword[not] keyword[in] [ identifier[task] . identifier[identifier] keyword[for] identifier[task] keyword[in] identifier[self] . identifier[tasks] ]: identifier[deleted] . identifier[append] ( identifier[task] ) keyword[if] identifier[new] : identifier[newCollection] = identifier[MapRouletteTaskCollection] ( identifier[self] . identifier[challenge] , identifier[tasks] = identifier[new] ) identifier[newCollection] . identifier[create] ( identifier[server] ) keyword[if] identifier[changed] : identifier[changedCollection] = identifier[MapRouletteTaskCollection] ( identifier[self] . identifier[challenge] , identifier[tasks] = identifier[changed] ) identifier[changedCollection] . 
identifier[update] ( identifier[server] ) keyword[if] identifier[deleted] : identifier[deletedCollection] = identifier[MapRouletteTaskCollection] ( identifier[self] . identifier[challenge] , identifier[tasks] = identifier[deleted] ) keyword[for] identifier[task] keyword[in] identifier[deletedCollection] . identifier[tasks] : identifier[task] . identifier[status] = literal[string] identifier[deletedCollection] . identifier[update] ( identifier[server] ) keyword[return] { literal[string] : identifier[same] , literal[string] : identifier[new] , literal[string] : identifier[changed] , literal[string] : identifier[deleted] }
def reconcile(self, server): """ Reconcile this collection with the server. """ if not self.challenge.exists(server): raise Exception('Challenge does not exist on server') # depends on [control=['if'], data=[]] existing = MapRouletteTaskCollection.from_server(server, self.challenge) same = [] new = [] changed = [] deleted = [] # reconcile the new tasks with the existing tasks: for task in self.tasks: # if the task exists on the server... if task.identifier in [existing_task.identifier for existing_task in existing.tasks]: # and they are equal... if task == existing.get_by_identifier(task.identifier): # add to 'same' list same.append(task) # depends on [control=['if'], data=['task']] else: # if they are not equal, add to 'changed' list changed.append(task) # depends on [control=['if'], data=[]] else: # if the task does not exist on the server, add to 'new' list new.append(task) # depends on [control=['for'], data=['task']] # next, check for tasks on the server that don't exist in the new collection... for task in existing.tasks: if task.identifier not in [task.identifier for task in self.tasks]: # ... and add those to the 'deleted' list. 
deleted.append(task) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['task']] # update the server with new, changed, and deleted tasks if new: newCollection = MapRouletteTaskCollection(self.challenge, tasks=new) newCollection.create(server) # depends on [control=['if'], data=[]] if changed: changedCollection = MapRouletteTaskCollection(self.challenge, tasks=changed) changedCollection.update(server) # depends on [control=['if'], data=[]] if deleted: deletedCollection = MapRouletteTaskCollection(self.challenge, tasks=deleted) for task in deletedCollection.tasks: task.status = 'deleted' # depends on [control=['for'], data=['task']] deletedCollection.update(server) # depends on [control=['if'], data=[]] # return same, new, changed and deleted tasks return {'same': same, 'new': new, 'changed': changed, 'deleted': deleted}
def disconnect_relationship(cls, id, related_collection_name, request_json): """ Disconnect one or more relationship in a collection with cardinality 'Many'. :param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \ be set in the model -- it is not the same as the node id :param related_collection_name: The name of the relationship :param request_json: a dictionary formatted according to the specification at \ http://jsonapi.org/format/#crud-updating-relationships :return: A response according to the same specification """ try: this_resource = cls.nodes.get(id=id, active=True) related_collection = getattr(this_resource, related_collection_name) rsrc_identifier_list = request_json['data'] if not isinstance(rsrc_identifier_list, list): raise WrongTypeError for rsrc_identifier in rsrc_identifier_list: connected_resource = cls.get_class_from_type(rsrc_identifier['type']).nodes.get( id=rsrc_identifier['id'] ) related_collection.disconnect(connected_resource) r = make_response('') r.status_code = http_error_codes.NO_CONTENT r.headers['Content-Type'] = CONTENT_TYPE except DoesNotExist: r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND]) except (KeyError, WrongTypeError): r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION]) return r
def function[disconnect_relationship, parameter[cls, id, related_collection_name, request_json]]: constant[ Disconnect one or more relationship in a collection with cardinality 'Many'. :param id: The 'id' field of the node on the left side of the relationship in the database. The id field must be set in the model -- it is not the same as the node id :param related_collection_name: The name of the relationship :param request_json: a dictionary formatted according to the specification at http://jsonapi.org/format/#crud-updating-relationships :return: A response according to the same specification ] <ast.Try object at 0x7da1b0a04610> return[name[r]]
keyword[def] identifier[disconnect_relationship] ( identifier[cls] , identifier[id] , identifier[related_collection_name] , identifier[request_json] ): literal[string] keyword[try] : identifier[this_resource] = identifier[cls] . identifier[nodes] . identifier[get] ( identifier[id] = identifier[id] , identifier[active] = keyword[True] ) identifier[related_collection] = identifier[getattr] ( identifier[this_resource] , identifier[related_collection_name] ) identifier[rsrc_identifier_list] = identifier[request_json] [ literal[string] ] keyword[if] keyword[not] identifier[isinstance] ( identifier[rsrc_identifier_list] , identifier[list] ): keyword[raise] identifier[WrongTypeError] keyword[for] identifier[rsrc_identifier] keyword[in] identifier[rsrc_identifier_list] : identifier[connected_resource] = identifier[cls] . identifier[get_class_from_type] ( identifier[rsrc_identifier] [ literal[string] ]). identifier[nodes] . identifier[get] ( identifier[id] = identifier[rsrc_identifier] [ literal[string] ] ) identifier[related_collection] . identifier[disconnect] ( identifier[connected_resource] ) identifier[r] = identifier[make_response] ( literal[string] ) identifier[r] . identifier[status_code] = identifier[http_error_codes] . identifier[NO_CONTENT] identifier[r] . identifier[headers] [ literal[string] ]= identifier[CONTENT_TYPE] keyword[except] identifier[DoesNotExist] : identifier[r] = identifier[application_codes] . identifier[error_response] ([ identifier[application_codes] . identifier[RESOURCE_NOT_FOUND] ]) keyword[except] ( identifier[KeyError] , identifier[WrongTypeError] ): identifier[r] = identifier[application_codes] . identifier[error_response] ([ identifier[application_codes] . identifier[BAD_FORMAT_VIOLATION] ]) keyword[return] identifier[r]
def disconnect_relationship(cls, id, related_collection_name, request_json): """ Disconnect one or more relationship in a collection with cardinality 'Many'. :param id: The 'id' field of the node on the left side of the relationship in the database. The id field must be set in the model -- it is not the same as the node id :param related_collection_name: The name of the relationship :param request_json: a dictionary formatted according to the specification at http://jsonapi.org/format/#crud-updating-relationships :return: A response according to the same specification """ try: this_resource = cls.nodes.get(id=id, active=True) related_collection = getattr(this_resource, related_collection_name) rsrc_identifier_list = request_json['data'] if not isinstance(rsrc_identifier_list, list): raise WrongTypeError # depends on [control=['if'], data=[]] for rsrc_identifier in rsrc_identifier_list: connected_resource = cls.get_class_from_type(rsrc_identifier['type']).nodes.get(id=rsrc_identifier['id']) related_collection.disconnect(connected_resource) # depends on [control=['for'], data=['rsrc_identifier']] r = make_response('') r.status_code = http_error_codes.NO_CONTENT r.headers['Content-Type'] = CONTENT_TYPE # depends on [control=['try'], data=[]] except DoesNotExist: r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND]) # depends on [control=['except'], data=[]] except (KeyError, WrongTypeError): r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION]) # depends on [control=['except'], data=[]] return r
def poll(self): """Retrieves older requests that may not make it back quick enough""" if self.redis_connected: json_item = request.get_json() result = None try: key = "rest:poll:{u}".format(u=json_item['poll_id']) result = self.redis_conn.get(key) if result is not None: result = json.loads(result) self.logger.debug("Found previous poll") self.redis_conn.delete(key) return self._create_ret_object(self.SUCCESS, result) else: self.logger.debug("poll key does not exist") return self._create_ret_object(self.FAILURE, None, True, "Could not find matching poll_id"), 404 except ConnectionError: self.logger.error("Lost connection to Redis") self._spawn_redis_connection_thread() except ValueError: extras = { "value": result } self.logger.warning('Unparseable JSON Received from redis', extra=extras) self.redis_conn.delete(key) return self._create_ret_object(self.FAILURE, None, True, "Unparseable JSON Received " "from redis"), 500 self.logger.warn("Unable to poll redis, not connected") return self._create_ret_object(self.FAILURE, None, True, "Unable to connect to Redis"), 500
def function[poll, parameter[self]]: constant[Retrieves older requests that may not make it back quick enough] if name[self].redis_connected begin[:] variable[json_item] assign[=] call[name[request].get_json, parameter[]] variable[result] assign[=] constant[None] <ast.Try object at 0x7da1b1981ae0> call[name[self].logger.warn, parameter[constant[Unable to poll redis, not connected]]] return[tuple[[<ast.Call object at 0x7da1b1980430>, <ast.Constant object at 0x7da1b1983160>]]]
keyword[def] identifier[poll] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[redis_connected] : identifier[json_item] = identifier[request] . identifier[get_json] () identifier[result] = keyword[None] keyword[try] : identifier[key] = literal[string] . identifier[format] ( identifier[u] = identifier[json_item] [ literal[string] ]) identifier[result] = identifier[self] . identifier[redis_conn] . identifier[get] ( identifier[key] ) keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] : identifier[result] = identifier[json] . identifier[loads] ( identifier[result] ) identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[redis_conn] . identifier[delete] ( identifier[key] ) keyword[return] identifier[self] . identifier[_create_ret_object] ( identifier[self] . identifier[SUCCESS] , identifier[result] ) keyword[else] : identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[self] . identifier[_create_ret_object] ( identifier[self] . identifier[FAILURE] , keyword[None] , keyword[True] , literal[string] ), literal[int] keyword[except] identifier[ConnectionError] : identifier[self] . identifier[logger] . identifier[error] ( literal[string] ) identifier[self] . identifier[_spawn_redis_connection_thread] () keyword[except] identifier[ValueError] : identifier[extras] ={ literal[string] : identifier[result] } identifier[self] . identifier[logger] . identifier[warning] ( literal[string] , identifier[extra] = identifier[extras] ) identifier[self] . identifier[redis_conn] . identifier[delete] ( identifier[key] ) keyword[return] identifier[self] . identifier[_create_ret_object] ( identifier[self] . identifier[FAILURE] , keyword[None] , keyword[True] , literal[string] literal[string] ), literal[int] identifier[self] . identifier[logger] . identifier[warn] ( literal[string] ) keyword[return] identifier[self] . 
identifier[_create_ret_object] ( identifier[self] . identifier[FAILURE] , keyword[None] , keyword[True] , literal[string] ), literal[int]
def poll(self): """Retrieves older requests that may not make it back quick enough""" if self.redis_connected: json_item = request.get_json() result = None try: key = 'rest:poll:{u}'.format(u=json_item['poll_id']) result = self.redis_conn.get(key) if result is not None: result = json.loads(result) self.logger.debug('Found previous poll') self.redis_conn.delete(key) return self._create_ret_object(self.SUCCESS, result) # depends on [control=['if'], data=['result']] else: self.logger.debug('poll key does not exist') return (self._create_ret_object(self.FAILURE, None, True, 'Could not find matching poll_id'), 404) # depends on [control=['try'], data=[]] except ConnectionError: self.logger.error('Lost connection to Redis') self._spawn_redis_connection_thread() # depends on [control=['except'], data=[]] except ValueError: extras = {'value': result} self.logger.warning('Unparseable JSON Received from redis', extra=extras) self.redis_conn.delete(key) return (self._create_ret_object(self.FAILURE, None, True, 'Unparseable JSON Received from redis'), 500) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] self.logger.warn('Unable to poll redis, not connected') return (self._create_ret_object(self.FAILURE, None, True, 'Unable to connect to Redis'), 500)
def _parse_match_info(match, soccer=False): """ Parse string containing info of a specific match :param match: Match data :type match: string :param soccer: Set to true if match contains soccer data, defaults to False :type soccer: bool, optional :return: Dictionary containing match information :rtype: dict """ match_info = {} i_open = match.index('(') i_close = match.index(')') match_info['league'] = match[i_open + 1:i_close].strip() match = match[i_close + 1:] i_vs = match.index('vs') i_colon = match.index(':') match_info['home_team'] = match[0:i_vs].replace('#', ' ').strip() match_info['away_team'] = match[i_vs + 2:i_colon].replace('#', ' ').strip() match = match[i_colon:] if soccer: i_hyph = match.index('-') match_info['match_score'] = match[1:i_hyph + 2].strip() match = match[i_hyph + 1:] i_hyph = match.index('-') match_info['match_time'] = match[i_hyph + 1:].strip() else: match_info['match_score'] = match[1:].strip() return match_info
def function[_parse_match_info, parameter[match, soccer]]: constant[ Parse string containing info of a specific match :param match: Match data :type match: string :param soccer: Set to true if match contains soccer data, defaults to False :type soccer: bool, optional :return: Dictionary containing match information :rtype: dict ] variable[match_info] assign[=] dictionary[[], []] variable[i_open] assign[=] call[name[match].index, parameter[constant[(]]] variable[i_close] assign[=] call[name[match].index, parameter[constant[)]]] call[name[match_info]][constant[league]] assign[=] call[call[name[match]][<ast.Slice object at 0x7da1b047b970>].strip, parameter[]] variable[match] assign[=] call[name[match]][<ast.Slice object at 0x7da1b0478610>] variable[i_vs] assign[=] call[name[match].index, parameter[constant[vs]]] variable[i_colon] assign[=] call[name[match].index, parameter[constant[:]]] call[name[match_info]][constant[home_team]] assign[=] call[call[call[name[match]][<ast.Slice object at 0x7da1b047a200>].replace, parameter[constant[#], constant[ ]]].strip, parameter[]] call[name[match_info]][constant[away_team]] assign[=] call[call[call[name[match]][<ast.Slice object at 0x7da1b0479f30>].replace, parameter[constant[#], constant[ ]]].strip, parameter[]] variable[match] assign[=] call[name[match]][<ast.Slice object at 0x7da1b047b580>] if name[soccer] begin[:] variable[i_hyph] assign[=] call[name[match].index, parameter[constant[-]]] call[name[match_info]][constant[match_score]] assign[=] call[call[name[match]][<ast.Slice object at 0x7da20c6e63e0>].strip, parameter[]] variable[match] assign[=] call[name[match]][<ast.Slice object at 0x7da20c6e6380>] variable[i_hyph] assign[=] call[name[match].index, parameter[constant[-]]] call[name[match_info]][constant[match_time]] assign[=] call[call[name[match]][<ast.Slice object at 0x7da20c6e6650>].strip, parameter[]] return[name[match_info]]
keyword[def] identifier[_parse_match_info] ( identifier[match] , identifier[soccer] = keyword[False] ): literal[string] identifier[match_info] ={} identifier[i_open] = identifier[match] . identifier[index] ( literal[string] ) identifier[i_close] = identifier[match] . identifier[index] ( literal[string] ) identifier[match_info] [ literal[string] ]= identifier[match] [ identifier[i_open] + literal[int] : identifier[i_close] ]. identifier[strip] () identifier[match] = identifier[match] [ identifier[i_close] + literal[int] :] identifier[i_vs] = identifier[match] . identifier[index] ( literal[string] ) identifier[i_colon] = identifier[match] . identifier[index] ( literal[string] ) identifier[match_info] [ literal[string] ]= identifier[match] [ literal[int] : identifier[i_vs] ]. identifier[replace] ( literal[string] , literal[string] ). identifier[strip] () identifier[match_info] [ literal[string] ]= identifier[match] [ identifier[i_vs] + literal[int] : identifier[i_colon] ]. identifier[replace] ( literal[string] , literal[string] ). identifier[strip] () identifier[match] = identifier[match] [ identifier[i_colon] :] keyword[if] identifier[soccer] : identifier[i_hyph] = identifier[match] . identifier[index] ( literal[string] ) identifier[match_info] [ literal[string] ]= identifier[match] [ literal[int] : identifier[i_hyph] + literal[int] ]. identifier[strip] () identifier[match] = identifier[match] [ identifier[i_hyph] + literal[int] :] identifier[i_hyph] = identifier[match] . identifier[index] ( literal[string] ) identifier[match_info] [ literal[string] ]= identifier[match] [ identifier[i_hyph] + literal[int] :]. identifier[strip] () keyword[else] : identifier[match_info] [ literal[string] ]= identifier[match] [ literal[int] :]. identifier[strip] () keyword[return] identifier[match_info]
def _parse_match_info(match, soccer=False): """ Parse string containing info of a specific match :param match: Match data :type match: string :param soccer: Set to true if match contains soccer data, defaults to False :type soccer: bool, optional :return: Dictionary containing match information :rtype: dict """ match_info = {} i_open = match.index('(') i_close = match.index(')') match_info['league'] = match[i_open + 1:i_close].strip() match = match[i_close + 1:] i_vs = match.index('vs') i_colon = match.index(':') match_info['home_team'] = match[0:i_vs].replace('#', ' ').strip() match_info['away_team'] = match[i_vs + 2:i_colon].replace('#', ' ').strip() match = match[i_colon:] if soccer: i_hyph = match.index('-') match_info['match_score'] = match[1:i_hyph + 2].strip() match = match[i_hyph + 1:] i_hyph = match.index('-') match_info['match_time'] = match[i_hyph + 1:].strip() # depends on [control=['if'], data=[]] else: match_info['match_score'] = match[1:].strip() return match_info
def _rel(self, copy=False): """ Get descriptive kwargs of the container (e.g. name, description, meta). """ rel = {} for key, obj in vars(self).items(): if not isinstance(obj, (pd.Series, pd.DataFrame, pd.SparseSeries, pd.SparseDataFrame)) and not key.startswith('_'): if copy and 'id' not in key: rel[key] = deepcopy(obj) else: rel[key] = obj return rel
def function[_rel, parameter[self, copy]]: constant[ Get descriptive kwargs of the container (e.g. name, description, meta). ] variable[rel] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da18f00fdc0>, <ast.Name object at 0x7da18f00fc40>]]] in starred[call[call[name[vars], parameter[name[self]]].items, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da18f00df00> begin[:] if <ast.BoolOp object at 0x7da18f00efb0> begin[:] call[name[rel]][name[key]] assign[=] call[name[deepcopy], parameter[name[obj]]] return[name[rel]]
keyword[def] identifier[_rel] ( identifier[self] , identifier[copy] = keyword[False] ): literal[string] identifier[rel] ={} keyword[for] identifier[key] , identifier[obj] keyword[in] identifier[vars] ( identifier[self] ). identifier[items] (): keyword[if] keyword[not] identifier[isinstance] ( identifier[obj] ,( identifier[pd] . identifier[Series] , identifier[pd] . identifier[DataFrame] , identifier[pd] . identifier[SparseSeries] , identifier[pd] . identifier[SparseDataFrame] )) keyword[and] keyword[not] identifier[key] . identifier[startswith] ( literal[string] ): keyword[if] identifier[copy] keyword[and] literal[string] keyword[not] keyword[in] identifier[key] : identifier[rel] [ identifier[key] ]= identifier[deepcopy] ( identifier[obj] ) keyword[else] : identifier[rel] [ identifier[key] ]= identifier[obj] keyword[return] identifier[rel]
def _rel(self, copy=False): """ Get descriptive kwargs of the container (e.g. name, description, meta). """ rel = {} for (key, obj) in vars(self).items(): if not isinstance(obj, (pd.Series, pd.DataFrame, pd.SparseSeries, pd.SparseDataFrame)) and (not key.startswith('_')): if copy and 'id' not in key: rel[key] = deepcopy(obj) # depends on [control=['if'], data=[]] else: rel[key] = obj # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return rel
def _dataframe_to_csv(writer, dataframe, delimiter, with_header): """serialize the dataframe with different delimiters""" encoding_writer = codecs.getwriter('utf-8')(writer) dataframe.to_csv( path_or_buf=encoding_writer, sep=delimiter, header=with_header, index=False )
def function[_dataframe_to_csv, parameter[writer, dataframe, delimiter, with_header]]: constant[serialize the dataframe with different delimiters] variable[encoding_writer] assign[=] call[call[name[codecs].getwriter, parameter[constant[utf-8]]], parameter[name[writer]]] call[name[dataframe].to_csv, parameter[]]
keyword[def] identifier[_dataframe_to_csv] ( identifier[writer] , identifier[dataframe] , identifier[delimiter] , identifier[with_header] ): literal[string] identifier[encoding_writer] = identifier[codecs] . identifier[getwriter] ( literal[string] )( identifier[writer] ) identifier[dataframe] . identifier[to_csv] ( identifier[path_or_buf] = identifier[encoding_writer] , identifier[sep] = identifier[delimiter] , identifier[header] = identifier[with_header] , identifier[index] = keyword[False] )
def _dataframe_to_csv(writer, dataframe, delimiter, with_header): """serialize the dataframe with different delimiters""" encoding_writer = codecs.getwriter('utf-8')(writer) dataframe.to_csv(path_or_buf=encoding_writer, sep=delimiter, header=with_header, index=False)
def query(self, tablename, attributes=None, consistent=False, count=False, index=None, limit=None, desc=False, return_capacity=None, filter=None, filter_or=False, exclusive_start_key=None, **kwargs): """ Perform an index query on a table This uses the older version of the DynamoDB API. See also: :meth:`~.query2`. Parameters ---------- tablename : str Name of the table to query attributes : list If present, only fetch these attributes from the item consistent : bool, optional Perform a strongly consistent read of the data (default False) count : bool, optional If True, return a count of matched items instead of the items themselves (default False) index : str, optional The name of the index to query limit : int, optional Maximum number of items to return desc : bool, optional If True, return items in descending order (default False) return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) filter : dict, optional Query arguments. Same format as **kwargs, but these arguments filter the results on the server before they are returned. They will NOT use an index, as that is what the **kwargs are for. filter_or : bool, optional If True, multiple filter args will be OR'd together. If False, they will be AND'd together. (default False) exclusive_start_key : dict, optional The ExclusiveStartKey to resume a previous query **kwargs : dict, optional Query arguments (examples below) Examples -------- You may pass in constraints using the Django-style '__' syntax. For example: .. 
code-block:: python connection.query('mytable', foo__eq=5) connection.query('mytable', foo__eq=5, bar__lt=22) connection.query('mytable', foo__eq=5, bar__between=(1, 10)) """ keywords = { 'TableName': tablename, 'ReturnConsumedCapacity': self._default_capacity(return_capacity), 'ConsistentRead': consistent, 'ScanIndexForward': not desc, 'KeyConditions': encode_query_kwargs(self.dynamizer, kwargs), } if attributes is not None: keywords['AttributesToGet'] = attributes if index is not None: keywords['IndexName'] = index if filter is not None: if len(filter) > 1: keywords['ConditionalOperator'] = 'OR' if filter_or else 'AND' keywords['QueryFilter'] = encode_query_kwargs(self.dynamizer, filter) if exclusive_start_key is not None: keywords['ExclusiveStartKey'] = \ self.dynamizer.maybe_encode_keys(exclusive_start_key) if not isinstance(limit, Limit): limit = Limit(limit) if count: keywords['Select'] = COUNT return self._count('query', limit, keywords) else: return ResultSet(self, limit, 'query', **keywords)
def function[query, parameter[self, tablename, attributes, consistent, count, index, limit, desc, return_capacity, filter, filter_or, exclusive_start_key]]: constant[ Perform an index query on a table This uses the older version of the DynamoDB API. See also: :meth:`~.query2`. Parameters ---------- tablename : str Name of the table to query attributes : list If present, only fetch these attributes from the item consistent : bool, optional Perform a strongly consistent read of the data (default False) count : bool, optional If True, return a count of matched items instead of the items themselves (default False) index : str, optional The name of the index to query limit : int, optional Maximum number of items to return desc : bool, optional If True, return items in descending order (default False) return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) filter : dict, optional Query arguments. Same format as **kwargs, but these arguments filter the results on the server before they are returned. They will NOT use an index, as that is what the **kwargs are for. filter_or : bool, optional If True, multiple filter args will be OR'd together. If False, they will be AND'd together. (default False) exclusive_start_key : dict, optional The ExclusiveStartKey to resume a previous query **kwargs : dict, optional Query arguments (examples below) Examples -------- You may pass in constraints using the Django-style '__' syntax. For example: .. 
code-block:: python connection.query('mytable', foo__eq=5) connection.query('mytable', foo__eq=5, bar__lt=22) connection.query('mytable', foo__eq=5, bar__between=(1, 10)) ] variable[keywords] assign[=] dictionary[[<ast.Constant object at 0x7da1b24b11b0>, <ast.Constant object at 0x7da1b24b3be0>, <ast.Constant object at 0x7da1b24b0a30>, <ast.Constant object at 0x7da1b24b0b20>, <ast.Constant object at 0x7da1b24b0c40>], [<ast.Name object at 0x7da1b24b1660>, <ast.Call object at 0x7da1b24b1750>, <ast.Name object at 0x7da1b24b0d90>, <ast.UnaryOp object at 0x7da1b24b1a20>, <ast.Call object at 0x7da1b24b0910>]] if compare[name[attributes] is_not constant[None]] begin[:] call[name[keywords]][constant[AttributesToGet]] assign[=] name[attributes] if compare[name[index] is_not constant[None]] begin[:] call[name[keywords]][constant[IndexName]] assign[=] name[index] if compare[name[filter] is_not constant[None]] begin[:] if compare[call[name[len], parameter[name[filter]]] greater[>] constant[1]] begin[:] call[name[keywords]][constant[ConditionalOperator]] assign[=] <ast.IfExp object at 0x7da1b24b0430> call[name[keywords]][constant[QueryFilter]] assign[=] call[name[encode_query_kwargs], parameter[name[self].dynamizer, name[filter]]] if compare[name[exclusive_start_key] is_not constant[None]] begin[:] call[name[keywords]][constant[ExclusiveStartKey]] assign[=] call[name[self].dynamizer.maybe_encode_keys, parameter[name[exclusive_start_key]]] if <ast.UnaryOp object at 0x7da1b24b11e0> begin[:] variable[limit] assign[=] call[name[Limit], parameter[name[limit]]] if name[count] begin[:] call[name[keywords]][constant[Select]] assign[=] name[COUNT] return[call[name[self]._count, parameter[constant[query], name[limit], name[keywords]]]]
keyword[def] identifier[query] ( identifier[self] , identifier[tablename] , identifier[attributes] = keyword[None] , identifier[consistent] = keyword[False] , identifier[count] = keyword[False] , identifier[index] = keyword[None] , identifier[limit] = keyword[None] , identifier[desc] = keyword[False] , identifier[return_capacity] = keyword[None] , identifier[filter] = keyword[None] , identifier[filter_or] = keyword[False] , identifier[exclusive_start_key] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[keywords] ={ literal[string] : identifier[tablename] , literal[string] : identifier[self] . identifier[_default_capacity] ( identifier[return_capacity] ), literal[string] : identifier[consistent] , literal[string] : keyword[not] identifier[desc] , literal[string] : identifier[encode_query_kwargs] ( identifier[self] . identifier[dynamizer] , identifier[kwargs] ), } keyword[if] identifier[attributes] keyword[is] keyword[not] keyword[None] : identifier[keywords] [ literal[string] ]= identifier[attributes] keyword[if] identifier[index] keyword[is] keyword[not] keyword[None] : identifier[keywords] [ literal[string] ]= identifier[index] keyword[if] identifier[filter] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[len] ( identifier[filter] )> literal[int] : identifier[keywords] [ literal[string] ]= literal[string] keyword[if] identifier[filter_or] keyword[else] literal[string] identifier[keywords] [ literal[string] ]= identifier[encode_query_kwargs] ( identifier[self] . identifier[dynamizer] , identifier[filter] ) keyword[if] identifier[exclusive_start_key] keyword[is] keyword[not] keyword[None] : identifier[keywords] [ literal[string] ]= identifier[self] . identifier[dynamizer] . 
identifier[maybe_encode_keys] ( identifier[exclusive_start_key] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[limit] , identifier[Limit] ): identifier[limit] = identifier[Limit] ( identifier[limit] ) keyword[if] identifier[count] : identifier[keywords] [ literal[string] ]= identifier[COUNT] keyword[return] identifier[self] . identifier[_count] ( literal[string] , identifier[limit] , identifier[keywords] ) keyword[else] : keyword[return] identifier[ResultSet] ( identifier[self] , identifier[limit] , literal[string] ,** identifier[keywords] )
def query(self, tablename, attributes=None, consistent=False, count=False, index=None, limit=None, desc=False, return_capacity=None, filter=None, filter_or=False, exclusive_start_key=None, **kwargs): """ Perform an index query on a table This uses the older version of the DynamoDB API. See also: :meth:`~.query2`. Parameters ---------- tablename : str Name of the table to query attributes : list If present, only fetch these attributes from the item consistent : bool, optional Perform a strongly consistent read of the data (default False) count : bool, optional If True, return a count of matched items instead of the items themselves (default False) index : str, optional The name of the index to query limit : int, optional Maximum number of items to return desc : bool, optional If True, return items in descending order (default False) return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) filter : dict, optional Query arguments. Same format as **kwargs, but these arguments filter the results on the server before they are returned. They will NOT use an index, as that is what the **kwargs are for. filter_or : bool, optional If True, multiple filter args will be OR'd together. If False, they will be AND'd together. (default False) exclusive_start_key : dict, optional The ExclusiveStartKey to resume a previous query **kwargs : dict, optional Query arguments (examples below) Examples -------- You may pass in constraints using the Django-style '__' syntax. For example: .. 
code-block:: python connection.query('mytable', foo__eq=5) connection.query('mytable', foo__eq=5, bar__lt=22) connection.query('mytable', foo__eq=5, bar__between=(1, 10)) """ keywords = {'TableName': tablename, 'ReturnConsumedCapacity': self._default_capacity(return_capacity), 'ConsistentRead': consistent, 'ScanIndexForward': not desc, 'KeyConditions': encode_query_kwargs(self.dynamizer, kwargs)} if attributes is not None: keywords['AttributesToGet'] = attributes # depends on [control=['if'], data=['attributes']] if index is not None: keywords['IndexName'] = index # depends on [control=['if'], data=['index']] if filter is not None: if len(filter) > 1: keywords['ConditionalOperator'] = 'OR' if filter_or else 'AND' # depends on [control=['if'], data=[]] keywords['QueryFilter'] = encode_query_kwargs(self.dynamizer, filter) # depends on [control=['if'], data=['filter']] if exclusive_start_key is not None: keywords['ExclusiveStartKey'] = self.dynamizer.maybe_encode_keys(exclusive_start_key) # depends on [control=['if'], data=['exclusive_start_key']] if not isinstance(limit, Limit): limit = Limit(limit) # depends on [control=['if'], data=[]] if count: keywords['Select'] = COUNT return self._count('query', limit, keywords) # depends on [control=['if'], data=[]] else: return ResultSet(self, limit, 'query', **keywords)
def get_variant_info(genes): """Get variant information""" data = {'canonical_transcripts': []} for gene_obj in genes: if not gene_obj.get('canonical_transcripts'): tx = gene_obj['transcripts'][0] tx_id = tx['transcript_id'] exon = tx.get('exon', '-') c_seq = tx.get('coding_sequence_name', '-') else: tx_id = gene_obj['canonical_transcripts'] exon = gene_obj.get('exon', '-') c_seq = gene_obj.get('hgvs_identifier', '-') if len(c_seq) > 20: c_seq = c_seq[:20] + '...' if len(genes) == 1: value = ':'.join([tx_id,exon,c_seq]) else: gene_id = gene_obj.get('hgnc_symbol') or str(gene_obj['hgnc_id']) value = ':'.join([gene_id, tx_id,exon,c_seq]) data['canonical_transcripts'].append(value) return data
def function[get_variant_info, parameter[genes]]: constant[Get variant information] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20cabd690>], [<ast.List object at 0x7da20cabed70>]] for taget[name[gene_obj]] in starred[name[genes]] begin[:] if <ast.UnaryOp object at 0x7da20cabc370> begin[:] variable[tx] assign[=] call[call[name[gene_obj]][constant[transcripts]]][constant[0]] variable[tx_id] assign[=] call[name[tx]][constant[transcript_id]] variable[exon] assign[=] call[name[tx].get, parameter[constant[exon], constant[-]]] variable[c_seq] assign[=] call[name[tx].get, parameter[constant[coding_sequence_name], constant[-]]] if compare[call[name[len], parameter[name[c_seq]]] greater[>] constant[20]] begin[:] variable[c_seq] assign[=] binary_operation[call[name[c_seq]][<ast.Slice object at 0x7da1b2345210>] + constant[...]] if compare[call[name[len], parameter[name[genes]]] equal[==] constant[1]] begin[:] variable[value] assign[=] call[constant[:].join, parameter[list[[<ast.Name object at 0x7da1b2346350>, <ast.Name object at 0x7da1b2345c30>, <ast.Name object at 0x7da1b2346a70>]]]] call[call[name[data]][constant[canonical_transcripts]].append, parameter[name[value]]] return[name[data]]
keyword[def] identifier[get_variant_info] ( identifier[genes] ): literal[string] identifier[data] ={ literal[string] :[]} keyword[for] identifier[gene_obj] keyword[in] identifier[genes] : keyword[if] keyword[not] identifier[gene_obj] . identifier[get] ( literal[string] ): identifier[tx] = identifier[gene_obj] [ literal[string] ][ literal[int] ] identifier[tx_id] = identifier[tx] [ literal[string] ] identifier[exon] = identifier[tx] . identifier[get] ( literal[string] , literal[string] ) identifier[c_seq] = identifier[tx] . identifier[get] ( literal[string] , literal[string] ) keyword[else] : identifier[tx_id] = identifier[gene_obj] [ literal[string] ] identifier[exon] = identifier[gene_obj] . identifier[get] ( literal[string] , literal[string] ) identifier[c_seq] = identifier[gene_obj] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[len] ( identifier[c_seq] )> literal[int] : identifier[c_seq] = identifier[c_seq] [: literal[int] ]+ literal[string] keyword[if] identifier[len] ( identifier[genes] )== literal[int] : identifier[value] = literal[string] . identifier[join] ([ identifier[tx_id] , identifier[exon] , identifier[c_seq] ]) keyword[else] : identifier[gene_id] = identifier[gene_obj] . identifier[get] ( literal[string] ) keyword[or] identifier[str] ( identifier[gene_obj] [ literal[string] ]) identifier[value] = literal[string] . identifier[join] ([ identifier[gene_id] , identifier[tx_id] , identifier[exon] , identifier[c_seq] ]) identifier[data] [ literal[string] ]. identifier[append] ( identifier[value] ) keyword[return] identifier[data]
def get_variant_info(genes): """Get variant information""" data = {'canonical_transcripts': []} for gene_obj in genes: if not gene_obj.get('canonical_transcripts'): tx = gene_obj['transcripts'][0] tx_id = tx['transcript_id'] exon = tx.get('exon', '-') c_seq = tx.get('coding_sequence_name', '-') # depends on [control=['if'], data=[]] else: tx_id = gene_obj['canonical_transcripts'] exon = gene_obj.get('exon', '-') c_seq = gene_obj.get('hgvs_identifier', '-') if len(c_seq) > 20: c_seq = c_seq[:20] + '...' # depends on [control=['if'], data=[]] if len(genes) == 1: value = ':'.join([tx_id, exon, c_seq]) # depends on [control=['if'], data=[]] else: gene_id = gene_obj.get('hgnc_symbol') or str(gene_obj['hgnc_id']) value = ':'.join([gene_id, tx_id, exon, c_seq]) data['canonical_transcripts'].append(value) # depends on [control=['for'], data=['gene_obj']] return data
def filter(self, query: Query, entity: type) -> Tuple[Query, Any]: """Apply the `_method` to all childs of the node. :param query: The sqlachemy query. :type query: Query :param entity: The entity model of the query. :type entity: type :return: A tuple with in first place the updated query and in second place the list of filters to apply to the query. :rtype: Tuple[Query, Any] """ new_query = query c_filter_list = [] for child in self._childs: new_query, f_list = child.filter(new_query, entity) c_filter_list.append(f_list) return ( new_query, self._method(*c_filter_list) )
def function[filter, parameter[self, query, entity]]: constant[Apply the `_method` to all childs of the node. :param query: The sqlachemy query. :type query: Query :param entity: The entity model of the query. :type entity: type :return: A tuple with in first place the updated query and in second place the list of filters to apply to the query. :rtype: Tuple[Query, Any] ] variable[new_query] assign[=] name[query] variable[c_filter_list] assign[=] list[[]] for taget[name[child]] in starred[name[self]._childs] begin[:] <ast.Tuple object at 0x7da1b10c0550> assign[=] call[name[child].filter, parameter[name[new_query], name[entity]]] call[name[c_filter_list].append, parameter[name[f_list]]] return[tuple[[<ast.Name object at 0x7da1b10d7ca0>, <ast.Call object at 0x7da1b10d7df0>]]]
keyword[def] identifier[filter] ( identifier[self] , identifier[query] : identifier[Query] , identifier[entity] : identifier[type] )-> identifier[Tuple] [ identifier[Query] , identifier[Any] ]: literal[string] identifier[new_query] = identifier[query] identifier[c_filter_list] =[] keyword[for] identifier[child] keyword[in] identifier[self] . identifier[_childs] : identifier[new_query] , identifier[f_list] = identifier[child] . identifier[filter] ( identifier[new_query] , identifier[entity] ) identifier[c_filter_list] . identifier[append] ( identifier[f_list] ) keyword[return] ( identifier[new_query] , identifier[self] . identifier[_method] (* identifier[c_filter_list] ) )
def filter(self, query: Query, entity: type) -> Tuple[Query, Any]: """Apply the `_method` to all childs of the node. :param query: The sqlachemy query. :type query: Query :param entity: The entity model of the query. :type entity: type :return: A tuple with in first place the updated query and in second place the list of filters to apply to the query. :rtype: Tuple[Query, Any] """ new_query = query c_filter_list = [] for child in self._childs: (new_query, f_list) = child.filter(new_query, entity) c_filter_list.append(f_list) # depends on [control=['for'], data=['child']] return (new_query, self._method(*c_filter_list))
def dnde(self, x, params=None): """Evaluate differential flux.""" params = self.params if params is None else params return np.squeeze(self.eval_dnde(x, params, self.scale, self.extra_params))
def function[dnde, parameter[self, x, params]]: constant[Evaluate differential flux.] variable[params] assign[=] <ast.IfExp object at 0x7da207f99de0> return[call[name[np].squeeze, parameter[call[name[self].eval_dnde, parameter[name[x], name[params], name[self].scale, name[self].extra_params]]]]]
keyword[def] identifier[dnde] ( identifier[self] , identifier[x] , identifier[params] = keyword[None] ): literal[string] identifier[params] = identifier[self] . identifier[params] keyword[if] identifier[params] keyword[is] keyword[None] keyword[else] identifier[params] keyword[return] identifier[np] . identifier[squeeze] ( identifier[self] . identifier[eval_dnde] ( identifier[x] , identifier[params] , identifier[self] . identifier[scale] , identifier[self] . identifier[extra_params] ))
def dnde(self, x, params=None): """Evaluate differential flux.""" params = self.params if params is None else params return np.squeeze(self.eval_dnde(x, params, self.scale, self.extra_params))
def heap_push(heap, row, weight, index, flag): """Push a new element onto the heap. The heap stores potential neighbors for each data point. The ``row`` parameter determines which data point we are addressing, the ``weight`` determines the distance (for heap sorting), the ``index`` is the element to add, and the flag determines whether this is to be considered a new addition. Parameters ---------- heap: ndarray generated by ``make_heap`` The heap object to push into row: int Which actual heap within the heap object to push to weight: float The priority value of the element to push onto the heap index: int The actual value to be pushed flag: int Whether to flag the newly added element or not. Returns ------- success: The number of new elements successfully pushed into the heap. """ indices = heap[0, row] weights = heap[1, row] is_new = heap[2, row] if weight >= weights[0]: return 0 # break if we already have this element. for i in range(indices.shape[0]): if index == indices[i]: return 0 # insert val at position zero weights[0] = weight indices[0] = index is_new[0] = flag # descend the heap, swapping values until the max heap criterion is met i = 0 while True: ic1 = 2 * i + 1 ic2 = ic1 + 1 if ic1 >= heap.shape[2]: break elif ic2 >= heap.shape[2]: if weights[ic1] > weight: i_swap = ic1 else: break elif weights[ic1] >= weights[ic2]: if weight < weights[ic1]: i_swap = ic1 else: break else: if weight < weights[ic2]: i_swap = ic2 else: break weights[i] = weights[i_swap] indices[i] = indices[i_swap] is_new[i] = is_new[i_swap] i = i_swap weights[i] = weight indices[i] = index is_new[i] = flag return 1
def function[heap_push, parameter[heap, row, weight, index, flag]]: constant[Push a new element onto the heap. The heap stores potential neighbors for each data point. The ``row`` parameter determines which data point we are addressing, the ``weight`` determines the distance (for heap sorting), the ``index`` is the element to add, and the flag determines whether this is to be considered a new addition. Parameters ---------- heap: ndarray generated by ``make_heap`` The heap object to push into row: int Which actual heap within the heap object to push to weight: float The priority value of the element to push onto the heap index: int The actual value to be pushed flag: int Whether to flag the newly added element or not. Returns ------- success: The number of new elements successfully pushed into the heap. ] variable[indices] assign[=] call[name[heap]][tuple[[<ast.Constant object at 0x7da1b1d6faf0>, <ast.Name object at 0x7da1b1d6eb30>]]] variable[weights] assign[=] call[name[heap]][tuple[[<ast.Constant object at 0x7da1b1d6c610>, <ast.Name object at 0x7da1b1d6dff0>]]] variable[is_new] assign[=] call[name[heap]][tuple[[<ast.Constant object at 0x7da1b1d6eda0>, <ast.Name object at 0x7da1b1d6fe50>]]] if compare[name[weight] greater_or_equal[>=] call[name[weights]][constant[0]]] begin[:] return[constant[0]] for taget[name[i]] in starred[call[name[range], parameter[call[name[indices].shape][constant[0]]]]] begin[:] if compare[name[index] equal[==] call[name[indices]][name[i]]] begin[:] return[constant[0]] call[name[weights]][constant[0]] assign[=] name[weight] call[name[indices]][constant[0]] assign[=] name[index] call[name[is_new]][constant[0]] assign[=] name[flag] variable[i] assign[=] constant[0] while constant[True] begin[:] variable[ic1] assign[=] binary_operation[binary_operation[constant[2] * name[i]] + constant[1]] variable[ic2] assign[=] binary_operation[name[ic1] + constant[1]] if compare[name[ic1] greater_or_equal[>=] call[name[heap].shape][constant[2]]] begin[:] 
break call[name[weights]][name[i]] assign[=] call[name[weights]][name[i_swap]] call[name[indices]][name[i]] assign[=] call[name[indices]][name[i_swap]] call[name[is_new]][name[i]] assign[=] call[name[is_new]][name[i_swap]] variable[i] assign[=] name[i_swap] call[name[weights]][name[i]] assign[=] name[weight] call[name[indices]][name[i]] assign[=] name[index] call[name[is_new]][name[i]] assign[=] name[flag] return[constant[1]]
keyword[def] identifier[heap_push] ( identifier[heap] , identifier[row] , identifier[weight] , identifier[index] , identifier[flag] ): literal[string] identifier[indices] = identifier[heap] [ literal[int] , identifier[row] ] identifier[weights] = identifier[heap] [ literal[int] , identifier[row] ] identifier[is_new] = identifier[heap] [ literal[int] , identifier[row] ] keyword[if] identifier[weight] >= identifier[weights] [ literal[int] ]: keyword[return] literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[indices] . identifier[shape] [ literal[int] ]): keyword[if] identifier[index] == identifier[indices] [ identifier[i] ]: keyword[return] literal[int] identifier[weights] [ literal[int] ]= identifier[weight] identifier[indices] [ literal[int] ]= identifier[index] identifier[is_new] [ literal[int] ]= identifier[flag] identifier[i] = literal[int] keyword[while] keyword[True] : identifier[ic1] = literal[int] * identifier[i] + literal[int] identifier[ic2] = identifier[ic1] + literal[int] keyword[if] identifier[ic1] >= identifier[heap] . identifier[shape] [ literal[int] ]: keyword[break] keyword[elif] identifier[ic2] >= identifier[heap] . 
identifier[shape] [ literal[int] ]: keyword[if] identifier[weights] [ identifier[ic1] ]> identifier[weight] : identifier[i_swap] = identifier[ic1] keyword[else] : keyword[break] keyword[elif] identifier[weights] [ identifier[ic1] ]>= identifier[weights] [ identifier[ic2] ]: keyword[if] identifier[weight] < identifier[weights] [ identifier[ic1] ]: identifier[i_swap] = identifier[ic1] keyword[else] : keyword[break] keyword[else] : keyword[if] identifier[weight] < identifier[weights] [ identifier[ic2] ]: identifier[i_swap] = identifier[ic2] keyword[else] : keyword[break] identifier[weights] [ identifier[i] ]= identifier[weights] [ identifier[i_swap] ] identifier[indices] [ identifier[i] ]= identifier[indices] [ identifier[i_swap] ] identifier[is_new] [ identifier[i] ]= identifier[is_new] [ identifier[i_swap] ] identifier[i] = identifier[i_swap] identifier[weights] [ identifier[i] ]= identifier[weight] identifier[indices] [ identifier[i] ]= identifier[index] identifier[is_new] [ identifier[i] ]= identifier[flag] keyword[return] literal[int]
def heap_push(heap, row, weight, index, flag): """Push a new element onto the heap. The heap stores potential neighbors for each data point. The ``row`` parameter determines which data point we are addressing, the ``weight`` determines the distance (for heap sorting), the ``index`` is the element to add, and the flag determines whether this is to be considered a new addition. Parameters ---------- heap: ndarray generated by ``make_heap`` The heap object to push into row: int Which actual heap within the heap object to push to weight: float The priority value of the element to push onto the heap index: int The actual value to be pushed flag: int Whether to flag the newly added element or not. Returns ------- success: The number of new elements successfully pushed into the heap. """ indices = heap[0, row] weights = heap[1, row] is_new = heap[2, row] if weight >= weights[0]: return 0 # depends on [control=['if'], data=[]] # break if we already have this element. for i in range(indices.shape[0]): if index == indices[i]: return 0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # insert val at position zero weights[0] = weight indices[0] = index is_new[0] = flag # descend the heap, swapping values until the max heap criterion is met i = 0 while True: ic1 = 2 * i + 1 ic2 = ic1 + 1 if ic1 >= heap.shape[2]: break # depends on [control=['if'], data=[]] elif ic2 >= heap.shape[2]: if weights[ic1] > weight: i_swap = ic1 # depends on [control=['if'], data=[]] else: break # depends on [control=['if'], data=[]] elif weights[ic1] >= weights[ic2]: if weight < weights[ic1]: i_swap = ic1 # depends on [control=['if'], data=[]] else: break # depends on [control=['if'], data=[]] elif weight < weights[ic2]: i_swap = ic2 # depends on [control=['if'], data=[]] else: break weights[i] = weights[i_swap] indices[i] = indices[i_swap] is_new[i] = is_new[i_swap] i = i_swap # depends on [control=['while'], data=[]] weights[i] = weight indices[i] = index is_new[i] = 
flag return 1
def create_circuit_termination(circuit, interface, device, speed, xconnect_id=None, term_side='A'): ''' .. versionadded:: 2019.2.0 Terminate a circuit on an interface circuit The name of the circuit interface The name of the interface to terminate on device The name of the device the interface belongs to speed The speed of the circuit, in Kbps xconnect_id The cross-connect identifier term_side The side of the circuit termination CLI Example: .. code-block:: bash salt myminion netbox.create_circuit_termination NEW_CIRCUIT_01 xe-0/0/1 myminion 10000 xconnect_id=XCON01 ''' nb_device = get_('dcim', 'devices', name=device) nb_interface = get_('dcim', 'interfaces', device_id=nb_device['id'], name=interface) nb_circuit = get_('circuits', 'circuits', cid=circuit) if nb_circuit and nb_device: nb_termination = get_('circuits', 'circuit-terminations', q=nb_circuit['cid']) if nb_termination: return False payload = { 'circuit': nb_circuit['id'], 'interface': nb_interface['id'], 'site': nb_device['site']['id'], 'port_speed': speed, 'term_side': term_side } if xconnect_id: payload['xconnect_id'] = xconnect_id circuit_termination = _add('circuits', 'circuit-terminations', payload) if circuit_termination: return {'circuits': {'circuit-terminations': {circuit_termination['id']: payload}}} else: return circuit_termination
def function[create_circuit_termination, parameter[circuit, interface, device, speed, xconnect_id, term_side]]: constant[ .. versionadded:: 2019.2.0 Terminate a circuit on an interface circuit The name of the circuit interface The name of the interface to terminate on device The name of the device the interface belongs to speed The speed of the circuit, in Kbps xconnect_id The cross-connect identifier term_side The side of the circuit termination CLI Example: .. code-block:: bash salt myminion netbox.create_circuit_termination NEW_CIRCUIT_01 xe-0/0/1 myminion 10000 xconnect_id=XCON01 ] variable[nb_device] assign[=] call[name[get_], parameter[constant[dcim], constant[devices]]] variable[nb_interface] assign[=] call[name[get_], parameter[constant[dcim], constant[interfaces]]] variable[nb_circuit] assign[=] call[name[get_], parameter[constant[circuits], constant[circuits]]] if <ast.BoolOp object at 0x7da18ede50c0> begin[:] variable[nb_termination] assign[=] call[name[get_], parameter[constant[circuits], constant[circuit-terminations]]] if name[nb_termination] begin[:] return[constant[False]] variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da18ede7a30>, <ast.Constant object at 0x7da18ede4340>, <ast.Constant object at 0x7da18ede68f0>, <ast.Constant object at 0x7da18ede7f40>, <ast.Constant object at 0x7da18ede5a80>], [<ast.Subscript object at 0x7da18ede40d0>, <ast.Subscript object at 0x7da18ede6e60>, <ast.Subscript object at 0x7da18ede72b0>, <ast.Name object at 0x7da18ede6260>, <ast.Name object at 0x7da18ede4280>]] if name[xconnect_id] begin[:] call[name[payload]][constant[xconnect_id]] assign[=] name[xconnect_id] variable[circuit_termination] assign[=] call[name[_add], parameter[constant[circuits], constant[circuit-terminations], name[payload]]] if name[circuit_termination] begin[:] return[dictionary[[<ast.Constant object at 0x7da18ede5270>], [<ast.Dict object at 0x7da18ede7f70>]]]
keyword[def] identifier[create_circuit_termination] ( identifier[circuit] , identifier[interface] , identifier[device] , identifier[speed] , identifier[xconnect_id] = keyword[None] , identifier[term_side] = literal[string] ): literal[string] identifier[nb_device] = identifier[get_] ( literal[string] , literal[string] , identifier[name] = identifier[device] ) identifier[nb_interface] = identifier[get_] ( literal[string] , literal[string] , identifier[device_id] = identifier[nb_device] [ literal[string] ], identifier[name] = identifier[interface] ) identifier[nb_circuit] = identifier[get_] ( literal[string] , literal[string] , identifier[cid] = identifier[circuit] ) keyword[if] identifier[nb_circuit] keyword[and] identifier[nb_device] : identifier[nb_termination] = identifier[get_] ( literal[string] , literal[string] , identifier[q] = identifier[nb_circuit] [ literal[string] ]) keyword[if] identifier[nb_termination] : keyword[return] keyword[False] identifier[payload] ={ literal[string] : identifier[nb_circuit] [ literal[string] ], literal[string] : identifier[nb_interface] [ literal[string] ], literal[string] : identifier[nb_device] [ literal[string] ][ literal[string] ], literal[string] : identifier[speed] , literal[string] : identifier[term_side] } keyword[if] identifier[xconnect_id] : identifier[payload] [ literal[string] ]= identifier[xconnect_id] identifier[circuit_termination] = identifier[_add] ( literal[string] , literal[string] , identifier[payload] ) keyword[if] identifier[circuit_termination] : keyword[return] { literal[string] :{ literal[string] :{ identifier[circuit_termination] [ literal[string] ]: identifier[payload] }}} keyword[else] : keyword[return] identifier[circuit_termination]
def create_circuit_termination(circuit, interface, device, speed, xconnect_id=None, term_side='A'): """ .. versionadded:: 2019.2.0 Terminate a circuit on an interface circuit The name of the circuit interface The name of the interface to terminate on device The name of the device the interface belongs to speed The speed of the circuit, in Kbps xconnect_id The cross-connect identifier term_side The side of the circuit termination CLI Example: .. code-block:: bash salt myminion netbox.create_circuit_termination NEW_CIRCUIT_01 xe-0/0/1 myminion 10000 xconnect_id=XCON01 """ nb_device = get_('dcim', 'devices', name=device) nb_interface = get_('dcim', 'interfaces', device_id=nb_device['id'], name=interface) nb_circuit = get_('circuits', 'circuits', cid=circuit) if nb_circuit and nb_device: nb_termination = get_('circuits', 'circuit-terminations', q=nb_circuit['cid']) if nb_termination: return False # depends on [control=['if'], data=[]] payload = {'circuit': nb_circuit['id'], 'interface': nb_interface['id'], 'site': nb_device['site']['id'], 'port_speed': speed, 'term_side': term_side} if xconnect_id: payload['xconnect_id'] = xconnect_id # depends on [control=['if'], data=[]] circuit_termination = _add('circuits', 'circuit-terminations', payload) if circuit_termination: return {'circuits': {'circuit-terminations': {circuit_termination['id']: payload}}} # depends on [control=['if'], data=[]] else: return circuit_termination # depends on [control=['if'], data=[]]
def assert_deepcopy_idempotent(obj): '''Assert that obj does not change (w.r.t. ==) under repeated deepcopies ''' from copy import deepcopy obj1 = deepcopy(obj) obj2 = deepcopy(obj1) obj3 = deepcopy(obj2) assert_equivalent(obj, obj1) assert_equivalent(obj, obj2) assert_equivalent(obj, obj3) assert type(obj) is type(obj3)
def function[assert_deepcopy_idempotent, parameter[obj]]: constant[Assert that obj does not change (w.r.t. ==) under repeated deepcopies ] from relative_module[copy] import module[deepcopy] variable[obj1] assign[=] call[name[deepcopy], parameter[name[obj]]] variable[obj2] assign[=] call[name[deepcopy], parameter[name[obj1]]] variable[obj3] assign[=] call[name[deepcopy], parameter[name[obj2]]] call[name[assert_equivalent], parameter[name[obj], name[obj1]]] call[name[assert_equivalent], parameter[name[obj], name[obj2]]] call[name[assert_equivalent], parameter[name[obj], name[obj3]]] assert[compare[call[name[type], parameter[name[obj]]] is call[name[type], parameter[name[obj3]]]]]
keyword[def] identifier[assert_deepcopy_idempotent] ( identifier[obj] ): literal[string] keyword[from] identifier[copy] keyword[import] identifier[deepcopy] identifier[obj1] = identifier[deepcopy] ( identifier[obj] ) identifier[obj2] = identifier[deepcopy] ( identifier[obj1] ) identifier[obj3] = identifier[deepcopy] ( identifier[obj2] ) identifier[assert_equivalent] ( identifier[obj] , identifier[obj1] ) identifier[assert_equivalent] ( identifier[obj] , identifier[obj2] ) identifier[assert_equivalent] ( identifier[obj] , identifier[obj3] ) keyword[assert] identifier[type] ( identifier[obj] ) keyword[is] identifier[type] ( identifier[obj3] )
def assert_deepcopy_idempotent(obj): """Assert that obj does not change (w.r.t. ==) under repeated deepcopies """ from copy import deepcopy obj1 = deepcopy(obj) obj2 = deepcopy(obj1) obj3 = deepcopy(obj2) assert_equivalent(obj, obj1) assert_equivalent(obj, obj2) assert_equivalent(obj, obj3) assert type(obj) is type(obj3)
def copytree_and_gzip(self, source_dir, target_dir): """ Copies the provided source directory to the provided target directory. Gzips JavaScript, CSS and HTML and other files along the way. """ # Figure out what we're building... build_list = [] # Walk through the source directory... for (dirpath, dirnames, filenames) in os.walk(source_dir): for f in filenames: # Figure out what is going where source_path = os.path.join(dirpath, f) rel_path = os.path.relpath(dirpath, source_dir) target_path = os.path.join(target_dir, rel_path, f) # Add it to our list to build build_list.append((source_path, target_path)) logger.debug("Gzipping {} files".format(len(build_list))) # Build em all if not getattr(self, 'pooling', False): [self.copyfile_and_gzip(*u) for u in build_list] else: cpu_count = multiprocessing.cpu_count() logger.debug("Pooling build on {} CPUs".format(cpu_count)) pool = ThreadPool(processes=cpu_count) pool.map(self.pooled_copyfile_and_gzip, build_list)
def function[copytree_and_gzip, parameter[self, source_dir, target_dir]]: constant[ Copies the provided source directory to the provided target directory. Gzips JavaScript, CSS and HTML and other files along the way. ] variable[build_list] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da2041d8220>, <ast.Name object at 0x7da2041dbac0>, <ast.Name object at 0x7da2041d9090>]]] in starred[call[name[os].walk, parameter[name[source_dir]]]] begin[:] for taget[name[f]] in starred[name[filenames]] begin[:] variable[source_path] assign[=] call[name[os].path.join, parameter[name[dirpath], name[f]]] variable[rel_path] assign[=] call[name[os].path.relpath, parameter[name[dirpath], name[source_dir]]] variable[target_path] assign[=] call[name[os].path.join, parameter[name[target_dir], name[rel_path], name[f]]] call[name[build_list].append, parameter[tuple[[<ast.Name object at 0x7da2041da2f0>, <ast.Name object at 0x7da2041da890>]]]] call[name[logger].debug, parameter[call[constant[Gzipping {} files].format, parameter[call[name[len], parameter[name[build_list]]]]]]] if <ast.UnaryOp object at 0x7da2041d8790> begin[:] <ast.ListComp object at 0x7da2041daa70>
keyword[def] identifier[copytree_and_gzip] ( identifier[self] , identifier[source_dir] , identifier[target_dir] ): literal[string] identifier[build_list] =[] keyword[for] ( identifier[dirpath] , identifier[dirnames] , identifier[filenames] ) keyword[in] identifier[os] . identifier[walk] ( identifier[source_dir] ): keyword[for] identifier[f] keyword[in] identifier[filenames] : identifier[source_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[dirpath] , identifier[f] ) identifier[rel_path] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[dirpath] , identifier[source_dir] ) identifier[target_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , identifier[rel_path] , identifier[f] ) identifier[build_list] . identifier[append] (( identifier[source_path] , identifier[target_path] )) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[build_list] ))) keyword[if] keyword[not] identifier[getattr] ( identifier[self] , literal[string] , keyword[False] ): [ identifier[self] . identifier[copyfile_and_gzip] (* identifier[u] ) keyword[for] identifier[u] keyword[in] identifier[build_list] ] keyword[else] : identifier[cpu_count] = identifier[multiprocessing] . identifier[cpu_count] () identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[cpu_count] )) identifier[pool] = identifier[ThreadPool] ( identifier[processes] = identifier[cpu_count] ) identifier[pool] . identifier[map] ( identifier[self] . identifier[pooled_copyfile_and_gzip] , identifier[build_list] )
def copytree_and_gzip(self, source_dir, target_dir): """ Copies the provided source directory to the provided target directory. Gzips JavaScript, CSS and HTML and other files along the way. """ # Figure out what we're building... build_list = [] # Walk through the source directory... for (dirpath, dirnames, filenames) in os.walk(source_dir): for f in filenames: # Figure out what is going where source_path = os.path.join(dirpath, f) rel_path = os.path.relpath(dirpath, source_dir) target_path = os.path.join(target_dir, rel_path, f) # Add it to our list to build build_list.append((source_path, target_path)) # depends on [control=['for'], data=['f']] # depends on [control=['for'], data=[]] logger.debug('Gzipping {} files'.format(len(build_list))) # Build em all if not getattr(self, 'pooling', False): [self.copyfile_and_gzip(*u) for u in build_list] # depends on [control=['if'], data=[]] else: cpu_count = multiprocessing.cpu_count() logger.debug('Pooling build on {} CPUs'.format(cpu_count)) pool = ThreadPool(processes=cpu_count) pool.map(self.pooled_copyfile_and_gzip, build_list)
def implicit_dynamic(cls, for_type=None, for_types=None): """Automatically generate late dynamic dispatchers to type. This is similar to 'implicit_static', except instead of binding the instance methods, it generates a dispatcher that will call whatever instance method of the same name happens to be available at time of dispatch. This has the obvious advantage of supporting arbitrary subclasses, but can do no verification at bind time. Arguments: for_type: The type to implictly implement the protocol with. """ for type_ in cls.__get_type_args(for_type, for_types): implementations = {} for function in cls.functions(): implementations[function] = cls._build_late_dispatcher( func_name=function.__name__) cls.implement(for_type=type_, implementations=implementations)
def function[implicit_dynamic, parameter[cls, for_type, for_types]]: constant[Automatically generate late dynamic dispatchers to type. This is similar to 'implicit_static', except instead of binding the instance methods, it generates a dispatcher that will call whatever instance method of the same name happens to be available at time of dispatch. This has the obvious advantage of supporting arbitrary subclasses, but can do no verification at bind time. Arguments: for_type: The type to implictly implement the protocol with. ] for taget[name[type_]] in starred[call[name[cls].__get_type_args, parameter[name[for_type], name[for_types]]]] begin[:] variable[implementations] assign[=] dictionary[[], []] for taget[name[function]] in starred[call[name[cls].functions, parameter[]]] begin[:] call[name[implementations]][name[function]] assign[=] call[name[cls]._build_late_dispatcher, parameter[]] call[name[cls].implement, parameter[]]
keyword[def] identifier[implicit_dynamic] ( identifier[cls] , identifier[for_type] = keyword[None] , identifier[for_types] = keyword[None] ): literal[string] keyword[for] identifier[type_] keyword[in] identifier[cls] . identifier[__get_type_args] ( identifier[for_type] , identifier[for_types] ): identifier[implementations] ={} keyword[for] identifier[function] keyword[in] identifier[cls] . identifier[functions] (): identifier[implementations] [ identifier[function] ]= identifier[cls] . identifier[_build_late_dispatcher] ( identifier[func_name] = identifier[function] . identifier[__name__] ) identifier[cls] . identifier[implement] ( identifier[for_type] = identifier[type_] , identifier[implementations] = identifier[implementations] )
def implicit_dynamic(cls, for_type=None, for_types=None): """Automatically generate late dynamic dispatchers to type. This is similar to 'implicit_static', except instead of binding the instance methods, it generates a dispatcher that will call whatever instance method of the same name happens to be available at time of dispatch. This has the obvious advantage of supporting arbitrary subclasses, but can do no verification at bind time. Arguments: for_type: The type to implictly implement the protocol with. """ for type_ in cls.__get_type_args(for_type, for_types): implementations = {} for function in cls.functions(): implementations[function] = cls._build_late_dispatcher(func_name=function.__name__) # depends on [control=['for'], data=['function']] cls.implement(for_type=type_, implementations=implementations) # depends on [control=['for'], data=['type_']]
def teardown_request(self, func: Callable) -> Callable: """Add a teardown request function to the Blueprint. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.teardown_request`. It applies only to requests that are routed to an endpoint in this blueprint. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.teardown_request def teardown(): ... """ self.record_once(lambda state: state.app.teardown_request(func, self.name)) return func
def function[teardown_request, parameter[self, func]]: constant[Add a teardown request function to the Blueprint. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.teardown_request`. It applies only to requests that are routed to an endpoint in this blueprint. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.teardown_request def teardown(): ... ] call[name[self].record_once, parameter[<ast.Lambda object at 0x7da204960310>]] return[name[func]]
keyword[def] identifier[teardown_request] ( identifier[self] , identifier[func] : identifier[Callable] )-> identifier[Callable] : literal[string] identifier[self] . identifier[record_once] ( keyword[lambda] identifier[state] : identifier[state] . identifier[app] . identifier[teardown_request] ( identifier[func] , identifier[self] . identifier[name] )) keyword[return] identifier[func]
def teardown_request(self, func: Callable) -> Callable: """Add a teardown request function to the Blueprint. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.teardown_request`. It applies only to requests that are routed to an endpoint in this blueprint. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.teardown_request def teardown(): ... """ self.record_once(lambda state: state.app.teardown_request(func, self.name)) return func
def interpret(self, msg): """ Load input """ slides = msg.get('slides', []) self.cache = msg.get('folder') self.gallery = msg.get('gallery', '..') with open(self.cache + '/slides.txt', 'w') as logfile: for ix, item in enumerate(slides): image = self.prepare_image(item) filename = self.cache_image(item, image) text = item.get('caption', '') # do not write text for heading images if item.get('heading'): text = '' if text: with open(filename + '.txt', 'w') as caption: caption.write(text) print('%s,%d' % (filename, item.get('time', 0)), file=logfile)
def function[interpret, parameter[self, msg]]: constant[ Load input ] variable[slides] assign[=] call[name[msg].get, parameter[constant[slides], list[[]]]] name[self].cache assign[=] call[name[msg].get, parameter[constant[folder]]] name[self].gallery assign[=] call[name[msg].get, parameter[constant[gallery], constant[..]]] with call[name[open], parameter[binary_operation[name[self].cache + constant[/slides.txt]], constant[w]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b1384ac0>, <ast.Name object at 0x7da1b1384b80>]]] in starred[call[name[enumerate], parameter[name[slides]]]] begin[:] variable[image] assign[=] call[name[self].prepare_image, parameter[name[item]]] variable[filename] assign[=] call[name[self].cache_image, parameter[name[item], name[image]]] variable[text] assign[=] call[name[item].get, parameter[constant[caption], constant[]]] if call[name[item].get, parameter[constant[heading]]] begin[:] variable[text] assign[=] constant[] if name[text] begin[:] with call[name[open], parameter[binary_operation[name[filename] + constant[.txt]], constant[w]]] begin[:] call[name[caption].write, parameter[name[text]]] call[name[print], parameter[binary_operation[constant[%s,%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b137c7f0>, <ast.Call object at 0x7da1b137f670>]]]]]
keyword[def] identifier[interpret] ( identifier[self] , identifier[msg] ): literal[string] identifier[slides] = identifier[msg] . identifier[get] ( literal[string] ,[]) identifier[self] . identifier[cache] = identifier[msg] . identifier[get] ( literal[string] ) identifier[self] . identifier[gallery] = identifier[msg] . identifier[get] ( literal[string] , literal[string] ) keyword[with] identifier[open] ( identifier[self] . identifier[cache] + literal[string] , literal[string] ) keyword[as] identifier[logfile] : keyword[for] identifier[ix] , identifier[item] keyword[in] identifier[enumerate] ( identifier[slides] ): identifier[image] = identifier[self] . identifier[prepare_image] ( identifier[item] ) identifier[filename] = identifier[self] . identifier[cache_image] ( identifier[item] , identifier[image] ) identifier[text] = identifier[item] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[item] . identifier[get] ( literal[string] ): identifier[text] = literal[string] keyword[if] identifier[text] : keyword[with] identifier[open] ( identifier[filename] + literal[string] , literal[string] ) keyword[as] identifier[caption] : identifier[caption] . identifier[write] ( identifier[text] ) identifier[print] ( literal[string] %( identifier[filename] , identifier[item] . identifier[get] ( literal[string] , literal[int] )), identifier[file] = identifier[logfile] )
def interpret(self, msg): """ Load input """ slides = msg.get('slides', []) self.cache = msg.get('folder') self.gallery = msg.get('gallery', '..') with open(self.cache + '/slides.txt', 'w') as logfile: for (ix, item) in enumerate(slides): image = self.prepare_image(item) filename = self.cache_image(item, image) text = item.get('caption', '') # do not write text for heading images if item.get('heading'): text = '' # depends on [control=['if'], data=[]] if text: with open(filename + '.txt', 'w') as caption: caption.write(text) # depends on [control=['with'], data=['caption']] # depends on [control=['if'], data=[]] print('%s,%d' % (filename, item.get('time', 0)), file=logfile) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['open', 'logfile']]
def _blank(*objs): """Returns true when the object is false, an empty string, or an empty list""" for o in objs: if bool(o): return BooleanValue(False) return BooleanValue(True)
def function[_blank, parameter[]]: constant[Returns true when the object is false, an empty string, or an empty list] for taget[name[o]] in starred[name[objs]] begin[:] if call[name[bool], parameter[name[o]]] begin[:] return[call[name[BooleanValue], parameter[constant[False]]]] return[call[name[BooleanValue], parameter[constant[True]]]]
keyword[def] identifier[_blank] (* identifier[objs] ): literal[string] keyword[for] identifier[o] keyword[in] identifier[objs] : keyword[if] identifier[bool] ( identifier[o] ): keyword[return] identifier[BooleanValue] ( keyword[False] ) keyword[return] identifier[BooleanValue] ( keyword[True] )
def _blank(*objs): """Returns true when the object is false, an empty string, or an empty list""" for o in objs: if bool(o): return BooleanValue(False) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['o']] return BooleanValue(True)
def _apply_policy_config(policy_spec, policy_dict): '''Applies a policy dictionary to a policy spec''' log.trace('policy_dict = %s', policy_dict) if policy_dict.get('name'): policy_spec.name = policy_dict['name'] if policy_dict.get('description'): policy_spec.description = policy_dict['description'] if policy_dict.get('subprofiles'): # Incremental changes to subprofiles and capabilities are not # supported because they would complicate updates too much # The whole configuration of all sub-profiles is expected and applied policy_spec.constraints = pbm.profile.SubProfileCapabilityConstraints() subprofiles = [] for subprofile_dict in policy_dict['subprofiles']: subprofile_spec = \ pbm.profile.SubProfileCapabilityConstraints.SubProfile( name=subprofile_dict['name']) cap_specs = [] if subprofile_dict.get('force_provision'): subprofile_spec.forceProvision = \ subprofile_dict['force_provision'] for cap_dict in subprofile_dict['capabilities']: prop_inst_spec = pbm.capability.PropertyInstance( id=cap_dict['id'] ) setting_type = cap_dict['setting']['type'] if setting_type == 'set': prop_inst_spec.value = pbm.capability.types.DiscreteSet() prop_inst_spec.value.values = cap_dict['setting']['values'] elif setting_type == 'range': prop_inst_spec.value = pbm.capability.types.Range() prop_inst_spec.value.max = cap_dict['setting']['max'] prop_inst_spec.value.min = cap_dict['setting']['min'] elif setting_type == 'scalar': prop_inst_spec.value = cap_dict['setting']['value'] cap_spec = pbm.capability.CapabilityInstance( id=pbm.capability.CapabilityMetadata.UniqueId( id=cap_dict['id'], namespace=cap_dict['namespace']), constraint=[pbm.capability.ConstraintInstance( propertyInstance=[prop_inst_spec])]) cap_specs.append(cap_spec) subprofile_spec.capability = cap_specs subprofiles.append(subprofile_spec) policy_spec.constraints.subProfiles = subprofiles log.trace('updated policy_spec = %s', policy_spec) return policy_spec
def function[_apply_policy_config, parameter[policy_spec, policy_dict]]: constant[Applies a policy dictionary to a policy spec] call[name[log].trace, parameter[constant[policy_dict = %s], name[policy_dict]]] if call[name[policy_dict].get, parameter[constant[name]]] begin[:] name[policy_spec].name assign[=] call[name[policy_dict]][constant[name]] if call[name[policy_dict].get, parameter[constant[description]]] begin[:] name[policy_spec].description assign[=] call[name[policy_dict]][constant[description]] if call[name[policy_dict].get, parameter[constant[subprofiles]]] begin[:] name[policy_spec].constraints assign[=] call[name[pbm].profile.SubProfileCapabilityConstraints, parameter[]] variable[subprofiles] assign[=] list[[]] for taget[name[subprofile_dict]] in starred[call[name[policy_dict]][constant[subprofiles]]] begin[:] variable[subprofile_spec] assign[=] call[name[pbm].profile.SubProfileCapabilityConstraints.SubProfile, parameter[]] variable[cap_specs] assign[=] list[[]] if call[name[subprofile_dict].get, parameter[constant[force_provision]]] begin[:] name[subprofile_spec].forceProvision assign[=] call[name[subprofile_dict]][constant[force_provision]] for taget[name[cap_dict]] in starred[call[name[subprofile_dict]][constant[capabilities]]] begin[:] variable[prop_inst_spec] assign[=] call[name[pbm].capability.PropertyInstance, parameter[]] variable[setting_type] assign[=] call[call[name[cap_dict]][constant[setting]]][constant[type]] if compare[name[setting_type] equal[==] constant[set]] begin[:] name[prop_inst_spec].value assign[=] call[name[pbm].capability.types.DiscreteSet, parameter[]] name[prop_inst_spec].value.values assign[=] call[call[name[cap_dict]][constant[setting]]][constant[values]] variable[cap_spec] assign[=] call[name[pbm].capability.CapabilityInstance, parameter[]] call[name[cap_specs].append, parameter[name[cap_spec]]] name[subprofile_spec].capability assign[=] name[cap_specs] call[name[subprofiles].append, parameter[name[subprofile_spec]]] 
name[policy_spec].constraints.subProfiles assign[=] name[subprofiles] call[name[log].trace, parameter[constant[updated policy_spec = %s], name[policy_spec]]] return[name[policy_spec]]
keyword[def] identifier[_apply_policy_config] ( identifier[policy_spec] , identifier[policy_dict] ): literal[string] identifier[log] . identifier[trace] ( literal[string] , identifier[policy_dict] ) keyword[if] identifier[policy_dict] . identifier[get] ( literal[string] ): identifier[policy_spec] . identifier[name] = identifier[policy_dict] [ literal[string] ] keyword[if] identifier[policy_dict] . identifier[get] ( literal[string] ): identifier[policy_spec] . identifier[description] = identifier[policy_dict] [ literal[string] ] keyword[if] identifier[policy_dict] . identifier[get] ( literal[string] ): identifier[policy_spec] . identifier[constraints] = identifier[pbm] . identifier[profile] . identifier[SubProfileCapabilityConstraints] () identifier[subprofiles] =[] keyword[for] identifier[subprofile_dict] keyword[in] identifier[policy_dict] [ literal[string] ]: identifier[subprofile_spec] = identifier[pbm] . identifier[profile] . identifier[SubProfileCapabilityConstraints] . identifier[SubProfile] ( identifier[name] = identifier[subprofile_dict] [ literal[string] ]) identifier[cap_specs] =[] keyword[if] identifier[subprofile_dict] . identifier[get] ( literal[string] ): identifier[subprofile_spec] . identifier[forceProvision] = identifier[subprofile_dict] [ literal[string] ] keyword[for] identifier[cap_dict] keyword[in] identifier[subprofile_dict] [ literal[string] ]: identifier[prop_inst_spec] = identifier[pbm] . identifier[capability] . identifier[PropertyInstance] ( identifier[id] = identifier[cap_dict] [ literal[string] ] ) identifier[setting_type] = identifier[cap_dict] [ literal[string] ][ literal[string] ] keyword[if] identifier[setting_type] == literal[string] : identifier[prop_inst_spec] . identifier[value] = identifier[pbm] . identifier[capability] . identifier[types] . identifier[DiscreteSet] () identifier[prop_inst_spec] . identifier[value] . 
identifier[values] = identifier[cap_dict] [ literal[string] ][ literal[string] ] keyword[elif] identifier[setting_type] == literal[string] : identifier[prop_inst_spec] . identifier[value] = identifier[pbm] . identifier[capability] . identifier[types] . identifier[Range] () identifier[prop_inst_spec] . identifier[value] . identifier[max] = identifier[cap_dict] [ literal[string] ][ literal[string] ] identifier[prop_inst_spec] . identifier[value] . identifier[min] = identifier[cap_dict] [ literal[string] ][ literal[string] ] keyword[elif] identifier[setting_type] == literal[string] : identifier[prop_inst_spec] . identifier[value] = identifier[cap_dict] [ literal[string] ][ literal[string] ] identifier[cap_spec] = identifier[pbm] . identifier[capability] . identifier[CapabilityInstance] ( identifier[id] = identifier[pbm] . identifier[capability] . identifier[CapabilityMetadata] . identifier[UniqueId] ( identifier[id] = identifier[cap_dict] [ literal[string] ], identifier[namespace] = identifier[cap_dict] [ literal[string] ]), identifier[constraint] =[ identifier[pbm] . identifier[capability] . identifier[ConstraintInstance] ( identifier[propertyInstance] =[ identifier[prop_inst_spec] ])]) identifier[cap_specs] . identifier[append] ( identifier[cap_spec] ) identifier[subprofile_spec] . identifier[capability] = identifier[cap_specs] identifier[subprofiles] . identifier[append] ( identifier[subprofile_spec] ) identifier[policy_spec] . identifier[constraints] . identifier[subProfiles] = identifier[subprofiles] identifier[log] . identifier[trace] ( literal[string] , identifier[policy_spec] ) keyword[return] identifier[policy_spec]
def _apply_policy_config(policy_spec, policy_dict): """Applies a policy dictionary to a policy spec""" log.trace('policy_dict = %s', policy_dict) if policy_dict.get('name'): policy_spec.name = policy_dict['name'] # depends on [control=['if'], data=[]] if policy_dict.get('description'): policy_spec.description = policy_dict['description'] # depends on [control=['if'], data=[]] if policy_dict.get('subprofiles'): # Incremental changes to subprofiles and capabilities are not # supported because they would complicate updates too much # The whole configuration of all sub-profiles is expected and applied policy_spec.constraints = pbm.profile.SubProfileCapabilityConstraints() subprofiles = [] for subprofile_dict in policy_dict['subprofiles']: subprofile_spec = pbm.profile.SubProfileCapabilityConstraints.SubProfile(name=subprofile_dict['name']) cap_specs = [] if subprofile_dict.get('force_provision'): subprofile_spec.forceProvision = subprofile_dict['force_provision'] # depends on [control=['if'], data=[]] for cap_dict in subprofile_dict['capabilities']: prop_inst_spec = pbm.capability.PropertyInstance(id=cap_dict['id']) setting_type = cap_dict['setting']['type'] if setting_type == 'set': prop_inst_spec.value = pbm.capability.types.DiscreteSet() prop_inst_spec.value.values = cap_dict['setting']['values'] # depends on [control=['if'], data=[]] elif setting_type == 'range': prop_inst_spec.value = pbm.capability.types.Range() prop_inst_spec.value.max = cap_dict['setting']['max'] prop_inst_spec.value.min = cap_dict['setting']['min'] # depends on [control=['if'], data=[]] elif setting_type == 'scalar': prop_inst_spec.value = cap_dict['setting']['value'] # depends on [control=['if'], data=[]] cap_spec = pbm.capability.CapabilityInstance(id=pbm.capability.CapabilityMetadata.UniqueId(id=cap_dict['id'], namespace=cap_dict['namespace']), constraint=[pbm.capability.ConstraintInstance(propertyInstance=[prop_inst_spec])]) cap_specs.append(cap_spec) # depends on [control=['for'], 
data=['cap_dict']] subprofile_spec.capability = cap_specs subprofiles.append(subprofile_spec) # depends on [control=['for'], data=['subprofile_dict']] policy_spec.constraints.subProfiles = subprofiles # depends on [control=['if'], data=[]] log.trace('updated policy_spec = %s', policy_spec) return policy_spec
def r_group(means, variances, n, critical_r=2., approx=False): '''Group ``m`` (Markov) chains whose common :py:func:`.r_value` is less than ``critical_r`` in each of the D dimensions. :param means: (m x D) Matrix-like array; the mean value estimates. :param variances: (m x D) Matrix-like array; the variance estimates. ''' assert len(means) == len(variances), \ 'Number of ``means`` (%i) does not match number of ``variances`` (%i)' % (len(means), len(variances)) means = _np.asarray(means) variances = _np.asarray(variances) assert means.ndim == 2, '``means`` must be matrix-like' assert variances.ndim == 2, '``variances`` must be 2-dimensional' assert means.shape[1] == variances.shape[1], \ 'Dimensionality of ``means`` (%i) and ``variances`` (%i) does not match' % (means.shape[1], variances.shape[1]) groups = [] for i in range(len(means)): assigned = False # try to assign component i to an existing group for group in groups: rows = group + [i] # R values for each parameter r_values = _np.array([r_value(means[rows, j], variances[rows, j], n, approx) for j in range(means.shape[1])]) if _np.all(r_values < critical_r): # add to group if R value small enough group.append(i) assigned = True break # if component i has not been added to an existing group case create a new group if not assigned: groups.append([i]) return groups
def function[r_group, parameter[means, variances, n, critical_r, approx]]: constant[Group ``m`` (Markov) chains whose common :py:func:`.r_value` is less than ``critical_r`` in each of the D dimensions. :param means: (m x D) Matrix-like array; the mean value estimates. :param variances: (m x D) Matrix-like array; the variance estimates. ] assert[compare[call[name[len], parameter[name[means]]] equal[==] call[name[len], parameter[name[variances]]]]] variable[means] assign[=] call[name[_np].asarray, parameter[name[means]]] variable[variances] assign[=] call[name[_np].asarray, parameter[name[variances]]] assert[compare[name[means].ndim equal[==] constant[2]]] assert[compare[name[variances].ndim equal[==] constant[2]]] assert[compare[call[name[means].shape][constant[1]] equal[==] call[name[variances].shape][constant[1]]]] variable[groups] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[means]]]]]] begin[:] variable[assigned] assign[=] constant[False] for taget[name[group]] in starred[name[groups]] begin[:] variable[rows] assign[=] binary_operation[name[group] + list[[<ast.Name object at 0x7da18f58fe50>]]] variable[r_values] assign[=] call[name[_np].array, parameter[<ast.ListComp object at 0x7da18f58ee00>]] if call[name[_np].all, parameter[compare[name[r_values] less[<] name[critical_r]]]] begin[:] call[name[group].append, parameter[name[i]]] variable[assigned] assign[=] constant[True] break if <ast.UnaryOp object at 0x7da20c7cbeb0> begin[:] call[name[groups].append, parameter[list[[<ast.Name object at 0x7da20c7c99c0>]]]] return[name[groups]]
keyword[def] identifier[r_group] ( identifier[means] , identifier[variances] , identifier[n] , identifier[critical_r] = literal[int] , identifier[approx] = keyword[False] ): literal[string] keyword[assert] identifier[len] ( identifier[means] )== identifier[len] ( identifier[variances] ), literal[string] %( identifier[len] ( identifier[means] ), identifier[len] ( identifier[variances] )) identifier[means] = identifier[_np] . identifier[asarray] ( identifier[means] ) identifier[variances] = identifier[_np] . identifier[asarray] ( identifier[variances] ) keyword[assert] identifier[means] . identifier[ndim] == literal[int] , literal[string] keyword[assert] identifier[variances] . identifier[ndim] == literal[int] , literal[string] keyword[assert] identifier[means] . identifier[shape] [ literal[int] ]== identifier[variances] . identifier[shape] [ literal[int] ], literal[string] %( identifier[means] . identifier[shape] [ literal[int] ], identifier[variances] . identifier[shape] [ literal[int] ]) identifier[groups] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[means] )): identifier[assigned] = keyword[False] keyword[for] identifier[group] keyword[in] identifier[groups] : identifier[rows] = identifier[group] +[ identifier[i] ] identifier[r_values] = identifier[_np] . identifier[array] ([ identifier[r_value] ( identifier[means] [ identifier[rows] , identifier[j] ], identifier[variances] [ identifier[rows] , identifier[j] ], identifier[n] , identifier[approx] ) keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[means] . identifier[shape] [ literal[int] ])]) keyword[if] identifier[_np] . identifier[all] ( identifier[r_values] < identifier[critical_r] ): identifier[group] . identifier[append] ( identifier[i] ) identifier[assigned] = keyword[True] keyword[break] keyword[if] keyword[not] identifier[assigned] : identifier[groups] . identifier[append] ([ identifier[i] ]) keyword[return] identifier[groups]
def r_group(means, variances, n, critical_r=2.0, approx=False): """Group ``m`` (Markov) chains whose common :py:func:`.r_value` is less than ``critical_r`` in each of the D dimensions. :param means: (m x D) Matrix-like array; the mean value estimates. :param variances: (m x D) Matrix-like array; the variance estimates. """ assert len(means) == len(variances), 'Number of ``means`` (%i) does not match number of ``variances`` (%i)' % (len(means), len(variances)) means = _np.asarray(means) variances = _np.asarray(variances) assert means.ndim == 2, '``means`` must be matrix-like' assert variances.ndim == 2, '``variances`` must be 2-dimensional' assert means.shape[1] == variances.shape[1], 'Dimensionality of ``means`` (%i) and ``variances`` (%i) does not match' % (means.shape[1], variances.shape[1]) groups = [] for i in range(len(means)): assigned = False # try to assign component i to an existing group for group in groups: rows = group + [i] # R values for each parameter r_values = _np.array([r_value(means[rows, j], variances[rows, j], n, approx) for j in range(means.shape[1])]) if _np.all(r_values < critical_r): # add to group if R value small enough group.append(i) assigned = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['group']] # if component i has not been added to an existing group case create a new group if not assigned: groups.append([i]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return groups
def format_csv(self, delim=',', qu='"'): """ Prepares the data in CSV format """ res = qu + self.name + qu + delim if self.data: for d in self.data: res += qu + str(d) + qu + delim return res + '\n'
def function[format_csv, parameter[self, delim, qu]]: constant[ Prepares the data in CSV format ] variable[res] assign[=] binary_operation[binary_operation[binary_operation[name[qu] + name[self].name] + name[qu]] + name[delim]] if name[self].data begin[:] for taget[name[d]] in starred[name[self].data] begin[:] <ast.AugAssign object at 0x7da18bcc8610> return[binary_operation[name[res] + constant[ ]]]
keyword[def] identifier[format_csv] ( identifier[self] , identifier[delim] = literal[string] , identifier[qu] = literal[string] ): literal[string] identifier[res] = identifier[qu] + identifier[self] . identifier[name] + identifier[qu] + identifier[delim] keyword[if] identifier[self] . identifier[data] : keyword[for] identifier[d] keyword[in] identifier[self] . identifier[data] : identifier[res] += identifier[qu] + identifier[str] ( identifier[d] )+ identifier[qu] + identifier[delim] keyword[return] identifier[res] + literal[string]
def format_csv(self, delim=',', qu='"'): """ Prepares the data in CSV format """ res = qu + self.name + qu + delim if self.data: for d in self.data: res += qu + str(d) + qu + delim # depends on [control=['for'], data=['d']] # depends on [control=['if'], data=[]] return res + '\n'
def jcrop_js(js_url=None, with_jquery=True): """Load jcrop Javascript file. :param js_url: The custom JavaScript URL. :param with_jquery: Include jQuery or not, default to ``True``. """ serve_local = current_app.config['AVATARS_SERVE_LOCAL'] if js_url is None: if serve_local: js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js') else: js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js' if with_jquery: if serve_local: jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js') else: jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>' else: jquery = '' return Markup('''%s\n<script src="%s"></script> ''' % (jquery, js_url))
def function[jcrop_js, parameter[js_url, with_jquery]]: constant[Load jcrop Javascript file. :param js_url: The custom JavaScript URL. :param with_jquery: Include jQuery or not, default to ``True``. ] variable[serve_local] assign[=] call[name[current_app].config][constant[AVATARS_SERVE_LOCAL]] if compare[name[js_url] is constant[None]] begin[:] if name[serve_local] begin[:] variable[js_url] assign[=] call[name[url_for], parameter[constant[avatars.static]]] if name[with_jquery] begin[:] if name[serve_local] begin[:] variable[jquery] assign[=] binary_operation[constant[<script src="%s"></script>] <ast.Mod object at 0x7da2590d6920> call[name[url_for], parameter[constant[avatars.static]]]] return[call[name[Markup], parameter[binary_operation[constant[%s <script src="%s"></script> ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0feefb0>, <ast.Name object at 0x7da1b0fef0a0>]]]]]]
keyword[def] identifier[jcrop_js] ( identifier[js_url] = keyword[None] , identifier[with_jquery] = keyword[True] ): literal[string] identifier[serve_local] = identifier[current_app] . identifier[config] [ literal[string] ] keyword[if] identifier[js_url] keyword[is] keyword[None] : keyword[if] identifier[serve_local] : identifier[js_url] = identifier[url_for] ( literal[string] , identifier[filename] = literal[string] ) keyword[else] : identifier[js_url] = literal[string] keyword[if] identifier[with_jquery] : keyword[if] identifier[serve_local] : identifier[jquery] = literal[string] % identifier[url_for] ( literal[string] , identifier[filename] = literal[string] ) keyword[else] : identifier[jquery] = literal[string] keyword[else] : identifier[jquery] = literal[string] keyword[return] identifier[Markup] ( literal[string] %( identifier[jquery] , identifier[js_url] ))
def jcrop_js(js_url=None, with_jquery=True): """Load jcrop Javascript file. :param js_url: The custom JavaScript URL. :param with_jquery: Include jQuery or not, default to ``True``. """ serve_local = current_app.config['AVATARS_SERVE_LOCAL'] if js_url is None: if serve_local: js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js') # depends on [control=['if'], data=[]] else: js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js' # depends on [control=['if'], data=['js_url']] if with_jquery: if serve_local: jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js') # depends on [control=['if'], data=[]] else: jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>' # depends on [control=['if'], data=[]] else: jquery = '' return Markup('%s\n<script src="%s"></script>\n ' % (jquery, js_url))
def irfft2(a, s=None, axes=(-2, -1), norm=None): """ Compute the 2-dimensional inverse FFT of a real array. Parameters ---------- a : array_like The input array s : sequence of ints, optional Shape of the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : ndarray The result of the inverse real 2-D FFT. See Also -------- irfftn : Compute the inverse of the N-dimensional FFT of real input. Notes ----- This is really `irfftn` with different defaults. For more details see `irfftn`. """ return irfftn(a, s, axes, norm)
def function[irfft2, parameter[a, s, axes, norm]]: constant[ Compute the 2-dimensional inverse FFT of a real array. Parameters ---------- a : array_like The input array s : sequence of ints, optional Shape of the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : ndarray The result of the inverse real 2-D FFT. See Also -------- irfftn : Compute the inverse of the N-dimensional FFT of real input. Notes ----- This is really `irfftn` with different defaults. For more details see `irfftn`. ] return[call[name[irfftn], parameter[name[a], name[s], name[axes], name[norm]]]]
keyword[def] identifier[irfft2] ( identifier[a] , identifier[s] = keyword[None] , identifier[axes] =(- literal[int] ,- literal[int] ), identifier[norm] = keyword[None] ): literal[string] keyword[return] identifier[irfftn] ( identifier[a] , identifier[s] , identifier[axes] , identifier[norm] )
def irfft2(a, s=None, axes=(-2, -1), norm=None): """ Compute the 2-dimensional inverse FFT of a real array. Parameters ---------- a : array_like The input array s : sequence of ints, optional Shape of the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : ndarray The result of the inverse real 2-D FFT. See Also -------- irfftn : Compute the inverse of the N-dimensional FFT of real input. Notes ----- This is really `irfftn` with different defaults. For more details see `irfftn`. """ return irfftn(a, s, axes, norm)
def participant(self): """ True if the tasks roles meet the legion's constraints, False otherwise. """ log = self._params.get('log', self._discard) context = self._context_build(pending=True) conf = self._config_pending if conf.get('control') == 'off': log.debug("Excluding task '%s' -- control is off", self._name) return False # If role-set is None (but not the empty set) # then role processing is inhibited. # active_roles = self._legion.get_roles() if active_roles is None: log.debug("Including task '%s' -- role processing is inhibited", self._name) return True # If roles are present, at least one has to match the role-set. # If none are present, the task is always included. # roles = self._get_list(conf.get('roles'), context=context) # If a task has no roles listed, then it particpates # in all roles: # if not roles: log.debug("Including task '%s' -- no explicit roles", self._name) return True for role in roles: if role in active_roles: log.debug("Including task '%s' -- has role '%s'", self._name, role) return True log.debug("Excluding task %r -- no role matches %s", self._name, active_roles) return False
def function[participant, parameter[self]]: constant[ True if the tasks roles meet the legion's constraints, False otherwise. ] variable[log] assign[=] call[name[self]._params.get, parameter[constant[log], name[self]._discard]] variable[context] assign[=] call[name[self]._context_build, parameter[]] variable[conf] assign[=] name[self]._config_pending if compare[call[name[conf].get, parameter[constant[control]]] equal[==] constant[off]] begin[:] call[name[log].debug, parameter[constant[Excluding task '%s' -- control is off], name[self]._name]] return[constant[False]] variable[active_roles] assign[=] call[name[self]._legion.get_roles, parameter[]] if compare[name[active_roles] is constant[None]] begin[:] call[name[log].debug, parameter[constant[Including task '%s' -- role processing is inhibited], name[self]._name]] return[constant[True]] variable[roles] assign[=] call[name[self]._get_list, parameter[call[name[conf].get, parameter[constant[roles]]]]] if <ast.UnaryOp object at 0x7da18f810df0> begin[:] call[name[log].debug, parameter[constant[Including task '%s' -- no explicit roles], name[self]._name]] return[constant[True]] for taget[name[role]] in starred[name[roles]] begin[:] if compare[name[role] in name[active_roles]] begin[:] call[name[log].debug, parameter[constant[Including task '%s' -- has role '%s'], name[self]._name, name[role]]] return[constant[True]] call[name[log].debug, parameter[constant[Excluding task %r -- no role matches %s], name[self]._name, name[active_roles]]] return[constant[False]]
keyword[def] identifier[participant] ( identifier[self] ): literal[string] identifier[log] = identifier[self] . identifier[_params] . identifier[get] ( literal[string] , identifier[self] . identifier[_discard] ) identifier[context] = identifier[self] . identifier[_context_build] ( identifier[pending] = keyword[True] ) identifier[conf] = identifier[self] . identifier[_config_pending] keyword[if] identifier[conf] . identifier[get] ( literal[string] )== literal[string] : identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[_name] ) keyword[return] keyword[False] identifier[active_roles] = identifier[self] . identifier[_legion] . identifier[get_roles] () keyword[if] identifier[active_roles] keyword[is] keyword[None] : identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[_name] ) keyword[return] keyword[True] identifier[roles] = identifier[self] . identifier[_get_list] ( identifier[conf] . identifier[get] ( literal[string] ), identifier[context] = identifier[context] ) keyword[if] keyword[not] identifier[roles] : identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[_name] ) keyword[return] keyword[True] keyword[for] identifier[role] keyword[in] identifier[roles] : keyword[if] identifier[role] keyword[in] identifier[active_roles] : identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[_name] , identifier[role] ) keyword[return] keyword[True] identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[_name] , identifier[active_roles] ) keyword[return] keyword[False]
def participant(self): """ True if the tasks roles meet the legion's constraints, False otherwise. """ log = self._params.get('log', self._discard) context = self._context_build(pending=True) conf = self._config_pending if conf.get('control') == 'off': log.debug("Excluding task '%s' -- control is off", self._name) return False # depends on [control=['if'], data=[]] # If role-set is None (but not the empty set) # then role processing is inhibited. # active_roles = self._legion.get_roles() if active_roles is None: log.debug("Including task '%s' -- role processing is inhibited", self._name) return True # depends on [control=['if'], data=[]] # If roles are present, at least one has to match the role-set. # If none are present, the task is always included. # roles = self._get_list(conf.get('roles'), context=context) # If a task has no roles listed, then it particpates # in all roles: # if not roles: log.debug("Including task '%s' -- no explicit roles", self._name) return True # depends on [control=['if'], data=[]] for role in roles: if role in active_roles: log.debug("Including task '%s' -- has role '%s'", self._name, role) return True # depends on [control=['if'], data=['role']] # depends on [control=['for'], data=['role']] log.debug('Excluding task %r -- no role matches %s', self._name, active_roles) return False
def _AddFieldPaths(node, prefix, field_mask): """Adds the field paths descended from node to field_mask.""" if not node: field_mask.paths.append(prefix) return for name in sorted(node): if prefix: child_path = prefix + '.' + name else: child_path = name _AddFieldPaths(node[name], child_path, field_mask)
def function[_AddFieldPaths, parameter[node, prefix, field_mask]]: constant[Adds the field paths descended from node to field_mask.] if <ast.UnaryOp object at 0x7da20c9916f0> begin[:] call[name[field_mask].paths.append, parameter[name[prefix]]] return[None] for taget[name[name]] in starred[call[name[sorted], parameter[name[node]]]] begin[:] if name[prefix] begin[:] variable[child_path] assign[=] binary_operation[binary_operation[name[prefix] + constant[.]] + name[name]] call[name[_AddFieldPaths], parameter[call[name[node]][name[name]], name[child_path], name[field_mask]]]
keyword[def] identifier[_AddFieldPaths] ( identifier[node] , identifier[prefix] , identifier[field_mask] ): literal[string] keyword[if] keyword[not] identifier[node] : identifier[field_mask] . identifier[paths] . identifier[append] ( identifier[prefix] ) keyword[return] keyword[for] identifier[name] keyword[in] identifier[sorted] ( identifier[node] ): keyword[if] identifier[prefix] : identifier[child_path] = identifier[prefix] + literal[string] + identifier[name] keyword[else] : identifier[child_path] = identifier[name] identifier[_AddFieldPaths] ( identifier[node] [ identifier[name] ], identifier[child_path] , identifier[field_mask] )
def _AddFieldPaths(node, prefix, field_mask): """Adds the field paths descended from node to field_mask.""" if not node: field_mask.paths.append(prefix) return # depends on [control=['if'], data=[]] for name in sorted(node): if prefix: child_path = prefix + '.' + name # depends on [control=['if'], data=[]] else: child_path = name _AddFieldPaths(node[name], child_path, field_mask) # depends on [control=['for'], data=['name']]
def delete_attachment(self, attachment): """Delete attachment from the AR or Analysis The attachment will be only deleted if it is not further referenced by another AR/Analysis. """ # Get the holding parent of this attachment parent = None if attachment.getLinkedRequests(): # Holding parent is an AR parent = attachment.getRequest() elif attachment.getLinkedAnalyses(): # Holding parent is an Analysis parent = attachment.getAnalysis() if parent is None: logger.warn( "Attachment {} is nowhere assigned. This should never happen!" .format(repr(attachment))) return False # Get the other attachments of the holding parent attachments = parent.getAttachment() # New attachments to set if attachment in attachments: attachments.remove(attachment) # Set the attachments w/o the current attachments parent.setAttachment(attachments) retain = False # Attachment is referenced by another Analysis if attachment.getLinkedAnalyses(): holder = attachment.getAnalysis() logger.info("Attachment {} referenced by {} -> RETAIN" .format(repr(attachment), repr(holder))) retain = True # Attachment is referenced by another AR if attachment.getLinkedRequests(): holder = attachment.getRequest() logger.info("Attachment {} referenced by {} -> RETAIN" .format(repr(attachment), repr(holder))) retain = True # Delete attachment finally if retain is False: client = api.get_parent(attachment) client.manage_delObjects([attachment.getId(), ])
def function[delete_attachment, parameter[self, attachment]]: constant[Delete attachment from the AR or Analysis The attachment will be only deleted if it is not further referenced by another AR/Analysis. ] variable[parent] assign[=] constant[None] if call[name[attachment].getLinkedRequests, parameter[]] begin[:] variable[parent] assign[=] call[name[attachment].getRequest, parameter[]] if compare[name[parent] is constant[None]] begin[:] call[name[logger].warn, parameter[call[constant[Attachment {} is nowhere assigned. This should never happen!].format, parameter[call[name[repr], parameter[name[attachment]]]]]]] return[constant[False]] variable[attachments] assign[=] call[name[parent].getAttachment, parameter[]] if compare[name[attachment] in name[attachments]] begin[:] call[name[attachments].remove, parameter[name[attachment]]] call[name[parent].setAttachment, parameter[name[attachments]]] variable[retain] assign[=] constant[False] if call[name[attachment].getLinkedAnalyses, parameter[]] begin[:] variable[holder] assign[=] call[name[attachment].getAnalysis, parameter[]] call[name[logger].info, parameter[call[constant[Attachment {} referenced by {} -> RETAIN].format, parameter[call[name[repr], parameter[name[attachment]]], call[name[repr], parameter[name[holder]]]]]]] variable[retain] assign[=] constant[True] if call[name[attachment].getLinkedRequests, parameter[]] begin[:] variable[holder] assign[=] call[name[attachment].getRequest, parameter[]] call[name[logger].info, parameter[call[constant[Attachment {} referenced by {} -> RETAIN].format, parameter[call[name[repr], parameter[name[attachment]]], call[name[repr], parameter[name[holder]]]]]]] variable[retain] assign[=] constant[True] if compare[name[retain] is constant[False]] begin[:] variable[client] assign[=] call[name[api].get_parent, parameter[name[attachment]]] call[name[client].manage_delObjects, parameter[list[[<ast.Call object at 0x7da2054a5c30>]]]]
keyword[def] identifier[delete_attachment] ( identifier[self] , identifier[attachment] ): literal[string] identifier[parent] = keyword[None] keyword[if] identifier[attachment] . identifier[getLinkedRequests] (): identifier[parent] = identifier[attachment] . identifier[getRequest] () keyword[elif] identifier[attachment] . identifier[getLinkedAnalyses] (): identifier[parent] = identifier[attachment] . identifier[getAnalysis] () keyword[if] identifier[parent] keyword[is] keyword[None] : identifier[logger] . identifier[warn] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[attachment] ))) keyword[return] keyword[False] identifier[attachments] = identifier[parent] . identifier[getAttachment] () keyword[if] identifier[attachment] keyword[in] identifier[attachments] : identifier[attachments] . identifier[remove] ( identifier[attachment] ) identifier[parent] . identifier[setAttachment] ( identifier[attachments] ) identifier[retain] = keyword[False] keyword[if] identifier[attachment] . identifier[getLinkedAnalyses] (): identifier[holder] = identifier[attachment] . identifier[getAnalysis] () identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[attachment] ), identifier[repr] ( identifier[holder] ))) identifier[retain] = keyword[True] keyword[if] identifier[attachment] . identifier[getLinkedRequests] (): identifier[holder] = identifier[attachment] . identifier[getRequest] () identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[attachment] ), identifier[repr] ( identifier[holder] ))) identifier[retain] = keyword[True] keyword[if] identifier[retain] keyword[is] keyword[False] : identifier[client] = identifier[api] . identifier[get_parent] ( identifier[attachment] ) identifier[client] . identifier[manage_delObjects] ([ identifier[attachment] . identifier[getId] (),])
def delete_attachment(self, attachment): """Delete attachment from the AR or Analysis The attachment will be only deleted if it is not further referenced by another AR/Analysis. """ # Get the holding parent of this attachment parent = None if attachment.getLinkedRequests(): # Holding parent is an AR parent = attachment.getRequest() # depends on [control=['if'], data=[]] elif attachment.getLinkedAnalyses(): # Holding parent is an Analysis parent = attachment.getAnalysis() # depends on [control=['if'], data=[]] if parent is None: logger.warn('Attachment {} is nowhere assigned. This should never happen!'.format(repr(attachment))) return False # depends on [control=['if'], data=[]] # Get the other attachments of the holding parent attachments = parent.getAttachment() # New attachments to set if attachment in attachments: attachments.remove(attachment) # depends on [control=['if'], data=['attachment', 'attachments']] # Set the attachments w/o the current attachments parent.setAttachment(attachments) retain = False # Attachment is referenced by another Analysis if attachment.getLinkedAnalyses(): holder = attachment.getAnalysis() logger.info('Attachment {} referenced by {} -> RETAIN'.format(repr(attachment), repr(holder))) retain = True # depends on [control=['if'], data=[]] # Attachment is referenced by another AR if attachment.getLinkedRequests(): holder = attachment.getRequest() logger.info('Attachment {} referenced by {} -> RETAIN'.format(repr(attachment), repr(holder))) retain = True # depends on [control=['if'], data=[]] # Delete attachment finally if retain is False: client = api.get_parent(attachment) client.manage_delObjects([attachment.getId()]) # depends on [control=['if'], data=[]]
def marvcli_user_list(): """List existing users""" app = create_app() for name in db.session.query(User.name).order_by(User.name): click.echo(name[0])
def function[marvcli_user_list, parameter[]]: constant[List existing users] variable[app] assign[=] call[name[create_app], parameter[]] for taget[name[name]] in starred[call[call[name[db].session.query, parameter[name[User].name]].order_by, parameter[name[User].name]]] begin[:] call[name[click].echo, parameter[call[name[name]][constant[0]]]]
keyword[def] identifier[marvcli_user_list] (): literal[string] identifier[app] = identifier[create_app] () keyword[for] identifier[name] keyword[in] identifier[db] . identifier[session] . identifier[query] ( identifier[User] . identifier[name] ). identifier[order_by] ( identifier[User] . identifier[name] ): identifier[click] . identifier[echo] ( identifier[name] [ literal[int] ])
def marvcli_user_list(): """List existing users""" app = create_app() for name in db.session.query(User.name).order_by(User.name): click.echo(name[0]) # depends on [control=['for'], data=['name']]
def get_error(self): """Retrieve error data.""" col_offset = -1 if self.node is not None: try: col_offset = self.node.col_offset except AttributeError: pass try: exc_name = self.exc.__name__ except AttributeError: exc_name = str(self.exc) if exc_name in (None, 'None'): exc_name = 'UnknownError' out = [" %s" % self.expr] if col_offset > 0: out.append(" %s^^^" % ((col_offset)*' ')) out.append(str(self.msg)) return (exc_name, '\n'.join(out))
def function[get_error, parameter[self]]: constant[Retrieve error data.] variable[col_offset] assign[=] <ast.UnaryOp object at 0x7da1b12c5300> if compare[name[self].node is_not constant[None]] begin[:] <ast.Try object at 0x7da1b12c4070> <ast.Try object at 0x7da1b12c5c90> if compare[name[exc_name] in tuple[[<ast.Constant object at 0x7da1b12c5660>, <ast.Constant object at 0x7da1b12c5cc0>]]] begin[:] variable[exc_name] assign[=] constant[UnknownError] variable[out] assign[=] list[[<ast.BinOp object at 0x7da1b12c4fa0>]] if compare[name[col_offset] greater[>] constant[0]] begin[:] call[name[out].append, parameter[binary_operation[constant[ %s^^^] <ast.Mod object at 0x7da2590d6920> binary_operation[name[col_offset] * constant[ ]]]]] call[name[out].append, parameter[call[name[str], parameter[name[self].msg]]]] return[tuple[[<ast.Name object at 0x7da1b1213910>, <ast.Call object at 0x7da1b1213c70>]]]
keyword[def] identifier[get_error] ( identifier[self] ): literal[string] identifier[col_offset] =- literal[int] keyword[if] identifier[self] . identifier[node] keyword[is] keyword[not] keyword[None] : keyword[try] : identifier[col_offset] = identifier[self] . identifier[node] . identifier[col_offset] keyword[except] identifier[AttributeError] : keyword[pass] keyword[try] : identifier[exc_name] = identifier[self] . identifier[exc] . identifier[__name__] keyword[except] identifier[AttributeError] : identifier[exc_name] = identifier[str] ( identifier[self] . identifier[exc] ) keyword[if] identifier[exc_name] keyword[in] ( keyword[None] , literal[string] ): identifier[exc_name] = literal[string] identifier[out] =[ literal[string] % identifier[self] . identifier[expr] ] keyword[if] identifier[col_offset] > literal[int] : identifier[out] . identifier[append] ( literal[string] %(( identifier[col_offset] )* literal[string] )) identifier[out] . identifier[append] ( identifier[str] ( identifier[self] . identifier[msg] )) keyword[return] ( identifier[exc_name] , literal[string] . identifier[join] ( identifier[out] ))
def get_error(self): """Retrieve error data.""" col_offset = -1 if self.node is not None: try: col_offset = self.node.col_offset # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] try: exc_name = self.exc.__name__ # depends on [control=['try'], data=[]] except AttributeError: exc_name = str(self.exc) # depends on [control=['except'], data=[]] if exc_name in (None, 'None'): exc_name = 'UnknownError' # depends on [control=['if'], data=['exc_name']] out = [' %s' % self.expr] if col_offset > 0: out.append(' %s^^^' % (col_offset * ' ')) # depends on [control=['if'], data=['col_offset']] out.append(str(self.msg)) return (exc_name, '\n'.join(out))
def get_client(name, description, base_url=None, middlewares=None, reset=False): """ Build a complete spore client and store it :param name: name of the client :param description: the REST API description as a file or URL :param base_url: the base URL of the REST API :param middlewares: middlewares to enable :type middlewares: ordered list of 2-elements tuples -> (middleware_class, { 'predicate': ..., 'named_arg1': ..., 'named_arg2': ..., ...}) :param reset: regenerate or not the client Example : import britney_utils from britney.middleware.format import Json from britney.middleware.auth import Basic is_json = lambda environ: environ['spore.format'] == 'json' client = britney_utils.get_client('MyRestApi', 'http://my-rest-api.org/description.json', base_url='http://rest-api.org/v2/', middlewares=( (Json, {'predicate': is_json}), (Basic, {'username': 'toto', 'password': 'lala'}) )) """ if name in __clients and not reset: return __clients[name] middlewares = middlewares if middlewares is not None else [] try: client = britney.spyre(description, base_url=base_url) except (SporeClientBuildError, SporeMethodBuildError) as build_errors: logging.getLogger('britney').error(str(build_errors)) else: for middleware in middlewares: kwargs = {} if len(middleware) == 2: kwargs = middleware[1] predicate = kwargs.pop('predicate', None) if predicate: client.enable_if(predicate, middleware[0], **kwargs) else: client.enable(middleware[0], **kwargs) __clients[name] = client return client
def function[get_client, parameter[name, description, base_url, middlewares, reset]]: constant[ Build a complete spore client and store it :param name: name of the client :param description: the REST API description as a file or URL :param base_url: the base URL of the REST API :param middlewares: middlewares to enable :type middlewares: ordered list of 2-elements tuples -> (middleware_class, { 'predicate': ..., 'named_arg1': ..., 'named_arg2': ..., ...}) :param reset: regenerate or not the client Example : import britney_utils from britney.middleware.format import Json from britney.middleware.auth import Basic is_json = lambda environ: environ['spore.format'] == 'json' client = britney_utils.get_client('MyRestApi', 'http://my-rest-api.org/description.json', base_url='http://rest-api.org/v2/', middlewares=( (Json, {'predicate': is_json}), (Basic, {'username': 'toto', 'password': 'lala'}) )) ] if <ast.BoolOp object at 0x7da1b13b9de0> begin[:] return[call[name[__clients]][name[name]]] variable[middlewares] assign[=] <ast.IfExp object at 0x7da1b13ba680> <ast.Try object at 0x7da1b13b99f0>
keyword[def] identifier[get_client] ( identifier[name] , identifier[description] , identifier[base_url] = keyword[None] , identifier[middlewares] = keyword[None] , identifier[reset] = keyword[False] ): literal[string] keyword[if] identifier[name] keyword[in] identifier[__clients] keyword[and] keyword[not] identifier[reset] : keyword[return] identifier[__clients] [ identifier[name] ] identifier[middlewares] = identifier[middlewares] keyword[if] identifier[middlewares] keyword[is] keyword[not] keyword[None] keyword[else] [] keyword[try] : identifier[client] = identifier[britney] . identifier[spyre] ( identifier[description] , identifier[base_url] = identifier[base_url] ) keyword[except] ( identifier[SporeClientBuildError] , identifier[SporeMethodBuildError] ) keyword[as] identifier[build_errors] : identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[error] ( identifier[str] ( identifier[build_errors] )) keyword[else] : keyword[for] identifier[middleware] keyword[in] identifier[middlewares] : identifier[kwargs] ={} keyword[if] identifier[len] ( identifier[middleware] )== literal[int] : identifier[kwargs] = identifier[middleware] [ literal[int] ] identifier[predicate] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] ) keyword[if] identifier[predicate] : identifier[client] . identifier[enable_if] ( identifier[predicate] , identifier[middleware] [ literal[int] ],** identifier[kwargs] ) keyword[else] : identifier[client] . identifier[enable] ( identifier[middleware] [ literal[int] ],** identifier[kwargs] ) identifier[__clients] [ identifier[name] ]= identifier[client] keyword[return] identifier[client]
def get_client(name, description, base_url=None, middlewares=None, reset=False): """ Build a complete spore client and store it :param name: name of the client :param description: the REST API description as a file or URL :param base_url: the base URL of the REST API :param middlewares: middlewares to enable :type middlewares: ordered list of 2-elements tuples -> (middleware_class, { 'predicate': ..., 'named_arg1': ..., 'named_arg2': ..., ...}) :param reset: regenerate or not the client Example : import britney_utils from britney.middleware.format import Json from britney.middleware.auth import Basic is_json = lambda environ: environ['spore.format'] == 'json' client = britney_utils.get_client('MyRestApi', 'http://my-rest-api.org/description.json', base_url='http://rest-api.org/v2/', middlewares=( (Json, {'predicate': is_json}), (Basic, {'username': 'toto', 'password': 'lala'}) )) """ if name in __clients and (not reset): return __clients[name] # depends on [control=['if'], data=[]] middlewares = middlewares if middlewares is not None else [] try: client = britney.spyre(description, base_url=base_url) # depends on [control=['try'], data=[]] except (SporeClientBuildError, SporeMethodBuildError) as build_errors: logging.getLogger('britney').error(str(build_errors)) # depends on [control=['except'], data=['build_errors']] else: for middleware in middlewares: kwargs = {} if len(middleware) == 2: kwargs = middleware[1] # depends on [control=['if'], data=[]] predicate = kwargs.pop('predicate', None) if predicate: client.enable_if(predicate, middleware[0], **kwargs) # depends on [control=['if'], data=[]] else: client.enable(middleware[0], **kwargs) # depends on [control=['for'], data=['middleware']] __clients[name] = client return client
def _loadf16(ins): """ Load a 32 bit (16.16) fixed point value from a memory address If 2nd arg. start with '*', it is always treated as an indirect value. """ output = _f16_oper(ins.quad[2]) output.append('push de') output.append('push hl') return output
def function[_loadf16, parameter[ins]]: constant[ Load a 32 bit (16.16) fixed point value from a memory address If 2nd arg. start with '*', it is always treated as an indirect value. ] variable[output] assign[=] call[name[_f16_oper], parameter[call[name[ins].quad][constant[2]]]] call[name[output].append, parameter[constant[push de]]] call[name[output].append, parameter[constant[push hl]]] return[name[output]]
keyword[def] identifier[_loadf16] ( identifier[ins] ): literal[string] identifier[output] = identifier[_f16_oper] ( identifier[ins] . identifier[quad] [ literal[int] ]) identifier[output] . identifier[append] ( literal[string] ) identifier[output] . identifier[append] ( literal[string] ) keyword[return] identifier[output]
def _loadf16(ins): """ Load a 32 bit (16.16) fixed point value from a memory address If 2nd arg. start with '*', it is always treated as an indirect value. """ output = _f16_oper(ins.quad[2]) output.append('push de') output.append('push hl') return output
def _noise_dict_update(noise_dict): """ Update the noise dictionary parameters with default values, in case any were missing Parameters ---------- noise_dict : dict A dictionary specifying the types of noise in this experiment. The noise types interact in important ways. First, all noise types ending with sigma (e.g. motion sigma) are mixed together in _generate_temporal_noise. These values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute to the brain. If you set the noise dict to matched then it will fit the parameters to match the participant as best as possible. The noise variables are as follows: snr [float]: Ratio of MR signal to the spatial noise sfnr [float]: Ratio of the MR signal to the temporal noise. This is the total variability that the following sigmas 'sum' to: task_sigma [float]: Size of the variance of task specific noise drift_sigma [float]: Size of the variance of drift noise auto_reg_sigma [float]: Size of the variance of autoregressive noise. 
This is an ARMA process where the AR and MA components can be separately specified physiological_sigma [float]: Size of the variance of physiological noise auto_reg_rho [list]: The coefficients of the autoregressive components you are modeling ma_rho [list]:The coefficients of the moving average components you are modeling max_activity [float]: The max value of the averaged brain in order to reference the template voxel_size [list]: The mm size of the voxels fwhm [float]: The gaussian smoothing kernel size (mm) matched [bool]: Specify whether you are fitting the noise parameters The volumes of brain noise that are generated have smoothness specified by 'fwhm' Returns ------- noise_dict : dict Updated dictionary """ # Create the default dictionary default_dict = {'task_sigma': 0, 'drift_sigma': 0, 'auto_reg_sigma': 1, 'auto_reg_rho': [0.5], 'ma_rho': [0.0], 'physiological_sigma': 0, 'sfnr': 90, 'snr': 50, 'max_activity': 1000, 'voxel_size': [1.0, 1.0, 1.0], 'fwhm': 4, 'matched': 1} # Check what noise is in the dictionary and add if necessary. Numbers # determine relative proportion of noise for default_key in default_dict: if default_key not in noise_dict: noise_dict[default_key] = default_dict[default_key] return noise_dict
def function[_noise_dict_update, parameter[noise_dict]]: constant[ Update the noise dictionary parameters with default values, in case any were missing Parameters ---------- noise_dict : dict A dictionary specifying the types of noise in this experiment. The noise types interact in important ways. First, all noise types ending with sigma (e.g. motion sigma) are mixed together in _generate_temporal_noise. These values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute to the brain. If you set the noise dict to matched then it will fit the parameters to match the participant as best as possible. The noise variables are as follows: snr [float]: Ratio of MR signal to the spatial noise sfnr [float]: Ratio of the MR signal to the temporal noise. This is the total variability that the following sigmas 'sum' to: task_sigma [float]: Size of the variance of task specific noise drift_sigma [float]: Size of the variance of drift noise auto_reg_sigma [float]: Size of the variance of autoregressive noise. 
This is an ARMA process where the AR and MA components can be separately specified physiological_sigma [float]: Size of the variance of physiological noise auto_reg_rho [list]: The coefficients of the autoregressive components you are modeling ma_rho [list]:The coefficients of the moving average components you are modeling max_activity [float]: The max value of the averaged brain in order to reference the template voxel_size [list]: The mm size of the voxels fwhm [float]: The gaussian smoothing kernel size (mm) matched [bool]: Specify whether you are fitting the noise parameters The volumes of brain noise that are generated have smoothness specified by 'fwhm' Returns ------- noise_dict : dict Updated dictionary ] variable[default_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b07909a0>, <ast.Constant object at 0x7da1b07914b0>, <ast.Constant object at 0x7da1b0790700>, <ast.Constant object at 0x7da1b07908b0>, <ast.Constant object at 0x7da1b0792c20>, <ast.Constant object at 0x7da1b0792e60>, <ast.Constant object at 0x7da1b0792a10>, <ast.Constant object at 0x7da1b0792b30>, <ast.Constant object at 0x7da1b0790a00>, <ast.Constant object at 0x7da1b07904f0>, <ast.Constant object at 0x7da1b0792ec0>, <ast.Constant object at 0x7da1b0793ac0>], [<ast.Constant object at 0x7da1b0792890>, <ast.Constant object at 0x7da1b07939a0>, <ast.Constant object at 0x7da1b0790a60>, <ast.List object at 0x7da1b07926b0>, <ast.List object at 0x7da1b0793820>, <ast.Constant object at 0x7da1b0792800>, <ast.Constant object at 0x7da1b0791120>, <ast.Constant object at 0x7da1b0793610>, <ast.Constant object at 0x7da1b07922f0>, <ast.List object at 0x7da1b07906d0>, <ast.Constant object at 0x7da1b0790220>, <ast.Constant object at 0x7da1b0792da0>]] for taget[name[default_key]] in starred[name[default_dict]] begin[:] if compare[name[default_key] <ast.NotIn object at 0x7da2590d7190> name[noise_dict]] begin[:] call[name[noise_dict]][name[default_key]] assign[=] 
call[name[default_dict]][name[default_key]] return[name[noise_dict]]
keyword[def] identifier[_noise_dict_update] ( identifier[noise_dict] ): literal[string] identifier[default_dict] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] :[ literal[int] ], literal[string] :[ literal[int] ], literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] :[ literal[int] , literal[int] , literal[int] ], literal[string] : literal[int] , literal[string] : literal[int] } keyword[for] identifier[default_key] keyword[in] identifier[default_dict] : keyword[if] identifier[default_key] keyword[not] keyword[in] identifier[noise_dict] : identifier[noise_dict] [ identifier[default_key] ]= identifier[default_dict] [ identifier[default_key] ] keyword[return] identifier[noise_dict]
def _noise_dict_update(noise_dict): """ Update the noise dictionary parameters with default values, in case any were missing Parameters ---------- noise_dict : dict A dictionary specifying the types of noise in this experiment. The noise types interact in important ways. First, all noise types ending with sigma (e.g. motion sigma) are mixed together in _generate_temporal_noise. These values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute to the brain. If you set the noise dict to matched then it will fit the parameters to match the participant as best as possible. The noise variables are as follows: snr [float]: Ratio of MR signal to the spatial noise sfnr [float]: Ratio of the MR signal to the temporal noise. This is the total variability that the following sigmas 'sum' to: task_sigma [float]: Size of the variance of task specific noise drift_sigma [float]: Size of the variance of drift noise auto_reg_sigma [float]: Size of the variance of autoregressive noise. 
This is an ARMA process where the AR and MA components can be separately specified physiological_sigma [float]: Size of the variance of physiological noise auto_reg_rho [list]: The coefficients of the autoregressive components you are modeling ma_rho [list]:The coefficients of the moving average components you are modeling max_activity [float]: The max value of the averaged brain in order to reference the template voxel_size [list]: The mm size of the voxels fwhm [float]: The gaussian smoothing kernel size (mm) matched [bool]: Specify whether you are fitting the noise parameters The volumes of brain noise that are generated have smoothness specified by 'fwhm' Returns ------- noise_dict : dict Updated dictionary """ # Create the default dictionary default_dict = {'task_sigma': 0, 'drift_sigma': 0, 'auto_reg_sigma': 1, 'auto_reg_rho': [0.5], 'ma_rho': [0.0], 'physiological_sigma': 0, 'sfnr': 90, 'snr': 50, 'max_activity': 1000, 'voxel_size': [1.0, 1.0, 1.0], 'fwhm': 4, 'matched': 1} # Check what noise is in the dictionary and add if necessary. Numbers # determine relative proportion of noise for default_key in default_dict: if default_key not in noise_dict: noise_dict[default_key] = default_dict[default_key] # depends on [control=['if'], data=['default_key', 'noise_dict']] # depends on [control=['for'], data=['default_key']] return noise_dict
def decode(self, data, as_list=False): """ Decode given data. :param data: sequence of bytes (string, list or generator of bytes) :param as_list: whether to return as a list instead of :return: """ return self._concat(self.decode_streaming(data))
def function[decode, parameter[self, data, as_list]]: constant[ Decode given data. :param data: sequence of bytes (string, list or generator of bytes) :param as_list: whether to return as a list instead of :return: ] return[call[name[self]._concat, parameter[call[name[self].decode_streaming, parameter[name[data]]]]]]
keyword[def] identifier[decode] ( identifier[self] , identifier[data] , identifier[as_list] = keyword[False] ): literal[string] keyword[return] identifier[self] . identifier[_concat] ( identifier[self] . identifier[decode_streaming] ( identifier[data] ))
def decode(self, data, as_list=False): """ Decode given data. :param data: sequence of bytes (string, list or generator of bytes) :param as_list: whether to return as a list instead of :return: """ return self._concat(self.decode_streaming(data))
def read_chemical_shielding(self): """ Parse the NMR chemical shieldings data. Only the second part "absolute, valence and core" will be parsed. And only the three right most field (ISO_SHIELDING, SPAN, SKEW) will be retrieved. Returns: List of chemical shieldings in the order of atoms from the OUTCAR. Maryland notation is adopted. """ header_pattern = r"\s+CSA tensor \(J\. Mason, Solid State Nucl\. Magn\. Reson\. 2, " \ r"285 \(1993\)\)\s+" \ r"\s+-{50,}\s+" \ r"\s+EXCLUDING G=0 CONTRIBUTION\s+INCLUDING G=0 CONTRIBUTION\s+" \ r"\s+-{20,}\s+-{20,}\s+" \ r"\s+ATOM\s+ISO_SHIFT\s+SPAN\s+SKEW\s+ISO_SHIFT\s+SPAN\s+SKEW\s+" \ r"-{50,}\s*$" first_part_pattern = r"\s+\(absolute, valence only\)\s+$" swallon_valence_body_pattern = r".+?\(absolute, valence and core\)\s+$" row_pattern = r"\d+(?:\s+[-]?\d+\.\d+){3}\s+" + r'\s+'.join( [r"([-]?\d+\.\d+)"] * 3) footer_pattern = r"-{50,}\s*$" h1 = header_pattern + first_part_pattern cs_valence_only = self.read_table_pattern( h1, row_pattern, footer_pattern, postprocess=float, last_one_only=True) h2 = header_pattern + swallon_valence_body_pattern cs_valence_and_core = self.read_table_pattern( h2, row_pattern, footer_pattern, postprocess=float, last_one_only=True) all_cs = {} for name, cs_table in [["valence_only", cs_valence_only], ["valence_and_core", cs_valence_and_core]]: all_cs[name] = cs_table self.data["chemical_shielding"] = all_cs
def function[read_chemical_shielding, parameter[self]]: constant[ Parse the NMR chemical shieldings data. Only the second part "absolute, valence and core" will be parsed. And only the three right most field (ISO_SHIELDING, SPAN, SKEW) will be retrieved. Returns: List of chemical shieldings in the order of atoms from the OUTCAR. Maryland notation is adopted. ] variable[header_pattern] assign[=] constant[\s+CSA tensor \(J\. Mason, Solid State Nucl\. Magn\. Reson\. 2, 285 \(1993\)\)\s+\s+-{50,}\s+\s+EXCLUDING G=0 CONTRIBUTION\s+INCLUDING G=0 CONTRIBUTION\s+\s+-{20,}\s+-{20,}\s+\s+ATOM\s+ISO_SHIFT\s+SPAN\s+SKEW\s+ISO_SHIFT\s+SPAN\s+SKEW\s+-{50,}\s*$] variable[first_part_pattern] assign[=] constant[\s+\(absolute, valence only\)\s+$] variable[swallon_valence_body_pattern] assign[=] constant[.+?\(absolute, valence and core\)\s+$] variable[row_pattern] assign[=] binary_operation[constant[\d+(?:\s+[-]?\d+\.\d+){3}\s+] + call[constant[\s+].join, parameter[binary_operation[list[[<ast.Constant object at 0x7da2047ea470>]] * constant[3]]]]] variable[footer_pattern] assign[=] constant[-{50,}\s*$] variable[h1] assign[=] binary_operation[name[header_pattern] + name[first_part_pattern]] variable[cs_valence_only] assign[=] call[name[self].read_table_pattern, parameter[name[h1], name[row_pattern], name[footer_pattern]]] variable[h2] assign[=] binary_operation[name[header_pattern] + name[swallon_valence_body_pattern]] variable[cs_valence_and_core] assign[=] call[name[self].read_table_pattern, parameter[name[h2], name[row_pattern], name[footer_pattern]]] variable[all_cs] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da2047e85e0>, <ast.Name object at 0x7da2047eb2b0>]]] in starred[list[[<ast.List object at 0x7da2047e9480>, <ast.List object at 0x7da2047e8be0>]]] begin[:] call[name[all_cs]][name[name]] assign[=] name[cs_table] call[name[self].data][constant[chemical_shielding]] assign[=] name[all_cs]
keyword[def] identifier[read_chemical_shielding] ( identifier[self] ): literal[string] identifier[header_pattern] = literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] identifier[first_part_pattern] = literal[string] identifier[swallon_valence_body_pattern] = literal[string] identifier[row_pattern] = literal[string] + literal[string] . identifier[join] ( [ literal[string] ]* literal[int] ) identifier[footer_pattern] = literal[string] identifier[h1] = identifier[header_pattern] + identifier[first_part_pattern] identifier[cs_valence_only] = identifier[self] . identifier[read_table_pattern] ( identifier[h1] , identifier[row_pattern] , identifier[footer_pattern] , identifier[postprocess] = identifier[float] , identifier[last_one_only] = keyword[True] ) identifier[h2] = identifier[header_pattern] + identifier[swallon_valence_body_pattern] identifier[cs_valence_and_core] = identifier[self] . identifier[read_table_pattern] ( identifier[h2] , identifier[row_pattern] , identifier[footer_pattern] , identifier[postprocess] = identifier[float] , identifier[last_one_only] = keyword[True] ) identifier[all_cs] ={} keyword[for] identifier[name] , identifier[cs_table] keyword[in] [[ literal[string] , identifier[cs_valence_only] ], [ literal[string] , identifier[cs_valence_and_core] ]]: identifier[all_cs] [ identifier[name] ]= identifier[cs_table] identifier[self] . identifier[data] [ literal[string] ]= identifier[all_cs]
def read_chemical_shielding(self): """ Parse the NMR chemical shieldings data. Only the second part "absolute, valence and core" will be parsed. And only the three right most field (ISO_SHIELDING, SPAN, SKEW) will be retrieved. Returns: List of chemical shieldings in the order of atoms from the OUTCAR. Maryland notation is adopted. """ header_pattern = '\\s+CSA tensor \\(J\\. Mason, Solid State Nucl\\. Magn\\. Reson\\. 2, 285 \\(1993\\)\\)\\s+\\s+-{50,}\\s+\\s+EXCLUDING G=0 CONTRIBUTION\\s+INCLUDING G=0 CONTRIBUTION\\s+\\s+-{20,}\\s+-{20,}\\s+\\s+ATOM\\s+ISO_SHIFT\\s+SPAN\\s+SKEW\\s+ISO_SHIFT\\s+SPAN\\s+SKEW\\s+-{50,}\\s*$' first_part_pattern = '\\s+\\(absolute, valence only\\)\\s+$' swallon_valence_body_pattern = '.+?\\(absolute, valence and core\\)\\s+$' row_pattern = '\\d+(?:\\s+[-]?\\d+\\.\\d+){3}\\s+' + '\\s+'.join(['([-]?\\d+\\.\\d+)'] * 3) footer_pattern = '-{50,}\\s*$' h1 = header_pattern + first_part_pattern cs_valence_only = self.read_table_pattern(h1, row_pattern, footer_pattern, postprocess=float, last_one_only=True) h2 = header_pattern + swallon_valence_body_pattern cs_valence_and_core = self.read_table_pattern(h2, row_pattern, footer_pattern, postprocess=float, last_one_only=True) all_cs = {} for (name, cs_table) in [['valence_only', cs_valence_only], ['valence_and_core', cs_valence_and_core]]: all_cs[name] = cs_table # depends on [control=['for'], data=[]] self.data['chemical_shielding'] = all_cs
def perform(self, cfg): """ Performs transformation according to configuration :param cfg: transformation configuration """ self.__src = self._load(cfg[Transformation.__CFG_KEY_LOAD]) self.__transform(cfg[Transformation.__CFG_KEY_TRANSFORM]) self.__cleanup(cfg[Transformation.__CFG_KEY_CLEANUP]) self.__save(cfg[Transformation.__CFG_KEY_SAVE])
def function[perform, parameter[self, cfg]]: constant[ Performs transformation according to configuration :param cfg: transformation configuration ] name[self].__src assign[=] call[name[self]._load, parameter[call[name[cfg]][name[Transformation].__CFG_KEY_LOAD]]] call[name[self].__transform, parameter[call[name[cfg]][name[Transformation].__CFG_KEY_TRANSFORM]]] call[name[self].__cleanup, parameter[call[name[cfg]][name[Transformation].__CFG_KEY_CLEANUP]]] call[name[self].__save, parameter[call[name[cfg]][name[Transformation].__CFG_KEY_SAVE]]]
keyword[def] identifier[perform] ( identifier[self] , identifier[cfg] ): literal[string] identifier[self] . identifier[__src] = identifier[self] . identifier[_load] ( identifier[cfg] [ identifier[Transformation] . identifier[__CFG_KEY_LOAD] ]) identifier[self] . identifier[__transform] ( identifier[cfg] [ identifier[Transformation] . identifier[__CFG_KEY_TRANSFORM] ]) identifier[self] . identifier[__cleanup] ( identifier[cfg] [ identifier[Transformation] . identifier[__CFG_KEY_CLEANUP] ]) identifier[self] . identifier[__save] ( identifier[cfg] [ identifier[Transformation] . identifier[__CFG_KEY_SAVE] ])
def perform(self, cfg): """ Performs transformation according to configuration :param cfg: transformation configuration """ self.__src = self._load(cfg[Transformation.__CFG_KEY_LOAD]) self.__transform(cfg[Transformation.__CFG_KEY_TRANSFORM]) self.__cleanup(cfg[Transformation.__CFG_KEY_CLEANUP]) self.__save(cfg[Transformation.__CFG_KEY_SAVE])
def _parse_template_vars(self): """ find all template variables in self._code, excluding the function name. """ template_vars = set() for var in parsing.find_template_variables(self._code): var = var.lstrip('$') if var == self.name: continue if var in ('pre', 'post'): raise ValueError('GLSL uses reserved template variable $%s' % var) template_vars.add(var) return template_vars
def function[_parse_template_vars, parameter[self]]: constant[ find all template variables in self._code, excluding the function name. ] variable[template_vars] assign[=] call[name[set], parameter[]] for taget[name[var]] in starred[call[name[parsing].find_template_variables, parameter[name[self]._code]]] begin[:] variable[var] assign[=] call[name[var].lstrip, parameter[constant[$]]] if compare[name[var] equal[==] name[self].name] begin[:] continue if compare[name[var] in tuple[[<ast.Constant object at 0x7da1b0e74a30>, <ast.Constant object at 0x7da1b0e747c0>]]] begin[:] <ast.Raise object at 0x7da1b0e75210> call[name[template_vars].add, parameter[name[var]]] return[name[template_vars]]
keyword[def] identifier[_parse_template_vars] ( identifier[self] ): literal[string] identifier[template_vars] = identifier[set] () keyword[for] identifier[var] keyword[in] identifier[parsing] . identifier[find_template_variables] ( identifier[self] . identifier[_code] ): identifier[var] = identifier[var] . identifier[lstrip] ( literal[string] ) keyword[if] identifier[var] == identifier[self] . identifier[name] : keyword[continue] keyword[if] identifier[var] keyword[in] ( literal[string] , literal[string] ): keyword[raise] identifier[ValueError] ( literal[string] % identifier[var] ) identifier[template_vars] . identifier[add] ( identifier[var] ) keyword[return] identifier[template_vars]
def _parse_template_vars(self): """ find all template variables in self._code, excluding the function name. """ template_vars = set() for var in parsing.find_template_variables(self._code): var = var.lstrip('$') if var == self.name: continue # depends on [control=['if'], data=[]] if var in ('pre', 'post'): raise ValueError('GLSL uses reserved template variable $%s' % var) # depends on [control=['if'], data=['var']] template_vars.add(var) # depends on [control=['for'], data=['var']] return template_vars
def list_tar (archive, compression, cmd, verbosity, interactive): """List a TAR archive with the tarfile Python module.""" try: with tarfile.open(archive) as tfile: tfile.list(verbose=verbosity>1) except Exception as err: msg = "error listing %s: %s" % (archive, err) raise util.PatoolError(msg) return None
def function[list_tar, parameter[archive, compression, cmd, verbosity, interactive]]: constant[List a TAR archive with the tarfile Python module.] <ast.Try object at 0x7da1b0604d60> return[constant[None]]
keyword[def] identifier[list_tar] ( identifier[archive] , identifier[compression] , identifier[cmd] , identifier[verbosity] , identifier[interactive] ): literal[string] keyword[try] : keyword[with] identifier[tarfile] . identifier[open] ( identifier[archive] ) keyword[as] identifier[tfile] : identifier[tfile] . identifier[list] ( identifier[verbose] = identifier[verbosity] > literal[int] ) keyword[except] identifier[Exception] keyword[as] identifier[err] : identifier[msg] = literal[string] %( identifier[archive] , identifier[err] ) keyword[raise] identifier[util] . identifier[PatoolError] ( identifier[msg] ) keyword[return] keyword[None]
def list_tar(archive, compression, cmd, verbosity, interactive): """List a TAR archive with the tarfile Python module.""" try: with tarfile.open(archive) as tfile: tfile.list(verbose=verbosity > 1) # depends on [control=['with'], data=['tfile']] # depends on [control=['try'], data=[]] except Exception as err: msg = 'error listing %s: %s' % (archive, err) raise util.PatoolError(msg) # depends on [control=['except'], data=['err']] return None
def create_git_commit(self, message, tree, parents, author=github.GithubObject.NotSet, committer=github.GithubObject.NotSet): """ :calls: `POST /repos/:owner/:repo/git/commits <http://developer.github.com/v3/git/commits>`_ :param message: string :param tree: :class:`github.GitTree.GitTree` :param parents: list of :class:`github.GitCommit.GitCommit` :param author: :class:`github.InputGitAuthor.InputGitAuthor` :param committer: :class:`github.InputGitAuthor.InputGitAuthor` :rtype: :class:`github.GitCommit.GitCommit` """ assert isinstance(message, (str, unicode)), message assert isinstance(tree, github.GitTree.GitTree), tree assert all(isinstance(element, github.GitCommit.GitCommit) for element in parents), parents assert author is github.GithubObject.NotSet or isinstance(author, github.InputGitAuthor), author assert committer is github.GithubObject.NotSet or isinstance(committer, github.InputGitAuthor), committer post_parameters = { "message": message, "tree": tree._identity, "parents": [element._identity for element in parents], } if author is not github.GithubObject.NotSet: post_parameters["author"] = author._identity if committer is not github.GithubObject.NotSet: post_parameters["committer"] = committer._identity headers, data = self._requester.requestJsonAndCheck( "POST", self.url + "/git/commits", input=post_parameters ) return github.GitCommit.GitCommit(self._requester, headers, data, completed=True)
def function[create_git_commit, parameter[self, message, tree, parents, author, committer]]: constant[ :calls: `POST /repos/:owner/:repo/git/commits <http://developer.github.com/v3/git/commits>`_ :param message: string :param tree: :class:`github.GitTree.GitTree` :param parents: list of :class:`github.GitCommit.GitCommit` :param author: :class:`github.InputGitAuthor.InputGitAuthor` :param committer: :class:`github.InputGitAuthor.InputGitAuthor` :rtype: :class:`github.GitCommit.GitCommit` ] assert[call[name[isinstance], parameter[name[message], tuple[[<ast.Name object at 0x7da204961fc0>, <ast.Name object at 0x7da2049632e0>]]]]] assert[call[name[isinstance], parameter[name[tree], name[github].GitTree.GitTree]]] assert[call[name[all], parameter[<ast.GeneratorExp object at 0x7da204960fa0>]]] assert[<ast.BoolOp object at 0x7da204962e00>] assert[<ast.BoolOp object at 0x7da204962320>] variable[post_parameters] assign[=] dictionary[[<ast.Constant object at 0x7da204962f80>, <ast.Constant object at 0x7da2049625c0>, <ast.Constant object at 0x7da204960dc0>], [<ast.Name object at 0x7da204962b90>, <ast.Attribute object at 0x7da204962a40>, <ast.ListComp object at 0x7da204961120>]] if compare[name[author] is_not name[github].GithubObject.NotSet] begin[:] call[name[post_parameters]][constant[author]] assign[=] name[author]._identity if compare[name[committer] is_not name[github].GithubObject.NotSet] begin[:] call[name[post_parameters]][constant[committer]] assign[=] name[committer]._identity <ast.Tuple object at 0x7da2049634c0> assign[=] call[name[self]._requester.requestJsonAndCheck, parameter[constant[POST], binary_operation[name[self].url + constant[/git/commits]]]] return[call[name[github].GitCommit.GitCommit, parameter[name[self]._requester, name[headers], name[data]]]]
keyword[def] identifier[create_git_commit] ( identifier[self] , identifier[message] , identifier[tree] , identifier[parents] , identifier[author] = identifier[github] . identifier[GithubObject] . identifier[NotSet] , identifier[committer] = identifier[github] . identifier[GithubObject] . identifier[NotSet] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[message] ,( identifier[str] , identifier[unicode] )), identifier[message] keyword[assert] identifier[isinstance] ( identifier[tree] , identifier[github] . identifier[GitTree] . identifier[GitTree] ), identifier[tree] keyword[assert] identifier[all] ( identifier[isinstance] ( identifier[element] , identifier[github] . identifier[GitCommit] . identifier[GitCommit] ) keyword[for] identifier[element] keyword[in] identifier[parents] ), identifier[parents] keyword[assert] identifier[author] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[author] , identifier[github] . identifier[InputGitAuthor] ), identifier[author] keyword[assert] identifier[committer] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[committer] , identifier[github] . identifier[InputGitAuthor] ), identifier[committer] identifier[post_parameters] ={ literal[string] : identifier[message] , literal[string] : identifier[tree] . identifier[_identity] , literal[string] :[ identifier[element] . identifier[_identity] keyword[for] identifier[element] keyword[in] identifier[parents] ], } keyword[if] identifier[author] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . identifier[NotSet] : identifier[post_parameters] [ literal[string] ]= identifier[author] . identifier[_identity] keyword[if] identifier[committer] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . 
identifier[NotSet] : identifier[post_parameters] [ literal[string] ]= identifier[committer] . identifier[_identity] identifier[headers] , identifier[data] = identifier[self] . identifier[_requester] . identifier[requestJsonAndCheck] ( literal[string] , identifier[self] . identifier[url] + literal[string] , identifier[input] = identifier[post_parameters] ) keyword[return] identifier[github] . identifier[GitCommit] . identifier[GitCommit] ( identifier[self] . identifier[_requester] , identifier[headers] , identifier[data] , identifier[completed] = keyword[True] )
def create_git_commit(self, message, tree, parents, author=github.GithubObject.NotSet, committer=github.GithubObject.NotSet): """ :calls: `POST /repos/:owner/:repo/git/commits <http://developer.github.com/v3/git/commits>`_ :param message: string :param tree: :class:`github.GitTree.GitTree` :param parents: list of :class:`github.GitCommit.GitCommit` :param author: :class:`github.InputGitAuthor.InputGitAuthor` :param committer: :class:`github.InputGitAuthor.InputGitAuthor` :rtype: :class:`github.GitCommit.GitCommit` """ assert isinstance(message, (str, unicode)), message assert isinstance(tree, github.GitTree.GitTree), tree assert all((isinstance(element, github.GitCommit.GitCommit) for element in parents)), parents assert author is github.GithubObject.NotSet or isinstance(author, github.InputGitAuthor), author assert committer is github.GithubObject.NotSet or isinstance(committer, github.InputGitAuthor), committer post_parameters = {'message': message, 'tree': tree._identity, 'parents': [element._identity for element in parents]} if author is not github.GithubObject.NotSet: post_parameters['author'] = author._identity # depends on [control=['if'], data=['author']] if committer is not github.GithubObject.NotSet: post_parameters['committer'] = committer._identity # depends on [control=['if'], data=['committer']] (headers, data) = self._requester.requestJsonAndCheck('POST', self.url + '/git/commits', input=post_parameters) return github.GitCommit.GitCommit(self._requester, headers, data, completed=True)
def prob_t_compressed(self, seq_pair, multiplicity, t, return_log=False): ''' Calculate the probability of observing a sequence pair at a distance t, for compressed sequences Parameters ---------- seq_pair : numpy array :code:`np.array([(0,1), (2,2), ()..])` as indicies of pairs of aligned positions. (e.g. 'A'==0, 'C'==1 etc). This only lists all occuring parent-child state pairs, order is irrelevant multiplicity : numpy array The number of times a parent-child state pair is observed. This allows compression of the sequence representation t : float Length of the branch separating parent and child return_log : bool Whether or not to exponentiate the result ''' if t<0: logP = -ttconf.BIG_NUMBER else: tmp_eQT = self.expQt(t) bad_indices=(tmp_eQT==0) logQt = np.log(tmp_eQT + ttconf.TINY_NUMBER*(bad_indices)) logQt[np.isnan(logQt) | np.isinf(logQt) | bad_indices] = -ttconf.BIG_NUMBER logP = np.sum(logQt[seq_pair[:,1], seq_pair[:,0]]*multiplicity) return logP if return_log else np.exp(logP)
def function[prob_t_compressed, parameter[self, seq_pair, multiplicity, t, return_log]]: constant[ Calculate the probability of observing a sequence pair at a distance t, for compressed sequences Parameters ---------- seq_pair : numpy array :code:`np.array([(0,1), (2,2), ()..])` as indicies of pairs of aligned positions. (e.g. 'A'==0, 'C'==1 etc). This only lists all occuring parent-child state pairs, order is irrelevant multiplicity : numpy array The number of times a parent-child state pair is observed. This allows compression of the sequence representation t : float Length of the branch separating parent and child return_log : bool Whether or not to exponentiate the result ] if compare[name[t] less[<] constant[0]] begin[:] variable[logP] assign[=] <ast.UnaryOp object at 0x7da1b0242d10> return[<ast.IfExp object at 0x7da1b0241150>]
keyword[def] identifier[prob_t_compressed] ( identifier[self] , identifier[seq_pair] , identifier[multiplicity] , identifier[t] , identifier[return_log] = keyword[False] ): literal[string] keyword[if] identifier[t] < literal[int] : identifier[logP] =- identifier[ttconf] . identifier[BIG_NUMBER] keyword[else] : identifier[tmp_eQT] = identifier[self] . identifier[expQt] ( identifier[t] ) identifier[bad_indices] =( identifier[tmp_eQT] == literal[int] ) identifier[logQt] = identifier[np] . identifier[log] ( identifier[tmp_eQT] + identifier[ttconf] . identifier[TINY_NUMBER] *( identifier[bad_indices] )) identifier[logQt] [ identifier[np] . identifier[isnan] ( identifier[logQt] )| identifier[np] . identifier[isinf] ( identifier[logQt] )| identifier[bad_indices] ]=- identifier[ttconf] . identifier[BIG_NUMBER] identifier[logP] = identifier[np] . identifier[sum] ( identifier[logQt] [ identifier[seq_pair] [:, literal[int] ], identifier[seq_pair] [:, literal[int] ]]* identifier[multiplicity] ) keyword[return] identifier[logP] keyword[if] identifier[return_log] keyword[else] identifier[np] . identifier[exp] ( identifier[logP] )
def prob_t_compressed(self, seq_pair, multiplicity, t, return_log=False): """ Calculate the probability of observing a sequence pair at a distance t, for compressed sequences Parameters ---------- seq_pair : numpy array :code:`np.array([(0,1), (2,2), ()..])` as indicies of pairs of aligned positions. (e.g. 'A'==0, 'C'==1 etc). This only lists all occuring parent-child state pairs, order is irrelevant multiplicity : numpy array The number of times a parent-child state pair is observed. This allows compression of the sequence representation t : float Length of the branch separating parent and child return_log : bool Whether or not to exponentiate the result """ if t < 0: logP = -ttconf.BIG_NUMBER # depends on [control=['if'], data=[]] else: tmp_eQT = self.expQt(t) bad_indices = tmp_eQT == 0 logQt = np.log(tmp_eQT + ttconf.TINY_NUMBER * bad_indices) logQt[np.isnan(logQt) | np.isinf(logQt) | bad_indices] = -ttconf.BIG_NUMBER logP = np.sum(logQt[seq_pair[:, 1], seq_pair[:, 0]] * multiplicity) return logP if return_log else np.exp(logP)
def add_property(self,pid, label,term_span): """ Adds a new property to the property layer @type pid: string @param pid: property identifier @type label: string @param label: the label of the property @type term_span: list @param term_span: list of term identifiers """ new_property = Cproperty(type=self.type) self.node.append(new_property.get_node()) ##Set the id if pid is None: ##Generate a new pid existing_pids = [property.get_id() for property in self] n = 0 new_pid = '' while True: new_pid = 'p'+str(n) if new_pid not in existing_pids: break n += 1 pid = new_pid new_property.set_id(pid) new_property.set_type(label) new_ref = Creferences() new_ref.add_span(term_span) new_property.set_reference(new_ref)
def function[add_property, parameter[self, pid, label, term_span]]: constant[ Adds a new property to the property layer @type pid: string @param pid: property identifier @type label: string @param label: the label of the property @type term_span: list @param term_span: list of term identifiers ] variable[new_property] assign[=] call[name[Cproperty], parameter[]] call[name[self].node.append, parameter[call[name[new_property].get_node, parameter[]]]] if compare[name[pid] is constant[None]] begin[:] variable[existing_pids] assign[=] <ast.ListComp object at 0x7da1b2372140> variable[n] assign[=] constant[0] variable[new_pid] assign[=] constant[] while constant[True] begin[:] variable[new_pid] assign[=] binary_operation[constant[p] + call[name[str], parameter[name[n]]]] if compare[name[new_pid] <ast.NotIn object at 0x7da2590d7190> name[existing_pids]] begin[:] break <ast.AugAssign object at 0x7da1b2373910> variable[pid] assign[=] name[new_pid] call[name[new_property].set_id, parameter[name[pid]]] call[name[new_property].set_type, parameter[name[label]]] variable[new_ref] assign[=] call[name[Creferences], parameter[]] call[name[new_ref].add_span, parameter[name[term_span]]] call[name[new_property].set_reference, parameter[name[new_ref]]]
keyword[def] identifier[add_property] ( identifier[self] , identifier[pid] , identifier[label] , identifier[term_span] ): literal[string] identifier[new_property] = identifier[Cproperty] ( identifier[type] = identifier[self] . identifier[type] ) identifier[self] . identifier[node] . identifier[append] ( identifier[new_property] . identifier[get_node] ()) keyword[if] identifier[pid] keyword[is] keyword[None] : identifier[existing_pids] =[ identifier[property] . identifier[get_id] () keyword[for] identifier[property] keyword[in] identifier[self] ] identifier[n] = literal[int] identifier[new_pid] = literal[string] keyword[while] keyword[True] : identifier[new_pid] = literal[string] + identifier[str] ( identifier[n] ) keyword[if] identifier[new_pid] keyword[not] keyword[in] identifier[existing_pids] : keyword[break] identifier[n] += literal[int] identifier[pid] = identifier[new_pid] identifier[new_property] . identifier[set_id] ( identifier[pid] ) identifier[new_property] . identifier[set_type] ( identifier[label] ) identifier[new_ref] = identifier[Creferences] () identifier[new_ref] . identifier[add_span] ( identifier[term_span] ) identifier[new_property] . identifier[set_reference] ( identifier[new_ref] )
def add_property(self, pid, label, term_span): """ Adds a new property to the property layer @type pid: string @param pid: property identifier @type label: string @param label: the label of the property @type term_span: list @param term_span: list of term identifiers """ new_property = Cproperty(type=self.type) self.node.append(new_property.get_node()) ##Set the id if pid is None: ##Generate a new pid existing_pids = [property.get_id() for property in self] n = 0 new_pid = '' while True: new_pid = 'p' + str(n) if new_pid not in existing_pids: break # depends on [control=['if'], data=[]] n += 1 # depends on [control=['while'], data=[]] pid = new_pid # depends on [control=['if'], data=['pid']] new_property.set_id(pid) new_property.set_type(label) new_ref = Creferences() new_ref.add_span(term_span) new_property.set_reference(new_ref)
def build_createbug(self, product=None, component=None, version=None, summary=None, description=None, comment_private=None, blocks=None, cc=None, assigned_to=None, keywords=None, depends_on=None, groups=None, op_sys=None, platform=None, priority=None, qa_contact=None, resolution=None, severity=None, status=None, target_milestone=None, target_release=None, url=None, sub_component=None, alias=None, comment_tags=None): """ Returns a python dict() with properly formatted parameters to pass to createbug(). See bugzilla documentation for the format of the individual fields: https://bugzilla.readthedocs.io/en/latest/api/core/v1/bug.html#update-bug """ localdict = {} if blocks: localdict["blocks"] = self._listify(blocks) if cc: localdict["cc"] = self._listify(cc) if depends_on: localdict["depends_on"] = self._listify(depends_on) if groups: localdict["groups"] = self._listify(groups) if keywords: localdict["keywords"] = self._listify(keywords) if description: localdict["description"] = description if comment_private: localdict["comment_is_private"] = True # Most of the machinery and formatting here is the same as # build_update, so reuse that as much as possible ret = self.build_update(product=product, component=component, version=version, summary=summary, op_sys=op_sys, platform=platform, priority=priority, qa_contact=qa_contact, resolution=resolution, severity=severity, status=status, target_milestone=target_milestone, target_release=target_release, url=url, assigned_to=assigned_to, sub_component=sub_component, alias=alias, comment_tags=comment_tags) ret.update(localdict) return ret
def function[build_createbug, parameter[self, product, component, version, summary, description, comment_private, blocks, cc, assigned_to, keywords, depends_on, groups, op_sys, platform, priority, qa_contact, resolution, severity, status, target_milestone, target_release, url, sub_component, alias, comment_tags]]: constant[ Returns a python dict() with properly formatted parameters to pass to createbug(). See bugzilla documentation for the format of the individual fields: https://bugzilla.readthedocs.io/en/latest/api/core/v1/bug.html#update-bug ] variable[localdict] assign[=] dictionary[[], []] if name[blocks] begin[:] call[name[localdict]][constant[blocks]] assign[=] call[name[self]._listify, parameter[name[blocks]]] if name[cc] begin[:] call[name[localdict]][constant[cc]] assign[=] call[name[self]._listify, parameter[name[cc]]] if name[depends_on] begin[:] call[name[localdict]][constant[depends_on]] assign[=] call[name[self]._listify, parameter[name[depends_on]]] if name[groups] begin[:] call[name[localdict]][constant[groups]] assign[=] call[name[self]._listify, parameter[name[groups]]] if name[keywords] begin[:] call[name[localdict]][constant[keywords]] assign[=] call[name[self]._listify, parameter[name[keywords]]] if name[description] begin[:] call[name[localdict]][constant[description]] assign[=] name[description] if name[comment_private] begin[:] call[name[localdict]][constant[comment_is_private]] assign[=] constant[True] variable[ret] assign[=] call[name[self].build_update, parameter[]] call[name[ret].update, parameter[name[localdict]]] return[name[ret]]
keyword[def] identifier[build_createbug] ( identifier[self] , identifier[product] = keyword[None] , identifier[component] = keyword[None] , identifier[version] = keyword[None] , identifier[summary] = keyword[None] , identifier[description] = keyword[None] , identifier[comment_private] = keyword[None] , identifier[blocks] = keyword[None] , identifier[cc] = keyword[None] , identifier[assigned_to] = keyword[None] , identifier[keywords] = keyword[None] , identifier[depends_on] = keyword[None] , identifier[groups] = keyword[None] , identifier[op_sys] = keyword[None] , identifier[platform] = keyword[None] , identifier[priority] = keyword[None] , identifier[qa_contact] = keyword[None] , identifier[resolution] = keyword[None] , identifier[severity] = keyword[None] , identifier[status] = keyword[None] , identifier[target_milestone] = keyword[None] , identifier[target_release] = keyword[None] , identifier[url] = keyword[None] , identifier[sub_component] = keyword[None] , identifier[alias] = keyword[None] , identifier[comment_tags] = keyword[None] ): literal[string] identifier[localdict] ={} keyword[if] identifier[blocks] : identifier[localdict] [ literal[string] ]= identifier[self] . identifier[_listify] ( identifier[blocks] ) keyword[if] identifier[cc] : identifier[localdict] [ literal[string] ]= identifier[self] . identifier[_listify] ( identifier[cc] ) keyword[if] identifier[depends_on] : identifier[localdict] [ literal[string] ]= identifier[self] . identifier[_listify] ( identifier[depends_on] ) keyword[if] identifier[groups] : identifier[localdict] [ literal[string] ]= identifier[self] . identifier[_listify] ( identifier[groups] ) keyword[if] identifier[keywords] : identifier[localdict] [ literal[string] ]= identifier[self] . 
identifier[_listify] ( identifier[keywords] ) keyword[if] identifier[description] : identifier[localdict] [ literal[string] ]= identifier[description] keyword[if] identifier[comment_private] : identifier[localdict] [ literal[string] ]= keyword[True] identifier[ret] = identifier[self] . identifier[build_update] ( identifier[product] = identifier[product] , identifier[component] = identifier[component] , identifier[version] = identifier[version] , identifier[summary] = identifier[summary] , identifier[op_sys] = identifier[op_sys] , identifier[platform] = identifier[platform] , identifier[priority] = identifier[priority] , identifier[qa_contact] = identifier[qa_contact] , identifier[resolution] = identifier[resolution] , identifier[severity] = identifier[severity] , identifier[status] = identifier[status] , identifier[target_milestone] = identifier[target_milestone] , identifier[target_release] = identifier[target_release] , identifier[url] = identifier[url] , identifier[assigned_to] = identifier[assigned_to] , identifier[sub_component] = identifier[sub_component] , identifier[alias] = identifier[alias] , identifier[comment_tags] = identifier[comment_tags] ) identifier[ret] . identifier[update] ( identifier[localdict] ) keyword[return] identifier[ret]
def build_createbug(self, product=None, component=None, version=None, summary=None, description=None, comment_private=None, blocks=None, cc=None, assigned_to=None, keywords=None, depends_on=None, groups=None, op_sys=None, platform=None, priority=None, qa_contact=None, resolution=None, severity=None, status=None, target_milestone=None, target_release=None, url=None, sub_component=None, alias=None, comment_tags=None): """ Returns a python dict() with properly formatted parameters to pass to createbug(). See bugzilla documentation for the format of the individual fields: https://bugzilla.readthedocs.io/en/latest/api/core/v1/bug.html#update-bug """ localdict = {} if blocks: localdict['blocks'] = self._listify(blocks) # depends on [control=['if'], data=[]] if cc: localdict['cc'] = self._listify(cc) # depends on [control=['if'], data=[]] if depends_on: localdict['depends_on'] = self._listify(depends_on) # depends on [control=['if'], data=[]] if groups: localdict['groups'] = self._listify(groups) # depends on [control=['if'], data=[]] if keywords: localdict['keywords'] = self._listify(keywords) # depends on [control=['if'], data=[]] if description: localdict['description'] = description if comment_private: localdict['comment_is_private'] = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # Most of the machinery and formatting here is the same as # build_update, so reuse that as much as possible ret = self.build_update(product=product, component=component, version=version, summary=summary, op_sys=op_sys, platform=platform, priority=priority, qa_contact=qa_contact, resolution=resolution, severity=severity, status=status, target_milestone=target_milestone, target_release=target_release, url=url, assigned_to=assigned_to, sub_component=sub_component, alias=alias, comment_tags=comment_tags) ret.update(localdict) return ret
def delete(self): """ Delete this source """ r = self._client.request('DELETE', self.url) logger.info("delete(): %s", r.status_code)
def function[delete, parameter[self]]: constant[ Delete this source ] variable[r] assign[=] call[name[self]._client.request, parameter[constant[DELETE], name[self].url]] call[name[logger].info, parameter[constant[delete(): %s], name[r].status_code]]
keyword[def] identifier[delete] ( identifier[self] ): literal[string] identifier[r] = identifier[self] . identifier[_client] . identifier[request] ( literal[string] , identifier[self] . identifier[url] ) identifier[logger] . identifier[info] ( literal[string] , identifier[r] . identifier[status_code] )
def delete(self): """ Delete this source """ r = self._client.request('DELETE', self.url) logger.info('delete(): %s', r.status_code)
def add(self, pkgs): """Add blacklist packages if not exist """ blacklist = self.get_black() pkgs = set(pkgs) print("\nAdd packages in the blacklist:\n") with open(self.blackfile, "a") as black_conf: for pkg in pkgs: if pkg not in blacklist: print("{0}{1}{2}".format(self.meta.color["GREEN"], pkg, self.meta.color["ENDC"])) black_conf.write(pkg + "\n") self.quit = True black_conf.close() if self.quit: print("")
def function[add, parameter[self, pkgs]]: constant[Add blacklist packages if not exist ] variable[blacklist] assign[=] call[name[self].get_black, parameter[]] variable[pkgs] assign[=] call[name[set], parameter[name[pkgs]]] call[name[print], parameter[constant[ Add packages in the blacklist: ]]] with call[name[open], parameter[name[self].blackfile, constant[a]]] begin[:] for taget[name[pkg]] in starred[name[pkgs]] begin[:] if compare[name[pkg] <ast.NotIn object at 0x7da2590d7190> name[blacklist]] begin[:] call[name[print], parameter[call[constant[{0}{1}{2}].format, parameter[call[name[self].meta.color][constant[GREEN]], name[pkg], call[name[self].meta.color][constant[ENDC]]]]]] call[name[black_conf].write, parameter[binary_operation[name[pkg] + constant[ ]]]] name[self].quit assign[=] constant[True] call[name[black_conf].close, parameter[]] if name[self].quit begin[:] call[name[print], parameter[constant[]]]
keyword[def] identifier[add] ( identifier[self] , identifier[pkgs] ): literal[string] identifier[blacklist] = identifier[self] . identifier[get_black] () identifier[pkgs] = identifier[set] ( identifier[pkgs] ) identifier[print] ( literal[string] ) keyword[with] identifier[open] ( identifier[self] . identifier[blackfile] , literal[string] ) keyword[as] identifier[black_conf] : keyword[for] identifier[pkg] keyword[in] identifier[pkgs] : keyword[if] identifier[pkg] keyword[not] keyword[in] identifier[blacklist] : identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[meta] . identifier[color] [ literal[string] ], identifier[pkg] , identifier[self] . identifier[meta] . identifier[color] [ literal[string] ])) identifier[black_conf] . identifier[write] ( identifier[pkg] + literal[string] ) identifier[self] . identifier[quit] = keyword[True] identifier[black_conf] . identifier[close] () keyword[if] identifier[self] . identifier[quit] : identifier[print] ( literal[string] )
def add(self, pkgs): """Add blacklist packages if not exist """ blacklist = self.get_black() pkgs = set(pkgs) print('\nAdd packages in the blacklist:\n') with open(self.blackfile, 'a') as black_conf: for pkg in pkgs: if pkg not in blacklist: print('{0}{1}{2}'.format(self.meta.color['GREEN'], pkg, self.meta.color['ENDC'])) black_conf.write(pkg + '\n') self.quit = True # depends on [control=['if'], data=['pkg']] # depends on [control=['for'], data=['pkg']] black_conf.close() # depends on [control=['with'], data=['black_conf']] if self.quit: print('') # depends on [control=['if'], data=[]]
def get_pfam(pdb_id): """Return PFAM annotations of given PDB_ID Parameters ---------- pdb_id : string A 4 character string giving a pdb entry of interest Returns ------- out : dict A dictionary containing the PFAM annotations for the specified PDB ID Examples -------- >>> pfam_info = get_pfam('2LME') >>> print(pfam_info) {'pfamHit': {'@pfamAcc': 'PF03895.10', '@pfamName': 'YadA_anchor', '@structureId': '2LME', '@pdbResNumEnd': '105', '@pdbResNumStart': '28', '@pfamDesc': 'YadA-like C-terminal region', '@eValue': '5.0E-22', '@chainId': 'A'}} """ out = get_info(pdb_id, url_root = 'http://www.rcsb.org/pdb/rest/hmmer?structureId=') out = to_dict(out) if not out['hmmer3']: return dict() return remove_at_sign(out['hmmer3'])
def function[get_pfam, parameter[pdb_id]]: constant[Return PFAM annotations of given PDB_ID Parameters ---------- pdb_id : string A 4 character string giving a pdb entry of interest Returns ------- out : dict A dictionary containing the PFAM annotations for the specified PDB ID Examples -------- >>> pfam_info = get_pfam('2LME') >>> print(pfam_info) {'pfamHit': {'@pfamAcc': 'PF03895.10', '@pfamName': 'YadA_anchor', '@structureId': '2LME', '@pdbResNumEnd': '105', '@pdbResNumStart': '28', '@pfamDesc': 'YadA-like C-terminal region', '@eValue': '5.0E-22', '@chainId': 'A'}} ] variable[out] assign[=] call[name[get_info], parameter[name[pdb_id]]] variable[out] assign[=] call[name[to_dict], parameter[name[out]]] if <ast.UnaryOp object at 0x7da18dc07130> begin[:] return[call[name[dict], parameter[]]] return[call[name[remove_at_sign], parameter[call[name[out]][constant[hmmer3]]]]]
keyword[def] identifier[get_pfam] ( identifier[pdb_id] ): literal[string] identifier[out] = identifier[get_info] ( identifier[pdb_id] , identifier[url_root] = literal[string] ) identifier[out] = identifier[to_dict] ( identifier[out] ) keyword[if] keyword[not] identifier[out] [ literal[string] ]: keyword[return] identifier[dict] () keyword[return] identifier[remove_at_sign] ( identifier[out] [ literal[string] ])
def get_pfam(pdb_id): """Return PFAM annotations of given PDB_ID Parameters ---------- pdb_id : string A 4 character string giving a pdb entry of interest Returns ------- out : dict A dictionary containing the PFAM annotations for the specified PDB ID Examples -------- >>> pfam_info = get_pfam('2LME') >>> print(pfam_info) {'pfamHit': {'@pfamAcc': 'PF03895.10', '@pfamName': 'YadA_anchor', '@structureId': '2LME', '@pdbResNumEnd': '105', '@pdbResNumStart': '28', '@pfamDesc': 'YadA-like C-terminal region', '@eValue': '5.0E-22', '@chainId': 'A'}} """ out = get_info(pdb_id, url_root='http://www.rcsb.org/pdb/rest/hmmer?structureId=') out = to_dict(out) if not out['hmmer3']: return dict() # depends on [control=['if'], data=[]] return remove_at_sign(out['hmmer3'])
def watch(self, key, **kwargs): """Watch a key. :param key: key to watch :returns: tuple of ``events_iterator`` and ``cancel``. Use ``events_iterator`` to get the events of key changes and ``cancel`` to cancel the watch request """ event_queue = queue.Queue() def callback(event): event_queue.put(event) w = watch.Watcher(self, key, callback, **kwargs) canceled = threading.Event() def cancel(): canceled.set() event_queue.put(None) w.stop() def iterator(): while not canceled.is_set(): event = event_queue.get() if event is None: canceled.set() if not canceled.is_set(): yield event return iterator(), cancel
def function[watch, parameter[self, key]]: constant[Watch a key. :param key: key to watch :returns: tuple of ``events_iterator`` and ``cancel``. Use ``events_iterator`` to get the events of key changes and ``cancel`` to cancel the watch request ] variable[event_queue] assign[=] call[name[queue].Queue, parameter[]] def function[callback, parameter[event]]: call[name[event_queue].put, parameter[name[event]]] variable[w] assign[=] call[name[watch].Watcher, parameter[name[self], name[key], name[callback]]] variable[canceled] assign[=] call[name[threading].Event, parameter[]] def function[cancel, parameter[]]: call[name[canceled].set, parameter[]] call[name[event_queue].put, parameter[constant[None]]] call[name[w].stop, parameter[]] def function[iterator, parameter[]]: while <ast.UnaryOp object at 0x7da18dc042b0> begin[:] variable[event] assign[=] call[name[event_queue].get, parameter[]] if compare[name[event] is constant[None]] begin[:] call[name[canceled].set, parameter[]] if <ast.UnaryOp object at 0x7da18bcca710> begin[:] <ast.Yield object at 0x7da18bccbc10> return[tuple[[<ast.Call object at 0x7da20c990730>, <ast.Name object at 0x7da20c9909d0>]]]
keyword[def] identifier[watch] ( identifier[self] , identifier[key] ,** identifier[kwargs] ): literal[string] identifier[event_queue] = identifier[queue] . identifier[Queue] () keyword[def] identifier[callback] ( identifier[event] ): identifier[event_queue] . identifier[put] ( identifier[event] ) identifier[w] = identifier[watch] . identifier[Watcher] ( identifier[self] , identifier[key] , identifier[callback] ,** identifier[kwargs] ) identifier[canceled] = identifier[threading] . identifier[Event] () keyword[def] identifier[cancel] (): identifier[canceled] . identifier[set] () identifier[event_queue] . identifier[put] ( keyword[None] ) identifier[w] . identifier[stop] () keyword[def] identifier[iterator] (): keyword[while] keyword[not] identifier[canceled] . identifier[is_set] (): identifier[event] = identifier[event_queue] . identifier[get] () keyword[if] identifier[event] keyword[is] keyword[None] : identifier[canceled] . identifier[set] () keyword[if] keyword[not] identifier[canceled] . identifier[is_set] (): keyword[yield] identifier[event] keyword[return] identifier[iterator] (), identifier[cancel]
def watch(self, key, **kwargs): """Watch a key. :param key: key to watch :returns: tuple of ``events_iterator`` and ``cancel``. Use ``events_iterator`` to get the events of key changes and ``cancel`` to cancel the watch request """ event_queue = queue.Queue() def callback(event): event_queue.put(event) w = watch.Watcher(self, key, callback, **kwargs) canceled = threading.Event() def cancel(): canceled.set() event_queue.put(None) w.stop() def iterator(): while not canceled.is_set(): event = event_queue.get() if event is None: canceled.set() # depends on [control=['if'], data=[]] if not canceled.is_set(): yield event # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] return (iterator(), cancel)
def run_step(self, is_shell): """Run a command. Runs a program or executable. If is_shell is True, executes the command through the shell. Args: is_shell: bool. defaults False. Set to true to execute cmd through the default shell. """ assert is_shell is not None, ("is_shell param must exist for CmdStep.") # why? If shell is True, it is recommended to pass args as a string # rather than as a sequence. if is_shell: args = self.cmd_text else: args = shlex.split(self.cmd_text) if self.is_save: completed_process = subprocess.run(args, cwd=self.cwd, shell=is_shell, # capture_output=True,only>py3.7 stdout=subprocess.PIPE, stderr=subprocess.PIPE, # text=True, only>=py3.7, universal_newlines=True) self.context['cmdOut'] = { 'returncode': completed_process.returncode, 'stdout': completed_process.stdout, 'stderr': completed_process.stderr } # when capture is true, output doesn't write to stdout self.logger.info(f"stdout: {completed_process.stdout}") if completed_process.stderr: self.logger.error(f"stderr: {completed_process.stderr}") # don't swallow the error, because it's the Step swallow decorator # responsibility to decide to ignore or not. completed_process.check_returncode() else: # check=True throws CalledProcessError if exit code != 0 subprocess.run(args, shell=is_shell, check=True, cwd=self.cwd)
def function[run_step, parameter[self, is_shell]]: constant[Run a command. Runs a program or executable. If is_shell is True, executes the command through the shell. Args: is_shell: bool. defaults False. Set to true to execute cmd through the default shell. ] assert[compare[name[is_shell] is_not constant[None]]] if name[is_shell] begin[:] variable[args] assign[=] name[self].cmd_text if name[self].is_save begin[:] variable[completed_process] assign[=] call[name[subprocess].run, parameter[name[args]]] call[name[self].context][constant[cmdOut]] assign[=] dictionary[[<ast.Constant object at 0x7da20c990a90>, <ast.Constant object at 0x7da20c990a30>, <ast.Constant object at 0x7da20c9911e0>], [<ast.Attribute object at 0x7da20c993970>, <ast.Attribute object at 0x7da20c990250>, <ast.Attribute object at 0x7da20c990790>]] call[name[self].logger.info, parameter[<ast.JoinedStr object at 0x7da20c9923b0>]] if name[completed_process].stderr begin[:] call[name[self].logger.error, parameter[<ast.JoinedStr object at 0x7da20c6aaef0>]] call[name[completed_process].check_returncode, parameter[]]
keyword[def] identifier[run_step] ( identifier[self] , identifier[is_shell] ): literal[string] keyword[assert] identifier[is_shell] keyword[is] keyword[not] keyword[None] ,( literal[string] ) keyword[if] identifier[is_shell] : identifier[args] = identifier[self] . identifier[cmd_text] keyword[else] : identifier[args] = identifier[shlex] . identifier[split] ( identifier[self] . identifier[cmd_text] ) keyword[if] identifier[self] . identifier[is_save] : identifier[completed_process] = identifier[subprocess] . identifier[run] ( identifier[args] , identifier[cwd] = identifier[self] . identifier[cwd] , identifier[shell] = identifier[is_shell] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] , identifier[universal_newlines] = keyword[True] ) identifier[self] . identifier[context] [ literal[string] ]={ literal[string] : identifier[completed_process] . identifier[returncode] , literal[string] : identifier[completed_process] . identifier[stdout] , literal[string] : identifier[completed_process] . identifier[stderr] } identifier[self] . identifier[logger] . identifier[info] ( literal[string] ) keyword[if] identifier[completed_process] . identifier[stderr] : identifier[self] . identifier[logger] . identifier[error] ( literal[string] ) identifier[completed_process] . identifier[check_returncode] () keyword[else] : identifier[subprocess] . identifier[run] ( identifier[args] , identifier[shell] = identifier[is_shell] , identifier[check] = keyword[True] , identifier[cwd] = identifier[self] . identifier[cwd] )
def run_step(self, is_shell): """Run a command. Runs a program or executable. If is_shell is True, executes the command through the shell. Args: is_shell: bool. defaults False. Set to true to execute cmd through the default shell. """ assert is_shell is not None, 'is_shell param must exist for CmdStep.' # why? If shell is True, it is recommended to pass args as a string # rather than as a sequence. if is_shell: args = self.cmd_text # depends on [control=['if'], data=[]] else: args = shlex.split(self.cmd_text) if self.is_save: # capture_output=True,only>py3.7 # text=True, only>=py3.7, completed_process = subprocess.run(args, cwd=self.cwd, shell=is_shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) self.context['cmdOut'] = {'returncode': completed_process.returncode, 'stdout': completed_process.stdout, 'stderr': completed_process.stderr} # when capture is true, output doesn't write to stdout self.logger.info(f'stdout: {completed_process.stdout}') if completed_process.stderr: self.logger.error(f'stderr: {completed_process.stderr}') # depends on [control=['if'], data=[]] # don't swallow the error, because it's the Step swallow decorator # responsibility to decide to ignore or not. completed_process.check_returncode() # depends on [control=['if'], data=[]] else: # check=True throws CalledProcessError if exit code != 0 subprocess.run(args, shell=is_shell, check=True, cwd=self.cwd)
def yesno(self, prompt, error='Please type either y or n', intro=None, default=None): """ Ask user for yes or no answer The prompt will include a typical '(y/n):' at the end. Depending on whether ``default`` was specified, this may also be '(Y/n):' or '(y/N):'. The ``default`` argument can be ``True`` or ``False``, with meaning of 'yes' and 'no' respectively. Default is ``None`` which means no default. When default value is specified, malformed or empty response will cause the ``default`` value to be returned. Optional ``intro`` text can be specified which will be shown above the prompt. """ if default is None: prompt += ' (y/n):' else: if default is True: prompt += ' (Y/n):' default = 'y' if default is False: prompt += ' (y/N):' default = 'n' validator = lambda x: x in ['y', 'yes', 'n', 'no'] val = self.rvpl(prompt, error=error, intro=intro, validator=validator, clean=lambda x: x.strip().lower(), strict=default is None, default=default) return val in ['y', 'yes']
def function[yesno, parameter[self, prompt, error, intro, default]]: constant[ Ask user for yes or no answer The prompt will include a typical '(y/n):' at the end. Depending on whether ``default`` was specified, this may also be '(Y/n):' or '(y/N):'. The ``default`` argument can be ``True`` or ``False``, with meaning of 'yes' and 'no' respectively. Default is ``None`` which means no default. When default value is specified, malformed or empty response will cause the ``default`` value to be returned. Optional ``intro`` text can be specified which will be shown above the prompt. ] if compare[name[default] is constant[None]] begin[:] <ast.AugAssign object at 0x7da1b14340d0> variable[validator] assign[=] <ast.Lambda object at 0x7da1b1436ec0> variable[val] assign[=] call[name[self].rvpl, parameter[name[prompt]]] return[compare[name[val] in list[[<ast.Constant object at 0x7da1b15f6590>, <ast.Constant object at 0x7da1b15f5900>]]]]
keyword[def] identifier[yesno] ( identifier[self] , identifier[prompt] , identifier[error] = literal[string] , identifier[intro] = keyword[None] , identifier[default] = keyword[None] ): literal[string] keyword[if] identifier[default] keyword[is] keyword[None] : identifier[prompt] += literal[string] keyword[else] : keyword[if] identifier[default] keyword[is] keyword[True] : identifier[prompt] += literal[string] identifier[default] = literal[string] keyword[if] identifier[default] keyword[is] keyword[False] : identifier[prompt] += literal[string] identifier[default] = literal[string] identifier[validator] = keyword[lambda] identifier[x] : identifier[x] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ] identifier[val] = identifier[self] . identifier[rvpl] ( identifier[prompt] , identifier[error] = identifier[error] , identifier[intro] = identifier[intro] , identifier[validator] = identifier[validator] , identifier[clean] = keyword[lambda] identifier[x] : identifier[x] . identifier[strip] (). identifier[lower] (), identifier[strict] = identifier[default] keyword[is] keyword[None] , identifier[default] = identifier[default] ) keyword[return] identifier[val] keyword[in] [ literal[string] , literal[string] ]
def yesno(self, prompt, error='Please type either y or n', intro=None, default=None): """ Ask user for yes or no answer The prompt will include a typical '(y/n):' at the end. Depending on whether ``default`` was specified, this may also be '(Y/n):' or '(y/N):'. The ``default`` argument can be ``True`` or ``False``, with meaning of 'yes' and 'no' respectively. Default is ``None`` which means no default. When default value is specified, malformed or empty response will cause the ``default`` value to be returned. Optional ``intro`` text can be specified which will be shown above the prompt. """ if default is None: prompt += ' (y/n):' # depends on [control=['if'], data=[]] else: if default is True: prompt += ' (Y/n):' default = 'y' # depends on [control=['if'], data=['default']] if default is False: prompt += ' (y/N):' default = 'n' # depends on [control=['if'], data=['default']] validator = lambda x: x in ['y', 'yes', 'n', 'no'] val = self.rvpl(prompt, error=error, intro=intro, validator=validator, clean=lambda x: x.strip().lower(), strict=default is None, default=default) return val in ['y', 'yes']
def salt_cloud(): ''' The main function for salt-cloud ''' # Define 'salt' global so we may use it after ImportError. Otherwise, # UnboundLocalError will be raised. global salt # pylint: disable=W0602 try: # Late-imports for CLI performance import salt.cloud import salt.cloud.cli except ImportError as e: # No salt cloud on Windows log.error('Error importing salt cloud: %s', e) print('salt-cloud is not available in this system') sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE) if '' in sys.path: sys.path.remove('') client = salt.cloud.cli.SaltCloud() _install_signal_handlers(client) client.run()
def function[salt_cloud, parameter[]]: constant[ The main function for salt-cloud ] <ast.Global object at 0x7da2047e8ac0> <ast.Try object at 0x7da2047e8490> if compare[constant[] in name[sys].path] begin[:] call[name[sys].path.remove, parameter[constant[]]] variable[client] assign[=] call[name[salt].cloud.cli.SaltCloud, parameter[]] call[name[_install_signal_handlers], parameter[name[client]]] call[name[client].run, parameter[]]
keyword[def] identifier[salt_cloud] (): literal[string] keyword[global] identifier[salt] keyword[try] : keyword[import] identifier[salt] . identifier[cloud] keyword[import] identifier[salt] . identifier[cloud] . identifier[cli] keyword[except] identifier[ImportError] keyword[as] identifier[e] : identifier[log] . identifier[error] ( literal[string] , identifier[e] ) identifier[print] ( literal[string] ) identifier[sys] . identifier[exit] ( identifier[salt] . identifier[defaults] . identifier[exitcodes] . identifier[EX_UNAVAILABLE] ) keyword[if] literal[string] keyword[in] identifier[sys] . identifier[path] : identifier[sys] . identifier[path] . identifier[remove] ( literal[string] ) identifier[client] = identifier[salt] . identifier[cloud] . identifier[cli] . identifier[SaltCloud] () identifier[_install_signal_handlers] ( identifier[client] ) identifier[client] . identifier[run] ()
def salt_cloud(): """ The main function for salt-cloud """ # Define 'salt' global so we may use it after ImportError. Otherwise, # UnboundLocalError will be raised. global salt # pylint: disable=W0602 try: # Late-imports for CLI performance import salt.cloud import salt.cloud.cli # depends on [control=['try'], data=[]] except ImportError as e: # No salt cloud on Windows log.error('Error importing salt cloud: %s', e) print('salt-cloud is not available in this system') sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE) # depends on [control=['except'], data=['e']] if '' in sys.path: sys.path.remove('') # depends on [control=['if'], data=[]] client = salt.cloud.cli.SaltCloud() _install_signal_handlers(client) client.run()
def finddirs(root): """Return a list of all the directories under `root`""" retval = [] for root, dirs, files in os.walk(root): for d in dirs: retval.append(os.path.join(root, d)) return retval
def function[finddirs, parameter[root]]: constant[Return a list of all the directories under `root`] variable[retval] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b0a4f640>, <ast.Name object at 0x7da1b0a4e9b0>, <ast.Name object at 0x7da1b0a4e710>]]] in starred[call[name[os].walk, parameter[name[root]]]] begin[:] for taget[name[d]] in starred[name[dirs]] begin[:] call[name[retval].append, parameter[call[name[os].path.join, parameter[name[root], name[d]]]]] return[name[retval]]
keyword[def] identifier[finddirs] ( identifier[root] ): literal[string] identifier[retval] =[] keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[root] ): keyword[for] identifier[d] keyword[in] identifier[dirs] : identifier[retval] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[d] )) keyword[return] identifier[retval]
def finddirs(root): """Return a list of all the directories under `root`""" retval = [] for (root, dirs, files) in os.walk(root): for d in dirs: retval.append(os.path.join(root, d)) # depends on [control=['for'], data=['d']] # depends on [control=['for'], data=[]] return retval
def dist_location(dist): """ Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. """ egg_link = egg_link_path(dist) if os.path.exists(egg_link): return egg_link return dist.location
def function[dist_location, parameter[dist]]: constant[ Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. ] variable[egg_link] assign[=] call[name[egg_link_path], parameter[name[dist]]] if call[name[os].path.exists, parameter[name[egg_link]]] begin[:] return[name[egg_link]] return[name[dist].location]
keyword[def] identifier[dist_location] ( identifier[dist] ): literal[string] identifier[egg_link] = identifier[egg_link_path] ( identifier[dist] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[egg_link] ): keyword[return] identifier[egg_link] keyword[return] identifier[dist] . identifier[location]
def dist_location(dist): """ Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. """ egg_link = egg_link_path(dist) if os.path.exists(egg_link): return egg_link # depends on [control=['if'], data=[]] return dist.location
def pack(self): """Pack the service code for transmission. Returns a 2 byte string.""" sn, sa = self.number, self.attribute return pack("<H", (sn & 0x3ff) << 6 | (sa & 0x3f))
def function[pack, parameter[self]]: constant[Pack the service code for transmission. Returns a 2 byte string.] <ast.Tuple object at 0x7da20cabe0b0> assign[=] tuple[[<ast.Attribute object at 0x7da20cabe170>, <ast.Attribute object at 0x7da20cabd7b0>]] return[call[name[pack], parameter[constant[<H], binary_operation[binary_operation[binary_operation[name[sn] <ast.BitAnd object at 0x7da2590d6b60> constant[1023]] <ast.LShift object at 0x7da2590d69e0> constant[6]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[sa] <ast.BitAnd object at 0x7da2590d6b60> constant[63]]]]]]
keyword[def] identifier[pack] ( identifier[self] ): literal[string] identifier[sn] , identifier[sa] = identifier[self] . identifier[number] , identifier[self] . identifier[attribute] keyword[return] identifier[pack] ( literal[string] ,( identifier[sn] & literal[int] )<< literal[int] |( identifier[sa] & literal[int] ))
def pack(self): """Pack the service code for transmission. Returns a 2 byte string.""" (sn, sa) = (self.number, self.attribute) return pack('<H', (sn & 1023) << 6 | sa & 63)
def _update_with_csrf_disabled(d=None): """Update the input dict with CSRF disabled depending on WTF-Form version. From Flask-WTF 0.14.0, `csrf_enabled` param has been deprecated in favor of `meta={csrf: True/False}`. """ if d is None: d = {} import flask_wtf from pkg_resources import parse_version supports_meta = parse_version(flask_wtf.__version__) >= parse_version( "0.14.0") if supports_meta: d.setdefault('meta', {}) d['meta'].update({'csrf': False}) else: d['csrf_enabled'] = False return d
def function[_update_with_csrf_disabled, parameter[d]]: constant[Update the input dict with CSRF disabled depending on WTF-Form version. From Flask-WTF 0.14.0, `csrf_enabled` param has been deprecated in favor of `meta={csrf: True/False}`. ] if compare[name[d] is constant[None]] begin[:] variable[d] assign[=] dictionary[[], []] import module[flask_wtf] from relative_module[pkg_resources] import module[parse_version] variable[supports_meta] assign[=] compare[call[name[parse_version], parameter[name[flask_wtf].__version__]] greater_or_equal[>=] call[name[parse_version], parameter[constant[0.14.0]]]] if name[supports_meta] begin[:] call[name[d].setdefault, parameter[constant[meta], dictionary[[], []]]] call[call[name[d]][constant[meta]].update, parameter[dictionary[[<ast.Constant object at 0x7da20c794e80>], [<ast.Constant object at 0x7da20c796440>]]]] return[name[d]]
keyword[def] identifier[_update_with_csrf_disabled] ( identifier[d] = keyword[None] ): literal[string] keyword[if] identifier[d] keyword[is] keyword[None] : identifier[d] ={} keyword[import] identifier[flask_wtf] keyword[from] identifier[pkg_resources] keyword[import] identifier[parse_version] identifier[supports_meta] = identifier[parse_version] ( identifier[flask_wtf] . identifier[__version__] )>= identifier[parse_version] ( literal[string] ) keyword[if] identifier[supports_meta] : identifier[d] . identifier[setdefault] ( literal[string] ,{}) identifier[d] [ literal[string] ]. identifier[update] ({ literal[string] : keyword[False] }) keyword[else] : identifier[d] [ literal[string] ]= keyword[False] keyword[return] identifier[d]
def _update_with_csrf_disabled(d=None): """Update the input dict with CSRF disabled depending on WTF-Form version. From Flask-WTF 0.14.0, `csrf_enabled` param has been deprecated in favor of `meta={csrf: True/False}`. """ if d is None: d = {} # depends on [control=['if'], data=['d']] import flask_wtf from pkg_resources import parse_version supports_meta = parse_version(flask_wtf.__version__) >= parse_version('0.14.0') if supports_meta: d.setdefault('meta', {}) d['meta'].update({'csrf': False}) # depends on [control=['if'], data=[]] else: d['csrf_enabled'] = False return d
def _check_intemediate(self, myntr, maxstate): """ For each state Apq which is a known terminal, this function searches for rules Apr -> Apq Aqr and Arq -> Arp Apq where Aqr is also a known terminal or Arp is also a known terminal. It is mainly used as an optimization in order to avoid the O(n^3) for generating all the Apq -> Apr Arq rules during the PDA to CFG procedure. Args: myntr (str): The examined non terminal that was poped out of the queue maxstate (int): The maxstate is used for generating in a dynamic way the CNF rules that were not included due to the optimization. As a result, the algorithm generates these rules only if required. Returns: bool: Returns true if the algorithm was applied at least one time """ # print 'BFS Dictionary Update - Intermediate' x_term = myntr.rfind('@') y_term = myntr.rfind('A') if y_term > x_term: x_term = y_term ids = myntr[x_term + 1:].split(',') if len(ids) < 2: return 0 i = ids[0] j = ids[1] r = 0 find = 0 while r < maxstate: if r != i and r != j: if 'A' + i + ',' + \ repr(r) not in self.resolved \ and 'A' + j + ',' + repr(r) in self.resolved: self.resolved[ 'A' + i + ',' + repr(r)] = self.resolved[myntr] \ + self.resolved['A' + j + ',' + repr(r)] if self._checkfinal('A' + i + ',' + repr(r)): return self.resolved['A' + i + ',' + repr(r)] if 'A' + i + ',' + repr(r) not in self.bfs_queue: self.bfs_queue.append('A' + i + ',' + repr(r)) find = 1 if 'A' + repr(r) + ',' + j not in self.resolved and 'A' + \ repr(r) + ',' + i in self.resolved: self.resolved[ 'A' + repr(r) + ',' + j] = self.resolved['A' + repr(r) + ',' + i] \ + self.resolved[myntr] if self._checkfinal('A' + repr(r) + ',' + j): return self.resolved['A' + repr(r) + ',' + j] if 'A' + repr(r) + ',' + j not in self.bfs_queue: self.bfs_queue.append('A' + repr(r) + ',' + j) find = 1 r = r + 1 if find == 1: return 1 return 0
def function[_check_intemediate, parameter[self, myntr, maxstate]]: constant[ For each state Apq which is a known terminal, this function searches for rules Apr -> Apq Aqr and Arq -> Arp Apq where Aqr is also a known terminal or Arp is also a known terminal. It is mainly used as an optimization in order to avoid the O(n^3) for generating all the Apq -> Apr Arq rules during the PDA to CFG procedure. Args: myntr (str): The examined non terminal that was poped out of the queue maxstate (int): The maxstate is used for generating in a dynamic way the CNF rules that were not included due to the optimization. As a result, the algorithm generates these rules only if required. Returns: bool: Returns true if the algorithm was applied at least one time ] variable[x_term] assign[=] call[name[myntr].rfind, parameter[constant[@]]] variable[y_term] assign[=] call[name[myntr].rfind, parameter[constant[A]]] if compare[name[y_term] greater[>] name[x_term]] begin[:] variable[x_term] assign[=] name[y_term] variable[ids] assign[=] call[call[name[myntr]][<ast.Slice object at 0x7da20e9b0820>].split, parameter[constant[,]]] if compare[call[name[len], parameter[name[ids]]] less[<] constant[2]] begin[:] return[constant[0]] variable[i] assign[=] call[name[ids]][constant[0]] variable[j] assign[=] call[name[ids]][constant[1]] variable[r] assign[=] constant[0] variable[find] assign[=] constant[0] while compare[name[r] less[<] name[maxstate]] begin[:] if <ast.BoolOp object at 0x7da18bc720b0> begin[:] if <ast.BoolOp object at 0x7da18bc72110> begin[:] call[name[self].resolved][binary_operation[binary_operation[binary_operation[constant[A] + name[i]] + constant[,]] + call[name[repr], parameter[name[r]]]]] assign[=] binary_operation[call[name[self].resolved][name[myntr]] + call[name[self].resolved][binary_operation[binary_operation[binary_operation[constant[A] + name[j]] + constant[,]] + call[name[repr], parameter[name[r]]]]]] if call[name[self]._checkfinal, 
parameter[binary_operation[binary_operation[binary_operation[constant[A] + name[i]] + constant[,]] + call[name[repr], parameter[name[r]]]]]] begin[:] return[call[name[self].resolved][binary_operation[binary_operation[binary_operation[constant[A] + name[i]] + constant[,]] + call[name[repr], parameter[name[r]]]]]] if compare[binary_operation[binary_operation[binary_operation[constant[A] + name[i]] + constant[,]] + call[name[repr], parameter[name[r]]]] <ast.NotIn object at 0x7da2590d7190> name[self].bfs_queue] begin[:] call[name[self].bfs_queue.append, parameter[binary_operation[binary_operation[binary_operation[constant[A] + name[i]] + constant[,]] + call[name[repr], parameter[name[r]]]]]] variable[find] assign[=] constant[1] if <ast.BoolOp object at 0x7da20c992290> begin[:] call[name[self].resolved][binary_operation[binary_operation[binary_operation[constant[A] + call[name[repr], parameter[name[r]]]] + constant[,]] + name[j]]] assign[=] binary_operation[call[name[self].resolved][binary_operation[binary_operation[binary_operation[constant[A] + call[name[repr], parameter[name[r]]]] + constant[,]] + name[i]]] + call[name[self].resolved][name[myntr]]] if call[name[self]._checkfinal, parameter[binary_operation[binary_operation[binary_operation[constant[A] + call[name[repr], parameter[name[r]]]] + constant[,]] + name[j]]]] begin[:] return[call[name[self].resolved][binary_operation[binary_operation[binary_operation[constant[A] + call[name[repr], parameter[name[r]]]] + constant[,]] + name[j]]]] if compare[binary_operation[binary_operation[binary_operation[constant[A] + call[name[repr], parameter[name[r]]]] + constant[,]] + name[j]] <ast.NotIn object at 0x7da2590d7190> name[self].bfs_queue] begin[:] call[name[self].bfs_queue.append, parameter[binary_operation[binary_operation[binary_operation[constant[A] + call[name[repr], parameter[name[r]]]] + constant[,]] + name[j]]]] variable[find] assign[=] constant[1] variable[r] assign[=] binary_operation[name[r] + constant[1]] if 
compare[name[find] equal[==] constant[1]] begin[:] return[constant[1]] return[constant[0]]
keyword[def] identifier[_check_intemediate] ( identifier[self] , identifier[myntr] , identifier[maxstate] ): literal[string] identifier[x_term] = identifier[myntr] . identifier[rfind] ( literal[string] ) identifier[y_term] = identifier[myntr] . identifier[rfind] ( literal[string] ) keyword[if] identifier[y_term] > identifier[x_term] : identifier[x_term] = identifier[y_term] identifier[ids] = identifier[myntr] [ identifier[x_term] + literal[int] :]. identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[ids] )< literal[int] : keyword[return] literal[int] identifier[i] = identifier[ids] [ literal[int] ] identifier[j] = identifier[ids] [ literal[int] ] identifier[r] = literal[int] identifier[find] = literal[int] keyword[while] identifier[r] < identifier[maxstate] : keyword[if] identifier[r] != identifier[i] keyword[and] identifier[r] != identifier[j] : keyword[if] literal[string] + identifier[i] + literal[string] + identifier[repr] ( identifier[r] ) keyword[not] keyword[in] identifier[self] . identifier[resolved] keyword[and] literal[string] + identifier[j] + literal[string] + identifier[repr] ( identifier[r] ) keyword[in] identifier[self] . identifier[resolved] : identifier[self] . identifier[resolved] [ literal[string] + identifier[i] + literal[string] + identifier[repr] ( identifier[r] )]= identifier[self] . identifier[resolved] [ identifier[myntr] ]+ identifier[self] . identifier[resolved] [ literal[string] + identifier[j] + literal[string] + identifier[repr] ( identifier[r] )] keyword[if] identifier[self] . identifier[_checkfinal] ( literal[string] + identifier[i] + literal[string] + identifier[repr] ( identifier[r] )): keyword[return] identifier[self] . identifier[resolved] [ literal[string] + identifier[i] + literal[string] + identifier[repr] ( identifier[r] )] keyword[if] literal[string] + identifier[i] + literal[string] + identifier[repr] ( identifier[r] ) keyword[not] keyword[in] identifier[self] . 
identifier[bfs_queue] : identifier[self] . identifier[bfs_queue] . identifier[append] ( literal[string] + identifier[i] + literal[string] + identifier[repr] ( identifier[r] )) identifier[find] = literal[int] keyword[if] literal[string] + identifier[repr] ( identifier[r] )+ literal[string] + identifier[j] keyword[not] keyword[in] identifier[self] . identifier[resolved] keyword[and] literal[string] + identifier[repr] ( identifier[r] )+ literal[string] + identifier[i] keyword[in] identifier[self] . identifier[resolved] : identifier[self] . identifier[resolved] [ literal[string] + identifier[repr] ( identifier[r] )+ literal[string] + identifier[j] ]= identifier[self] . identifier[resolved] [ literal[string] + identifier[repr] ( identifier[r] )+ literal[string] + identifier[i] ]+ identifier[self] . identifier[resolved] [ identifier[myntr] ] keyword[if] identifier[self] . identifier[_checkfinal] ( literal[string] + identifier[repr] ( identifier[r] )+ literal[string] + identifier[j] ): keyword[return] identifier[self] . identifier[resolved] [ literal[string] + identifier[repr] ( identifier[r] )+ literal[string] + identifier[j] ] keyword[if] literal[string] + identifier[repr] ( identifier[r] )+ literal[string] + identifier[j] keyword[not] keyword[in] identifier[self] . identifier[bfs_queue] : identifier[self] . identifier[bfs_queue] . identifier[append] ( literal[string] + identifier[repr] ( identifier[r] )+ literal[string] + identifier[j] ) identifier[find] = literal[int] identifier[r] = identifier[r] + literal[int] keyword[if] identifier[find] == literal[int] : keyword[return] literal[int] keyword[return] literal[int]
def _check_intemediate(self, myntr, maxstate): """ For each state Apq which is a known terminal, this function searches for rules Apr -> Apq Aqr and Arq -> Arp Apq where Aqr is also a known terminal or Arp is also a known terminal. It is mainly used as an optimization in order to avoid the O(n^3) for generating all the Apq -> Apr Arq rules during the PDA to CFG procedure. Args: myntr (str): The examined non terminal that was poped out of the queue maxstate (int): The maxstate is used for generating in a dynamic way the CNF rules that were not included due to the optimization. As a result, the algorithm generates these rules only if required. Returns: bool: Returns true if the algorithm was applied at least one time """ # print 'BFS Dictionary Update - Intermediate' x_term = myntr.rfind('@') y_term = myntr.rfind('A') if y_term > x_term: x_term = y_term # depends on [control=['if'], data=['y_term', 'x_term']] ids = myntr[x_term + 1:].split(',') if len(ids) < 2: return 0 # depends on [control=['if'], data=[]] i = ids[0] j = ids[1] r = 0 find = 0 while r < maxstate: if r != i and r != j: if 'A' + i + ',' + repr(r) not in self.resolved and 'A' + j + ',' + repr(r) in self.resolved: self.resolved['A' + i + ',' + repr(r)] = self.resolved[myntr] + self.resolved['A' + j + ',' + repr(r)] if self._checkfinal('A' + i + ',' + repr(r)): return self.resolved['A' + i + ',' + repr(r)] # depends on [control=['if'], data=[]] if 'A' + i + ',' + repr(r) not in self.bfs_queue: self.bfs_queue.append('A' + i + ',' + repr(r)) # depends on [control=['if'], data=[]] find = 1 # depends on [control=['if'], data=[]] if 'A' + repr(r) + ',' + j not in self.resolved and 'A' + repr(r) + ',' + i in self.resolved: self.resolved['A' + repr(r) + ',' + j] = self.resolved['A' + repr(r) + ',' + i] + self.resolved[myntr] if self._checkfinal('A' + repr(r) + ',' + j): return self.resolved['A' + repr(r) + ',' + j] # depends on [control=['if'], data=[]] if 'A' + repr(r) + ',' + j not in self.bfs_queue: 
self.bfs_queue.append('A' + repr(r) + ',' + j) # depends on [control=['if'], data=[]] find = 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] r = r + 1 # depends on [control=['while'], data=['r']] if find == 1: return 1 # depends on [control=['if'], data=[]] return 0
def important_dates(year): """Returns a dictionary of important dates""" output = {} data = mlbgame.data.get_important_dates(year) important_dates = etree.parse(data).getroot().\ find('queryResults').find('row') try: for x in important_dates.attrib: output[x] = important_dates.attrib[x] except AttributeError: raise ValueError('Unable to find important dates for {}.'.format(year)) return output
def function[important_dates, parameter[year]]: constant[Returns a dictionary of important dates] variable[output] assign[=] dictionary[[], []] variable[data] assign[=] call[name[mlbgame].data.get_important_dates, parameter[name[year]]] variable[important_dates] assign[=] call[call[call[call[name[etree].parse, parameter[name[data]]].getroot, parameter[]].find, parameter[constant[queryResults]]].find, parameter[constant[row]]] <ast.Try object at 0x7da207f9a980> return[name[output]]
keyword[def] identifier[important_dates] ( identifier[year] ): literal[string] identifier[output] ={} identifier[data] = identifier[mlbgame] . identifier[data] . identifier[get_important_dates] ( identifier[year] ) identifier[important_dates] = identifier[etree] . identifier[parse] ( identifier[data] ). identifier[getroot] (). identifier[find] ( literal[string] ). identifier[find] ( literal[string] ) keyword[try] : keyword[for] identifier[x] keyword[in] identifier[important_dates] . identifier[attrib] : identifier[output] [ identifier[x] ]= identifier[important_dates] . identifier[attrib] [ identifier[x] ] keyword[except] identifier[AttributeError] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[year] )) keyword[return] identifier[output]
def important_dates(year): """Returns a dictionary of important dates""" output = {} data = mlbgame.data.get_important_dates(year) important_dates = etree.parse(data).getroot().find('queryResults').find('row') try: for x in important_dates.attrib: output[x] = important_dates.attrib[x] # depends on [control=['for'], data=['x']] # depends on [control=['try'], data=[]] except AttributeError: raise ValueError('Unable to find important dates for {}.'.format(year)) # depends on [control=['except'], data=[]] return output
def _load_modeling_extent(self): """ # Get extent from GSSHA Grid in LSM coordinates # Determine range within LSM Grid """ #### # STEP 1: Get extent from GSSHA Grid in LSM coordinates #### # reproject GSSHA grid and get bounds min_x, max_x, min_y, max_y = self.gssha_grid.bounds(as_projection=self.xd.lsm.projection) # set subset indices self._set_subset_indices(min_y, max_y, min_x, max_x)
def function[_load_modeling_extent, parameter[self]]: constant[ # Get extent from GSSHA Grid in LSM coordinates # Determine range within LSM Grid ] <ast.Tuple object at 0x7da20c6a8460> assign[=] call[name[self].gssha_grid.bounds, parameter[]] call[name[self]._set_subset_indices, parameter[name[min_y], name[max_y], name[min_x], name[max_x]]]
keyword[def] identifier[_load_modeling_extent] ( identifier[self] ): literal[string] identifier[min_x] , identifier[max_x] , identifier[min_y] , identifier[max_y] = identifier[self] . identifier[gssha_grid] . identifier[bounds] ( identifier[as_projection] = identifier[self] . identifier[xd] . identifier[lsm] . identifier[projection] ) identifier[self] . identifier[_set_subset_indices] ( identifier[min_y] , identifier[max_y] , identifier[min_x] , identifier[max_x] )
def _load_modeling_extent(self): """ # Get extent from GSSHA Grid in LSM coordinates # Determine range within LSM Grid """ #### # STEP 1: Get extent from GSSHA Grid in LSM coordinates #### # reproject GSSHA grid and get bounds (min_x, max_x, min_y, max_y) = self.gssha_grid.bounds(as_projection=self.xd.lsm.projection) # set subset indices self._set_subset_indices(min_y, max_y, min_x, max_x)
def findSynonyms(self, word, num): """ Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns a dataframe with two fields word and similarity (which gives the cosine similarity). """ if not isinstance(word, basestring): word = _convert_to_vector(word) return self._call_java("findSynonyms", word, num)
def function[findSynonyms, parameter[self, word, num]]: constant[ Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns a dataframe with two fields word and similarity (which gives the cosine similarity). ] if <ast.UnaryOp object at 0x7da1b1d56920> begin[:] variable[word] assign[=] call[name[_convert_to_vector], parameter[name[word]]] return[call[name[self]._call_java, parameter[constant[findSynonyms], name[word], name[num]]]]
keyword[def] identifier[findSynonyms] ( identifier[self] , identifier[word] , identifier[num] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[word] , identifier[basestring] ): identifier[word] = identifier[_convert_to_vector] ( identifier[word] ) keyword[return] identifier[self] . identifier[_call_java] ( literal[string] , identifier[word] , identifier[num] )
def findSynonyms(self, word, num): """ Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns a dataframe with two fields word and similarity (which gives the cosine similarity). """ if not isinstance(word, basestring): word = _convert_to_vector(word) # depends on [control=['if'], data=[]] return self._call_java('findSynonyms', word, num)
def flows(args): """ todo : add some example :param args: :return: """ def flow_if_not(fun): # t = type(fun) if isinstance(fun, iterator): return fun elif isinstance(fun, type) and 'itertools' in str(fun.__class__): return fun else: try: return flow(fun) except AttributeError: # generator object has no attribute '__module__' return fun return FlowList(map(flow_if_not, args))
def function[flows, parameter[args]]: constant[ todo : add some example :param args: :return: ] def function[flow_if_not, parameter[fun]]: if call[name[isinstance], parameter[name[fun], name[iterator]]] begin[:] return[name[fun]] return[call[name[FlowList], parameter[call[name[map], parameter[name[flow_if_not], name[args]]]]]]
keyword[def] identifier[flows] ( identifier[args] ): literal[string] keyword[def] identifier[flow_if_not] ( identifier[fun] ): keyword[if] identifier[isinstance] ( identifier[fun] , identifier[iterator] ): keyword[return] identifier[fun] keyword[elif] identifier[isinstance] ( identifier[fun] , identifier[type] ) keyword[and] literal[string] keyword[in] identifier[str] ( identifier[fun] . identifier[__class__] ): keyword[return] identifier[fun] keyword[else] : keyword[try] : keyword[return] identifier[flow] ( identifier[fun] ) keyword[except] identifier[AttributeError] : keyword[return] identifier[fun] keyword[return] identifier[FlowList] ( identifier[map] ( identifier[flow_if_not] , identifier[args] ))
def flows(args): """ todo : add some example :param args: :return: """ def flow_if_not(fun): # t = type(fun) if isinstance(fun, iterator): return fun # depends on [control=['if'], data=[]] elif isinstance(fun, type) and 'itertools' in str(fun.__class__): return fun # depends on [control=['if'], data=[]] else: try: return flow(fun) # depends on [control=['try'], data=[]] except AttributeError: # generator object has no attribute '__module__' return fun # depends on [control=['except'], data=[]] return FlowList(map(flow_if_not, args))
def transform(self, X=None, t_max=100, plot_optimal_t=False, ax=None): """Computes the position of the cells in the embedding space Parameters ---------- X : array, optional, shape=[n_samples, n_features] input data with `n_samples` samples and `n_dimensions` dimensions. Not required, since PHATE does not currently embed cells not given in the input matrix to `PHATE.fit()`. Accepted data types: `numpy.ndarray`, `scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If `knn_dist` is 'precomputed', `data` should be a n_samples x n_samples distance or affinity matrix t_max : int, optional, default: 100 maximum t to test if `t` is set to 'auto' plot_optimal_t : boolean, optional, default: False If true and `t` is set to 'auto', plot the Von Neumann entropy used to select t ax : matplotlib.axes.Axes, optional If given and `plot_optimal_t` is true, plot will be drawn on the given axis. Returns ------- embedding : array, shape=[n_samples, n_dimensions] The cells embedded in a lower dimensional space using PHATE """ if self.graph is None: raise NotFittedError("This PHATE instance is not fitted yet. Call " "'fit' with appropriate arguments before " "using this method.") elif X is not None and not utils.matrix_is_equivalent(X, self.X): # fit to external data warnings.warn("Pre-fit PHATE cannot be used to transform a " "new data matrix. 
Please fit PHATE to the new" " data by running 'fit' with the new data.", RuntimeWarning) if isinstance(self.graph, graphtools.graphs.TraditionalGraph) and \ self.graph.precomputed is not None: raise ValueError("Cannot transform additional data using a " "precomputed distance matrix.") else: transitions = self.graph.extend_to_data(X) return self.graph.interpolate(self.embedding, transitions) else: diff_potential = self.calculate_potential( t_max=t_max, plot_optimal_t=plot_optimal_t, ax=ax) if self.embedding is None: tasklogger.log_start("{} MDS".format(self.mds)) self.embedding = mds.embed_MDS( diff_potential, ndim=self.n_components, how=self.mds, distance_metric=self.mds_dist, n_jobs=self.n_jobs, seed=self.random_state, verbose=max(self.verbose - 1, 0)) tasklogger.log_complete("{} MDS".format(self.mds)) if isinstance(self.graph, graphtools.graphs.LandmarkGraph): tasklogger.log_debug("Extending to original data...") return self.graph.interpolate(self.embedding) else: return self.embedding
def function[transform, parameter[self, X, t_max, plot_optimal_t, ax]]: constant[Computes the position of the cells in the embedding space Parameters ---------- X : array, optional, shape=[n_samples, n_features] input data with `n_samples` samples and `n_dimensions` dimensions. Not required, since PHATE does not currently embed cells not given in the input matrix to `PHATE.fit()`. Accepted data types: `numpy.ndarray`, `scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If `knn_dist` is 'precomputed', `data` should be a n_samples x n_samples distance or affinity matrix t_max : int, optional, default: 100 maximum t to test if `t` is set to 'auto' plot_optimal_t : boolean, optional, default: False If true and `t` is set to 'auto', plot the Von Neumann entropy used to select t ax : matplotlib.axes.Axes, optional If given and `plot_optimal_t` is true, plot will be drawn on the given axis. Returns ------- embedding : array, shape=[n_samples, n_dimensions] The cells embedded in a lower dimensional space using PHATE ] if compare[name[self].graph is constant[None]] begin[:] <ast.Raise object at 0x7da18fe92020>
keyword[def] identifier[transform] ( identifier[self] , identifier[X] = keyword[None] , identifier[t_max] = literal[int] , identifier[plot_optimal_t] = keyword[False] , identifier[ax] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[graph] keyword[is] keyword[None] : keyword[raise] identifier[NotFittedError] ( literal[string] literal[string] literal[string] ) keyword[elif] identifier[X] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[utils] . identifier[matrix_is_equivalent] ( identifier[X] , identifier[self] . identifier[X] ): identifier[warnings] . identifier[warn] ( literal[string] literal[string] literal[string] , identifier[RuntimeWarning] ) keyword[if] identifier[isinstance] ( identifier[self] . identifier[graph] , identifier[graphtools] . identifier[graphs] . identifier[TraditionalGraph] ) keyword[and] identifier[self] . identifier[graph] . identifier[precomputed] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[else] : identifier[transitions] = identifier[self] . identifier[graph] . identifier[extend_to_data] ( identifier[X] ) keyword[return] identifier[self] . identifier[graph] . identifier[interpolate] ( identifier[self] . identifier[embedding] , identifier[transitions] ) keyword[else] : identifier[diff_potential] = identifier[self] . identifier[calculate_potential] ( identifier[t_max] = identifier[t_max] , identifier[plot_optimal_t] = identifier[plot_optimal_t] , identifier[ax] = identifier[ax] ) keyword[if] identifier[self] . identifier[embedding] keyword[is] keyword[None] : identifier[tasklogger] . identifier[log_start] ( literal[string] . identifier[format] ( identifier[self] . identifier[mds] )) identifier[self] . identifier[embedding] = identifier[mds] . identifier[embed_MDS] ( identifier[diff_potential] , identifier[ndim] = identifier[self] . identifier[n_components] , identifier[how] = identifier[self] . 
identifier[mds] , identifier[distance_metric] = identifier[self] . identifier[mds_dist] , identifier[n_jobs] = identifier[self] . identifier[n_jobs] , identifier[seed] = identifier[self] . identifier[random_state] , identifier[verbose] = identifier[max] ( identifier[self] . identifier[verbose] - literal[int] , literal[int] )) identifier[tasklogger] . identifier[log_complete] ( literal[string] . identifier[format] ( identifier[self] . identifier[mds] )) keyword[if] identifier[isinstance] ( identifier[self] . identifier[graph] , identifier[graphtools] . identifier[graphs] . identifier[LandmarkGraph] ): identifier[tasklogger] . identifier[log_debug] ( literal[string] ) keyword[return] identifier[self] . identifier[graph] . identifier[interpolate] ( identifier[self] . identifier[embedding] ) keyword[else] : keyword[return] identifier[self] . identifier[embedding]
def transform(self, X=None, t_max=100, plot_optimal_t=False, ax=None): """Computes the position of the cells in the embedding space Parameters ---------- X : array, optional, shape=[n_samples, n_features] input data with `n_samples` samples and `n_dimensions` dimensions. Not required, since PHATE does not currently embed cells not given in the input matrix to `PHATE.fit()`. Accepted data types: `numpy.ndarray`, `scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If `knn_dist` is 'precomputed', `data` should be a n_samples x n_samples distance or affinity matrix t_max : int, optional, default: 100 maximum t to test if `t` is set to 'auto' plot_optimal_t : boolean, optional, default: False If true and `t` is set to 'auto', plot the Von Neumann entropy used to select t ax : matplotlib.axes.Axes, optional If given and `plot_optimal_t` is true, plot will be drawn on the given axis. Returns ------- embedding : array, shape=[n_samples, n_dimensions] The cells embedded in a lower dimensional space using PHATE """ if self.graph is None: raise NotFittedError("This PHATE instance is not fitted yet. Call 'fit' with appropriate arguments before using this method.") # depends on [control=['if'], data=[]] elif X is not None and (not utils.matrix_is_equivalent(X, self.X)): # fit to external data warnings.warn("Pre-fit PHATE cannot be used to transform a new data matrix. 
Please fit PHATE to the new data by running 'fit' with the new data.", RuntimeWarning) if isinstance(self.graph, graphtools.graphs.TraditionalGraph) and self.graph.precomputed is not None: raise ValueError('Cannot transform additional data using a precomputed distance matrix.') # depends on [control=['if'], data=[]] else: transitions = self.graph.extend_to_data(X) return self.graph.interpolate(self.embedding, transitions) # depends on [control=['if'], data=[]] else: diff_potential = self.calculate_potential(t_max=t_max, plot_optimal_t=plot_optimal_t, ax=ax) if self.embedding is None: tasklogger.log_start('{} MDS'.format(self.mds)) self.embedding = mds.embed_MDS(diff_potential, ndim=self.n_components, how=self.mds, distance_metric=self.mds_dist, n_jobs=self.n_jobs, seed=self.random_state, verbose=max(self.verbose - 1, 0)) tasklogger.log_complete('{} MDS'.format(self.mds)) # depends on [control=['if'], data=[]] if isinstance(self.graph, graphtools.graphs.LandmarkGraph): tasklogger.log_debug('Extending to original data...') return self.graph.interpolate(self.embedding) # depends on [control=['if'], data=[]] else: return self.embedding
def requirement_spec(package_name, *args): """Identifier used when specifying a requirement to pip or setuptools.""" if not args or args == (None,): return package_name version_specs = [] for version_spec in args: if isinstance(version_spec, (list, tuple)): operator, version = version_spec else: assert isinstance(version_spec, str) operator = "==" version = version_spec version_specs.append("%s%s" % (operator, version)) return "%s%s" % (package_name, ",".join(version_specs))
def function[requirement_spec, parameter[package_name]]: constant[Identifier used when specifying a requirement to pip or setuptools.] if <ast.BoolOp object at 0x7da18f09c220> begin[:] return[name[package_name]] variable[version_specs] assign[=] list[[]] for taget[name[version_spec]] in starred[name[args]] begin[:] if call[name[isinstance], parameter[name[version_spec], tuple[[<ast.Name object at 0x7da18f09e590>, <ast.Name object at 0x7da18f09db10>]]]] begin[:] <ast.Tuple object at 0x7da18f09ebf0> assign[=] name[version_spec] call[name[version_specs].append, parameter[binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f09ca60>, <ast.Name object at 0x7da18f09fdf0>]]]]] return[binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f09f040>, <ast.Call object at 0x7da18f09dba0>]]]]
keyword[def] identifier[requirement_spec] ( identifier[package_name] ,* identifier[args] ): literal[string] keyword[if] keyword[not] identifier[args] keyword[or] identifier[args] ==( keyword[None] ,): keyword[return] identifier[package_name] identifier[version_specs] =[] keyword[for] identifier[version_spec] keyword[in] identifier[args] : keyword[if] identifier[isinstance] ( identifier[version_spec] ,( identifier[list] , identifier[tuple] )): identifier[operator] , identifier[version] = identifier[version_spec] keyword[else] : keyword[assert] identifier[isinstance] ( identifier[version_spec] , identifier[str] ) identifier[operator] = literal[string] identifier[version] = identifier[version_spec] identifier[version_specs] . identifier[append] ( literal[string] %( identifier[operator] , identifier[version] )) keyword[return] literal[string] %( identifier[package_name] , literal[string] . identifier[join] ( identifier[version_specs] ))
def requirement_spec(package_name, *args): """Identifier used when specifying a requirement to pip or setuptools.""" if not args or args == (None,): return package_name # depends on [control=['if'], data=[]] version_specs = [] for version_spec in args: if isinstance(version_spec, (list, tuple)): (operator, version) = version_spec # depends on [control=['if'], data=[]] else: assert isinstance(version_spec, str) operator = '==' version = version_spec version_specs.append('%s%s' % (operator, version)) # depends on [control=['for'], data=['version_spec']] return '%s%s' % (package_name, ','.join(version_specs))
def _rename(self, node, name): ''' Rename an internal node of the tree. If an annotation is already present, append the new annotation to the end of it. If a bootstrap value is present, add annotations are added after a ":" as per standard newick format. Parameters ---------- node: dendropy.Node dendropy.Node object name : string Annotation to rename the node with. ''' if node.label: try: float(node.label) new_label = "%s:%s" % (node.label, name) except ValueError: new_label = "%s; %s" % (node.label, name) node.label = new_label else: node.label = name
def function[_rename, parameter[self, node, name]]: constant[ Rename an internal node of the tree. If an annotation is already present, append the new annotation to the end of it. If a bootstrap value is present, add annotations are added after a ":" as per standard newick format. Parameters ---------- node: dendropy.Node dendropy.Node object name : string Annotation to rename the node with. ] if name[node].label begin[:] <ast.Try object at 0x7da18f723c40> name[node].label assign[=] name[new_label]
keyword[def] identifier[_rename] ( identifier[self] , identifier[node] , identifier[name] ): literal[string] keyword[if] identifier[node] . identifier[label] : keyword[try] : identifier[float] ( identifier[node] . identifier[label] ) identifier[new_label] = literal[string] %( identifier[node] . identifier[label] , identifier[name] ) keyword[except] identifier[ValueError] : identifier[new_label] = literal[string] %( identifier[node] . identifier[label] , identifier[name] ) identifier[node] . identifier[label] = identifier[new_label] keyword[else] : identifier[node] . identifier[label] = identifier[name]
def _rename(self, node, name): """ Rename an internal node of the tree. If an annotation is already present, append the new annotation to the end of it. If a bootstrap value is present, add annotations are added after a ":" as per standard newick format. Parameters ---------- node: dendropy.Node dendropy.Node object name : string Annotation to rename the node with. """ if node.label: try: float(node.label) new_label = '%s:%s' % (node.label, name) # depends on [control=['try'], data=[]] except ValueError: new_label = '%s; %s' % (node.label, name) # depends on [control=['except'], data=[]] node.label = new_label # depends on [control=['if'], data=[]] else: node.label = name
def _viewbox_set(self, viewbox): """ Friend method of viewbox to register itself. """ self._viewbox = viewbox # Connect viewbox.events.mouse_press.connect(self.viewbox_mouse_event) viewbox.events.mouse_release.connect(self.viewbox_mouse_event) viewbox.events.mouse_move.connect(self.viewbox_mouse_event) viewbox.events.mouse_wheel.connect(self.viewbox_mouse_event) viewbox.events.resize.connect(self.viewbox_resize_event)
def function[_viewbox_set, parameter[self, viewbox]]: constant[ Friend method of viewbox to register itself. ] name[self]._viewbox assign[=] name[viewbox] call[name[viewbox].events.mouse_press.connect, parameter[name[self].viewbox_mouse_event]] call[name[viewbox].events.mouse_release.connect, parameter[name[self].viewbox_mouse_event]] call[name[viewbox].events.mouse_move.connect, parameter[name[self].viewbox_mouse_event]] call[name[viewbox].events.mouse_wheel.connect, parameter[name[self].viewbox_mouse_event]] call[name[viewbox].events.resize.connect, parameter[name[self].viewbox_resize_event]]
keyword[def] identifier[_viewbox_set] ( identifier[self] , identifier[viewbox] ): literal[string] identifier[self] . identifier[_viewbox] = identifier[viewbox] identifier[viewbox] . identifier[events] . identifier[mouse_press] . identifier[connect] ( identifier[self] . identifier[viewbox_mouse_event] ) identifier[viewbox] . identifier[events] . identifier[mouse_release] . identifier[connect] ( identifier[self] . identifier[viewbox_mouse_event] ) identifier[viewbox] . identifier[events] . identifier[mouse_move] . identifier[connect] ( identifier[self] . identifier[viewbox_mouse_event] ) identifier[viewbox] . identifier[events] . identifier[mouse_wheel] . identifier[connect] ( identifier[self] . identifier[viewbox_mouse_event] ) identifier[viewbox] . identifier[events] . identifier[resize] . identifier[connect] ( identifier[self] . identifier[viewbox_resize_event] )
def _viewbox_set(self, viewbox): """ Friend method of viewbox to register itself. """ self._viewbox = viewbox # Connect viewbox.events.mouse_press.connect(self.viewbox_mouse_event) viewbox.events.mouse_release.connect(self.viewbox_mouse_event) viewbox.events.mouse_move.connect(self.viewbox_mouse_event) viewbox.events.mouse_wheel.connect(self.viewbox_mouse_event) viewbox.events.resize.connect(self.viewbox_resize_event)
def parse_registry_uri(uri: str) -> RegistryURI: """ Validate and return (authority, pkg name, version) from a valid registry URI """ validate_registry_uri(uri) parsed_uri = parse.urlparse(uri) authority = parsed_uri.netloc pkg_name = parsed_uri.path.strip("/") pkg_version = parsed_uri.query.lstrip("version=").strip("/") return RegistryURI(authority, pkg_name, pkg_version)
def function[parse_registry_uri, parameter[uri]]: constant[ Validate and return (authority, pkg name, version) from a valid registry URI ] call[name[validate_registry_uri], parameter[name[uri]]] variable[parsed_uri] assign[=] call[name[parse].urlparse, parameter[name[uri]]] variable[authority] assign[=] name[parsed_uri].netloc variable[pkg_name] assign[=] call[name[parsed_uri].path.strip, parameter[constant[/]]] variable[pkg_version] assign[=] call[call[name[parsed_uri].query.lstrip, parameter[constant[version=]]].strip, parameter[constant[/]]] return[call[name[RegistryURI], parameter[name[authority], name[pkg_name], name[pkg_version]]]]
keyword[def] identifier[parse_registry_uri] ( identifier[uri] : identifier[str] )-> identifier[RegistryURI] : literal[string] identifier[validate_registry_uri] ( identifier[uri] ) identifier[parsed_uri] = identifier[parse] . identifier[urlparse] ( identifier[uri] ) identifier[authority] = identifier[parsed_uri] . identifier[netloc] identifier[pkg_name] = identifier[parsed_uri] . identifier[path] . identifier[strip] ( literal[string] ) identifier[pkg_version] = identifier[parsed_uri] . identifier[query] . identifier[lstrip] ( literal[string] ). identifier[strip] ( literal[string] ) keyword[return] identifier[RegistryURI] ( identifier[authority] , identifier[pkg_name] , identifier[pkg_version] )
def parse_registry_uri(uri: str) -> RegistryURI: """ Validate and return (authority, pkg name, version) from a valid registry URI """ validate_registry_uri(uri) parsed_uri = parse.urlparse(uri) authority = parsed_uri.netloc pkg_name = parsed_uri.path.strip('/') pkg_version = parsed_uri.query.lstrip('version=').strip('/') return RegistryURI(authority, pkg_name, pkg_version)
def title(self, title, *args): """ Add a title to your chart args are optional style params of the form <color>,<font size> APIPARAMS: chtt,chts """ self['chtt'] = title if args: args = color_args(args, 0) self['chts'] = ','.join(map(str,args)) return self
def function[title, parameter[self, title]]: constant[ Add a title to your chart args are optional style params of the form <color>,<font size> APIPARAMS: chtt,chts ] call[name[self]][constant[chtt]] assign[=] name[title] if name[args] begin[:] variable[args] assign[=] call[name[color_args], parameter[name[args], constant[0]]] call[name[self]][constant[chts]] assign[=] call[constant[,].join, parameter[call[name[map], parameter[name[str], name[args]]]]] return[name[self]]
keyword[def] identifier[title] ( identifier[self] , identifier[title] ,* identifier[args] ): literal[string] identifier[self] [ literal[string] ]= identifier[title] keyword[if] identifier[args] : identifier[args] = identifier[color_args] ( identifier[args] , literal[int] ) identifier[self] [ literal[string] ]= literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[args] )) keyword[return] identifier[self]
def title(self, title, *args): """ Add a title to your chart args are optional style params of the form <color>,<font size> APIPARAMS: chtt,chts """ self['chtt'] = title if args: args = color_args(args, 0) self['chts'] = ','.join(map(str, args)) # depends on [control=['if'], data=[]] return self
def leaves(self): "Generator that returns the leaves of the tree" for child in self.children: for x in child.leaves(): yield x if not self.children: yield self
def function[leaves, parameter[self]]: constant[Generator that returns the leaves of the tree] for taget[name[child]] in starred[name[self].children] begin[:] for taget[name[x]] in starred[call[name[child].leaves, parameter[]]] begin[:] <ast.Yield object at 0x7da1b08d9c00> if <ast.UnaryOp object at 0x7da1b08db610> begin[:] <ast.Yield object at 0x7da1b08dacb0>
keyword[def] identifier[leaves] ( identifier[self] ): literal[string] keyword[for] identifier[child] keyword[in] identifier[self] . identifier[children] : keyword[for] identifier[x] keyword[in] identifier[child] . identifier[leaves] (): keyword[yield] identifier[x] keyword[if] keyword[not] identifier[self] . identifier[children] : keyword[yield] identifier[self]
def leaves(self): """Generator that returns the leaves of the tree""" for child in self.children: for x in child.leaves(): yield x # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['child']] if not self.children: yield self # depends on [control=['if'], data=[]]
def setup_gui(self): """Setup the main layout of the widget.""" layout = QGridLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.addWidget(self.canvas, 0, 1) layout.addLayout(self.setup_toolbar(), 0, 3, 2, 1) layout.setColumnStretch(0, 100) layout.setColumnStretch(2, 100) layout.setRowStretch(1, 100)
def function[setup_gui, parameter[self]]: constant[Setup the main layout of the widget.] variable[layout] assign[=] call[name[QGridLayout], parameter[name[self]]] call[name[layout].setContentsMargins, parameter[constant[0], constant[0], constant[0], constant[0]]] call[name[layout].addWidget, parameter[name[self].canvas, constant[0], constant[1]]] call[name[layout].addLayout, parameter[call[name[self].setup_toolbar, parameter[]], constant[0], constant[3], constant[2], constant[1]]] call[name[layout].setColumnStretch, parameter[constant[0], constant[100]]] call[name[layout].setColumnStretch, parameter[constant[2], constant[100]]] call[name[layout].setRowStretch, parameter[constant[1], constant[100]]]
keyword[def] identifier[setup_gui] ( identifier[self] ): literal[string] identifier[layout] = identifier[QGridLayout] ( identifier[self] ) identifier[layout] . identifier[setContentsMargins] ( literal[int] , literal[int] , literal[int] , literal[int] ) identifier[layout] . identifier[addWidget] ( identifier[self] . identifier[canvas] , literal[int] , literal[int] ) identifier[layout] . identifier[addLayout] ( identifier[self] . identifier[setup_toolbar] (), literal[int] , literal[int] , literal[int] , literal[int] ) identifier[layout] . identifier[setColumnStretch] ( literal[int] , literal[int] ) identifier[layout] . identifier[setColumnStretch] ( literal[int] , literal[int] ) identifier[layout] . identifier[setRowStretch] ( literal[int] , literal[int] )
def setup_gui(self): """Setup the main layout of the widget.""" layout = QGridLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.addWidget(self.canvas, 0, 1) layout.addLayout(self.setup_toolbar(), 0, 3, 2, 1) layout.setColumnStretch(0, 100) layout.setColumnStretch(2, 100) layout.setRowStretch(1, 100)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle): """ Based on the C{style} and C{currentDayStyle} determine what day-of-week value is to be returned. @type wd: integer @param wd: day-of-week value for the current day @type wkdy: integer @param wkdy: day-of-week value for the parsed day @type offset: integer @param offset: offset direction for any modifiers (-1, 0, 1) @type style: integer @param style: normally the value set in C{Constants.DOWParseStyle} @type currentDayStyle: integer @param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle} @rtype: integer @return: calculated day-of-week """ diffBase = wkdy - wd origOffset = offset if offset == 2: # no modifier is present. # i.e. string to be parsed is just DOW if wkdy * style > wd * style or \ currentDayStyle and wkdy == wd: # wkdy located in current week offset = 0 elif style in (-1, 1): # wkdy located in last (-1) or next (1) week offset = style else: # invalid style, or should raise error? offset = 0 # offset = -1 means last week # offset = 0 means current week # offset = 1 means next week diff = diffBase + 7 * offset if style == 1 and diff < -7: diff += 7 elif style == -1 and diff > 7: diff -= 7 debug and log.debug("wd %s, wkdy %s, offset %d, " "style %d, currentDayStyle %d", wd, wkdy, origOffset, style, currentDayStyle) return diff
def function[_CalculateDOWDelta, parameter[self, wd, wkdy, offset, style, currentDayStyle]]: constant[ Based on the C{style} and C{currentDayStyle} determine what day-of-week value is to be returned. @type wd: integer @param wd: day-of-week value for the current day @type wkdy: integer @param wkdy: day-of-week value for the parsed day @type offset: integer @param offset: offset direction for any modifiers (-1, 0, 1) @type style: integer @param style: normally the value set in C{Constants.DOWParseStyle} @type currentDayStyle: integer @param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle} @rtype: integer @return: calculated day-of-week ] variable[diffBase] assign[=] binary_operation[name[wkdy] - name[wd]] variable[origOffset] assign[=] name[offset] if compare[name[offset] equal[==] constant[2]] begin[:] if <ast.BoolOp object at 0x7da18bcc98d0> begin[:] variable[offset] assign[=] constant[0] variable[diff] assign[=] binary_operation[name[diffBase] + binary_operation[constant[7] * name[offset]]] if <ast.BoolOp object at 0x7da18bcc98a0> begin[:] <ast.AugAssign object at 0x7da18bcc8fd0> <ast.BoolOp object at 0x7da18bcc8d00> return[name[diff]]
keyword[def] identifier[_CalculateDOWDelta] ( identifier[self] , identifier[wd] , identifier[wkdy] , identifier[offset] , identifier[style] , identifier[currentDayStyle] ): literal[string] identifier[diffBase] = identifier[wkdy] - identifier[wd] identifier[origOffset] = identifier[offset] keyword[if] identifier[offset] == literal[int] : keyword[if] identifier[wkdy] * identifier[style] > identifier[wd] * identifier[style] keyword[or] identifier[currentDayStyle] keyword[and] identifier[wkdy] == identifier[wd] : identifier[offset] = literal[int] keyword[elif] identifier[style] keyword[in] (- literal[int] , literal[int] ): identifier[offset] = identifier[style] keyword[else] : identifier[offset] = literal[int] identifier[diff] = identifier[diffBase] + literal[int] * identifier[offset] keyword[if] identifier[style] == literal[int] keyword[and] identifier[diff] <- literal[int] : identifier[diff] += literal[int] keyword[elif] identifier[style] ==- literal[int] keyword[and] identifier[diff] > literal[int] : identifier[diff] -= literal[int] identifier[debug] keyword[and] identifier[log] . identifier[debug] ( literal[string] literal[string] , identifier[wd] , identifier[wkdy] , identifier[origOffset] , identifier[style] , identifier[currentDayStyle] ) keyword[return] identifier[diff]
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle): """ Based on the C{style} and C{currentDayStyle} determine what day-of-week value is to be returned. @type wd: integer @param wd: day-of-week value for the current day @type wkdy: integer @param wkdy: day-of-week value for the parsed day @type offset: integer @param offset: offset direction for any modifiers (-1, 0, 1) @type style: integer @param style: normally the value set in C{Constants.DOWParseStyle} @type currentDayStyle: integer @param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle} @rtype: integer @return: calculated day-of-week """ diffBase = wkdy - wd origOffset = offset if offset == 2: # no modifier is present. # i.e. string to be parsed is just DOW if wkdy * style > wd * style or (currentDayStyle and wkdy == wd): # wkdy located in current week offset = 0 # depends on [control=['if'], data=[]] elif style in (-1, 1): # wkdy located in last (-1) or next (1) week offset = style # depends on [control=['if'], data=['style']] else: # invalid style, or should raise error? offset = 0 # depends on [control=['if'], data=['offset']] # offset = -1 means last week # offset = 0 means current week # offset = 1 means next week diff = diffBase + 7 * offset if style == 1 and diff < -7: diff += 7 # depends on [control=['if'], data=[]] elif style == -1 and diff > 7: diff -= 7 # depends on [control=['if'], data=[]] debug and log.debug('wd %s, wkdy %s, offset %d, style %d, currentDayStyle %d', wd, wkdy, origOffset, style, currentDayStyle) return diff
def get_files(self, file_paths): """ returns a list of files faster by using threads """ results = [] def get_file_thunk(path, interface): result = error = None try: result = interface.get_file(path) except Exception as err: error = err # important to print immediately because # errors are collected at the end print(err) content, encoding = result content = compression.decompress(content, encoding) results.append({ "filename": path, "content": content, "error": error, }) for path in file_paths: if len(self._threads): self.put(partial(get_file_thunk, path)) else: get_file_thunk(path, self._interface) desc = 'Downloading' if self.progress else None self.wait(desc) return results
def function[get_files, parameter[self, file_paths]]: constant[ returns a list of files faster by using threads ] variable[results] assign[=] list[[]] def function[get_file_thunk, parameter[path, interface]]: variable[result] assign[=] constant[None] <ast.Try object at 0x7da20c794250> <ast.Tuple object at 0x7da20c7943a0> assign[=] name[result] variable[content] assign[=] call[name[compression].decompress, parameter[name[content], name[encoding]]] call[name[results].append, parameter[dictionary[[<ast.Constant object at 0x7da1b0ef6230>, <ast.Constant object at 0x7da1b0ef4490>, <ast.Constant object at 0x7da1b0ef7ac0>], [<ast.Name object at 0x7da1b0ef6620>, <ast.Name object at 0x7da1b0ef71c0>, <ast.Name object at 0x7da1b0ef6980>]]]] for taget[name[path]] in starred[name[file_paths]] begin[:] if call[name[len], parameter[name[self]._threads]] begin[:] call[name[self].put, parameter[call[name[partial], parameter[name[get_file_thunk], name[path]]]]] variable[desc] assign[=] <ast.IfExp object at 0x7da1b0ef55d0> call[name[self].wait, parameter[name[desc]]] return[name[results]]
keyword[def] identifier[get_files] ( identifier[self] , identifier[file_paths] ): literal[string] identifier[results] =[] keyword[def] identifier[get_file_thunk] ( identifier[path] , identifier[interface] ): identifier[result] = identifier[error] = keyword[None] keyword[try] : identifier[result] = identifier[interface] . identifier[get_file] ( identifier[path] ) keyword[except] identifier[Exception] keyword[as] identifier[err] : identifier[error] = identifier[err] identifier[print] ( identifier[err] ) identifier[content] , identifier[encoding] = identifier[result] identifier[content] = identifier[compression] . identifier[decompress] ( identifier[content] , identifier[encoding] ) identifier[results] . identifier[append] ({ literal[string] : identifier[path] , literal[string] : identifier[content] , literal[string] : identifier[error] , }) keyword[for] identifier[path] keyword[in] identifier[file_paths] : keyword[if] identifier[len] ( identifier[self] . identifier[_threads] ): identifier[self] . identifier[put] ( identifier[partial] ( identifier[get_file_thunk] , identifier[path] )) keyword[else] : identifier[get_file_thunk] ( identifier[path] , identifier[self] . identifier[_interface] ) identifier[desc] = literal[string] keyword[if] identifier[self] . identifier[progress] keyword[else] keyword[None] identifier[self] . identifier[wait] ( identifier[desc] ) keyword[return] identifier[results]
def get_files(self, file_paths): """ returns a list of files faster by using threads """ results = [] def get_file_thunk(path, interface): result = error = None try: result = interface.get_file(path) # depends on [control=['try'], data=[]] except Exception as err: error = err # important to print immediately because # errors are collected at the end print(err) # depends on [control=['except'], data=['err']] (content, encoding) = result content = compression.decompress(content, encoding) results.append({'filename': path, 'content': content, 'error': error}) for path in file_paths: if len(self._threads): self.put(partial(get_file_thunk, path)) # depends on [control=['if'], data=[]] else: get_file_thunk(path, self._interface) # depends on [control=['for'], data=['path']] desc = 'Downloading' if self.progress else None self.wait(desc) return results
def _check_symlink_ownership(path, user, group, win_owner): ''' Check if the symlink ownership matches the specified user and group ''' cur_user, cur_group = _get_symlink_ownership(path) if salt.utils.platform.is_windows(): return win_owner == cur_user else: return (cur_user == user) and (cur_group == group)
def function[_check_symlink_ownership, parameter[path, user, group, win_owner]]: constant[ Check if the symlink ownership matches the specified user and group ] <ast.Tuple object at 0x7da1b20ed990> assign[=] call[name[_get_symlink_ownership], parameter[name[path]]] if call[name[salt].utils.platform.is_windows, parameter[]] begin[:] return[compare[name[win_owner] equal[==] name[cur_user]]]
keyword[def] identifier[_check_symlink_ownership] ( identifier[path] , identifier[user] , identifier[group] , identifier[win_owner] ): literal[string] identifier[cur_user] , identifier[cur_group] = identifier[_get_symlink_ownership] ( identifier[path] ) keyword[if] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_windows] (): keyword[return] identifier[win_owner] == identifier[cur_user] keyword[else] : keyword[return] ( identifier[cur_user] == identifier[user] ) keyword[and] ( identifier[cur_group] == identifier[group] )
def _check_symlink_ownership(path, user, group, win_owner): """ Check if the symlink ownership matches the specified user and group """ (cur_user, cur_group) = _get_symlink_ownership(path) if salt.utils.platform.is_windows(): return win_owner == cur_user # depends on [control=['if'], data=[]] else: return cur_user == user and cur_group == group
def read_playlist_file(self, stationFile=''): """ Read a csv file Returns: number x - number of stations or -1 - playlist is malformed -2 - playlist not found """ prev_file = self.stations_file prev_format = self.new_format self.new_format = False ret = 0 stationFile, ret = self._get_playlist_abspath_from_data(stationFile) if ret < 0: return ret self._reading_stations = [] with open(stationFile, 'r') as cfgfile: try: for row in csv.reader(filter(lambda row: row[0]!='#', cfgfile), skipinitialspace=True): if not row: continue try: name, url = [s.strip() for s in row] self._reading_stations.append((name, url, '')) except: name, url, enc = [s.strip() for s in row] self._reading_stations.append((name, url, enc)) self.new_format = True except: self._reading_stations = [] self.new_format = prev_format return -1 self.stations = list(self._reading_stations) self._reading_stations = [] self._get_playlist_elements(stationFile) self.previous_stations_file = prev_file self._is_playlist_in_config_dir() self.number_of_stations = len(self.stations) self.dirty_playlist = False if logger.isEnabledFor(logging.DEBUG): if self.new_format: logger.debug('Playlist is in new format') else: logger.debug('Playlist is in old format') return self.number_of_stations
def function[read_playlist_file, parameter[self, stationFile]]: constant[ Read a csv file Returns: number x - number of stations or -1 - playlist is malformed -2 - playlist not found ] variable[prev_file] assign[=] name[self].stations_file variable[prev_format] assign[=] name[self].new_format name[self].new_format assign[=] constant[False] variable[ret] assign[=] constant[0] <ast.Tuple object at 0x7da1b1038c40> assign[=] call[name[self]._get_playlist_abspath_from_data, parameter[name[stationFile]]] if compare[name[ret] less[<] constant[0]] begin[:] return[name[ret]] name[self]._reading_stations assign[=] list[[]] with call[name[open], parameter[name[stationFile], constant[r]]] begin[:] <ast.Try object at 0x7da1b103b550> name[self].stations assign[=] call[name[list], parameter[name[self]._reading_stations]] name[self]._reading_stations assign[=] list[[]] call[name[self]._get_playlist_elements, parameter[name[stationFile]]] name[self].previous_stations_file assign[=] name[prev_file] call[name[self]._is_playlist_in_config_dir, parameter[]] name[self].number_of_stations assign[=] call[name[len], parameter[name[self].stations]] name[self].dirty_playlist assign[=] constant[False] if call[name[logger].isEnabledFor, parameter[name[logging].DEBUG]] begin[:] if name[self].new_format begin[:] call[name[logger].debug, parameter[constant[Playlist is in new format]]] return[name[self].number_of_stations]
keyword[def] identifier[read_playlist_file] ( identifier[self] , identifier[stationFile] = literal[string] ): literal[string] identifier[prev_file] = identifier[self] . identifier[stations_file] identifier[prev_format] = identifier[self] . identifier[new_format] identifier[self] . identifier[new_format] = keyword[False] identifier[ret] = literal[int] identifier[stationFile] , identifier[ret] = identifier[self] . identifier[_get_playlist_abspath_from_data] ( identifier[stationFile] ) keyword[if] identifier[ret] < literal[int] : keyword[return] identifier[ret] identifier[self] . identifier[_reading_stations] =[] keyword[with] identifier[open] ( identifier[stationFile] , literal[string] ) keyword[as] identifier[cfgfile] : keyword[try] : keyword[for] identifier[row] keyword[in] identifier[csv] . identifier[reader] ( identifier[filter] ( keyword[lambda] identifier[row] : identifier[row] [ literal[int] ]!= literal[string] , identifier[cfgfile] ), identifier[skipinitialspace] = keyword[True] ): keyword[if] keyword[not] identifier[row] : keyword[continue] keyword[try] : identifier[name] , identifier[url] =[ identifier[s] . identifier[strip] () keyword[for] identifier[s] keyword[in] identifier[row] ] identifier[self] . identifier[_reading_stations] . identifier[append] (( identifier[name] , identifier[url] , literal[string] )) keyword[except] : identifier[name] , identifier[url] , identifier[enc] =[ identifier[s] . identifier[strip] () keyword[for] identifier[s] keyword[in] identifier[row] ] identifier[self] . identifier[_reading_stations] . identifier[append] (( identifier[name] , identifier[url] , identifier[enc] )) identifier[self] . identifier[new_format] = keyword[True] keyword[except] : identifier[self] . identifier[_reading_stations] =[] identifier[self] . identifier[new_format] = identifier[prev_format] keyword[return] - literal[int] identifier[self] . identifier[stations] = identifier[list] ( identifier[self] . identifier[_reading_stations] ) identifier[self] . 
identifier[_reading_stations] =[] identifier[self] . identifier[_get_playlist_elements] ( identifier[stationFile] ) identifier[self] . identifier[previous_stations_file] = identifier[prev_file] identifier[self] . identifier[_is_playlist_in_config_dir] () identifier[self] . identifier[number_of_stations] = identifier[len] ( identifier[self] . identifier[stations] ) identifier[self] . identifier[dirty_playlist] = keyword[False] keyword[if] identifier[logger] . identifier[isEnabledFor] ( identifier[logging] . identifier[DEBUG] ): keyword[if] identifier[self] . identifier[new_format] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[else] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[self] . identifier[number_of_stations]
def read_playlist_file(self, stationFile=''): """ Read a csv file Returns: number x - number of stations or -1 - playlist is malformed -2 - playlist not found """ prev_file = self.stations_file prev_format = self.new_format self.new_format = False ret = 0 (stationFile, ret) = self._get_playlist_abspath_from_data(stationFile) if ret < 0: return ret # depends on [control=['if'], data=['ret']] self._reading_stations = [] with open(stationFile, 'r') as cfgfile: try: for row in csv.reader(filter(lambda row: row[0] != '#', cfgfile), skipinitialspace=True): if not row: continue # depends on [control=['if'], data=[]] try: (name, url) = [s.strip() for s in row] self._reading_stations.append((name, url, '')) # depends on [control=['try'], data=[]] except: (name, url, enc) = [s.strip() for s in row] self._reading_stations.append((name, url, enc)) self.new_format = True # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['row']] # depends on [control=['try'], data=[]] except: self._reading_stations = [] self.new_format = prev_format return -1 # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['cfgfile']] self.stations = list(self._reading_stations) self._reading_stations = [] self._get_playlist_elements(stationFile) self.previous_stations_file = prev_file self._is_playlist_in_config_dir() self.number_of_stations = len(self.stations) self.dirty_playlist = False if logger.isEnabledFor(logging.DEBUG): if self.new_format: logger.debug('Playlist is in new format') # depends on [control=['if'], data=[]] else: logger.debug('Playlist is in old format') # depends on [control=['if'], data=[]] return self.number_of_stations
def plot_channel_sweep(proxy, start_channel): ''' Parameters ---------- proxy : DMFControlBoard start_channel : int Channel number from which to start a channel sweep (should be a multiple of 40, e.g., 0, 40, 80). Returns ------- pandas.DataFrame See description of return of :func:`sweep_channels`. ''' test_loads = TEST_LOADS.copy() test_loads.index += start_channel results = sweep_channels(proxy, test_loads) normalized_measurements = (results['measured capacitance'] / results['expected capacitance']) fig, axis = plt.subplots(figsize=(10, 8)) axis.bar(normalized_measurements.index - 0.3, normalized_measurements, width=0.6, edgecolor='none', facecolor='limegreen') axis.set_xlim(left=test_loads.index.min() - 0.5, right=test_loads.index.max() + 0.5) axis.set_xlabel('channel') axis.set_ylabel(r'$\frac{C_{\tt{measured}}}{C_{\tt{expected}}}$', fontsize=28) return results
def function[plot_channel_sweep, parameter[proxy, start_channel]]: constant[ Parameters ---------- proxy : DMFControlBoard start_channel : int Channel number from which to start a channel sweep (should be a multiple of 40, e.g., 0, 40, 80). Returns ------- pandas.DataFrame See description of return of :func:`sweep_channels`. ] variable[test_loads] assign[=] call[name[TEST_LOADS].copy, parameter[]] <ast.AugAssign object at 0x7da204623af0> variable[results] assign[=] call[name[sweep_channels], parameter[name[proxy], name[test_loads]]] variable[normalized_measurements] assign[=] binary_operation[call[name[results]][constant[measured capacitance]] / call[name[results]][constant[expected capacitance]]] <ast.Tuple object at 0x7da2046233d0> assign[=] call[name[plt].subplots, parameter[]] call[name[axis].bar, parameter[binary_operation[name[normalized_measurements].index - constant[0.3]], name[normalized_measurements]]] call[name[axis].set_xlim, parameter[]] call[name[axis].set_xlabel, parameter[constant[channel]]] call[name[axis].set_ylabel, parameter[constant[$\frac{C_{\tt{measured}}}{C_{\tt{expected}}}$]]] return[name[results]]
keyword[def] identifier[plot_channel_sweep] ( identifier[proxy] , identifier[start_channel] ): literal[string] identifier[test_loads] = identifier[TEST_LOADS] . identifier[copy] () identifier[test_loads] . identifier[index] += identifier[start_channel] identifier[results] = identifier[sweep_channels] ( identifier[proxy] , identifier[test_loads] ) identifier[normalized_measurements] =( identifier[results] [ literal[string] ] / identifier[results] [ literal[string] ]) identifier[fig] , identifier[axis] = identifier[plt] . identifier[subplots] ( identifier[figsize] =( literal[int] , literal[int] )) identifier[axis] . identifier[bar] ( identifier[normalized_measurements] . identifier[index] - literal[int] , identifier[normalized_measurements] , identifier[width] = literal[int] , identifier[edgecolor] = literal[string] , identifier[facecolor] = literal[string] ) identifier[axis] . identifier[set_xlim] ( identifier[left] = identifier[test_loads] . identifier[index] . identifier[min] ()- literal[int] , identifier[right] = identifier[test_loads] . identifier[index] . identifier[max] ()+ literal[int] ) identifier[axis] . identifier[set_xlabel] ( literal[string] ) identifier[axis] . identifier[set_ylabel] ( literal[string] , identifier[fontsize] = literal[int] ) keyword[return] identifier[results]
def plot_channel_sweep(proxy, start_channel): """ Parameters ---------- proxy : DMFControlBoard start_channel : int Channel number from which to start a channel sweep (should be a multiple of 40, e.g., 0, 40, 80). Returns ------- pandas.DataFrame See description of return of :func:`sweep_channels`. """ test_loads = TEST_LOADS.copy() test_loads.index += start_channel results = sweep_channels(proxy, test_loads) normalized_measurements = results['measured capacitance'] / results['expected capacitance'] (fig, axis) = plt.subplots(figsize=(10, 8)) axis.bar(normalized_measurements.index - 0.3, normalized_measurements, width=0.6, edgecolor='none', facecolor='limegreen') axis.set_xlim(left=test_loads.index.min() - 0.5, right=test_loads.index.max() + 0.5) axis.set_xlabel('channel') axis.set_ylabel('$\\frac{C_{\\tt{measured}}}{C_{\\tt{expected}}}$', fontsize=28) return results
def _organize_by_position(orig_file, cmp_file, chunk_size): """Read two CSV files of qualities, organizing values by position. """ with open(orig_file) as in_handle: reader1 = csv.reader(in_handle) positions = len(next(reader1)) - 1 for positions in _chunks(range(positions), chunk_size): with open(orig_file) as orig_handle: with open(cmp_file) as cmp_handle: orig_reader = csv.reader(orig_handle) cmp_reader = csv.reader(cmp_handle) for item in _counts_at_position(positions, orig_reader, cmp_reader): yield item
def function[_organize_by_position, parameter[orig_file, cmp_file, chunk_size]]: constant[Read two CSV files of qualities, organizing values by position. ] with call[name[open], parameter[name[orig_file]]] begin[:] variable[reader1] assign[=] call[name[csv].reader, parameter[name[in_handle]]] variable[positions] assign[=] binary_operation[call[name[len], parameter[call[name[next], parameter[name[reader1]]]]] - constant[1]] for taget[name[positions]] in starred[call[name[_chunks], parameter[call[name[range], parameter[name[positions]]], name[chunk_size]]]] begin[:] with call[name[open], parameter[name[orig_file]]] begin[:] with call[name[open], parameter[name[cmp_file]]] begin[:] variable[orig_reader] assign[=] call[name[csv].reader, parameter[name[orig_handle]]] variable[cmp_reader] assign[=] call[name[csv].reader, parameter[name[cmp_handle]]] for taget[name[item]] in starred[call[name[_counts_at_position], parameter[name[positions], name[orig_reader], name[cmp_reader]]]] begin[:] <ast.Yield object at 0x7da1b1986530>
keyword[def] identifier[_organize_by_position] ( identifier[orig_file] , identifier[cmp_file] , identifier[chunk_size] ): literal[string] keyword[with] identifier[open] ( identifier[orig_file] ) keyword[as] identifier[in_handle] : identifier[reader1] = identifier[csv] . identifier[reader] ( identifier[in_handle] ) identifier[positions] = identifier[len] ( identifier[next] ( identifier[reader1] ))- literal[int] keyword[for] identifier[positions] keyword[in] identifier[_chunks] ( identifier[range] ( identifier[positions] ), identifier[chunk_size] ): keyword[with] identifier[open] ( identifier[orig_file] ) keyword[as] identifier[orig_handle] : keyword[with] identifier[open] ( identifier[cmp_file] ) keyword[as] identifier[cmp_handle] : identifier[orig_reader] = identifier[csv] . identifier[reader] ( identifier[orig_handle] ) identifier[cmp_reader] = identifier[csv] . identifier[reader] ( identifier[cmp_handle] ) keyword[for] identifier[item] keyword[in] identifier[_counts_at_position] ( identifier[positions] , identifier[orig_reader] , identifier[cmp_reader] ): keyword[yield] identifier[item]
def _organize_by_position(orig_file, cmp_file, chunk_size): """Read two CSV files of qualities, organizing values by position. """ with open(orig_file) as in_handle: reader1 = csv.reader(in_handle) positions = len(next(reader1)) - 1 # depends on [control=['with'], data=['in_handle']] for positions in _chunks(range(positions), chunk_size): with open(orig_file) as orig_handle: with open(cmp_file) as cmp_handle: orig_reader = csv.reader(orig_handle) cmp_reader = csv.reader(cmp_handle) for item in _counts_at_position(positions, orig_reader, cmp_reader): yield item # depends on [control=['for'], data=['item']] # depends on [control=['with'], data=['cmp_handle']] # depends on [control=['with'], data=['open', 'orig_handle']] # depends on [control=['for'], data=['positions']]
def derivative(self, z, x, y, fase): """Wrapper derivative for custom derived properties where x, y, z can be: P, T, v, u, h, s, g, a""" return deriv_G(self, z, x, y, fase)
def function[derivative, parameter[self, z, x, y, fase]]: constant[Wrapper derivative for custom derived properties where x, y, z can be: P, T, v, u, h, s, g, a] return[call[name[deriv_G], parameter[name[self], name[z], name[x], name[y], name[fase]]]]
keyword[def] identifier[derivative] ( identifier[self] , identifier[z] , identifier[x] , identifier[y] , identifier[fase] ): literal[string] keyword[return] identifier[deriv_G] ( identifier[self] , identifier[z] , identifier[x] , identifier[y] , identifier[fase] )
def derivative(self, z, x, y, fase): """Wrapper derivative for custom derived properties where x, y, z can be: P, T, v, u, h, s, g, a""" return deriv_G(self, z, x, y, fase)
def _run_pants_with_retry(self, pantsd_handle, retries=3): """Runs pants remotely with retry and recovery for nascent executions. :param PantsDaemon.Handle pantsd_handle: A Handle for the daemon to connect to. """ attempt = 1 while 1: logger.debug( 'connecting to pantsd on port {} (attempt {}/{})' .format(pantsd_handle.port, attempt, retries) ) try: return self._connect_and_execute(pantsd_handle) except self.RECOVERABLE_EXCEPTIONS as e: if attempt > retries: raise self.Fallback(e) self._backoff(attempt) logger.warn( 'pantsd was unresponsive on port {}, retrying ({}/{})' .format(pantsd_handle.port, attempt, retries) ) # One possible cause of the daemon being non-responsive during an attempt might be if a # another lifecycle operation is happening concurrently (incl teardown). To account for # this, we won't begin attempting restarts until at least 1 second has passed (1 attempt). if attempt > 1: pantsd_handle = self._restart_pantsd() attempt += 1 except NailgunClient.NailgunError as e: # Ensure a newline. logger.fatal('') logger.fatal('lost active connection to pantsd!') raise_with_traceback(self._extract_remote_exception(pantsd_handle.pid, e))
def function[_run_pants_with_retry, parameter[self, pantsd_handle, retries]]: constant[Runs pants remotely with retry and recovery for nascent executions. :param PantsDaemon.Handle pantsd_handle: A Handle for the daemon to connect to. ] variable[attempt] assign[=] constant[1] while constant[1] begin[:] call[name[logger].debug, parameter[call[constant[connecting to pantsd on port {} (attempt {}/{})].format, parameter[name[pantsd_handle].port, name[attempt], name[retries]]]]] <ast.Try object at 0x7da1b1e8e530>
keyword[def] identifier[_run_pants_with_retry] ( identifier[self] , identifier[pantsd_handle] , identifier[retries] = literal[int] ): literal[string] identifier[attempt] = literal[int] keyword[while] literal[int] : identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[pantsd_handle] . identifier[port] , identifier[attempt] , identifier[retries] ) ) keyword[try] : keyword[return] identifier[self] . identifier[_connect_and_execute] ( identifier[pantsd_handle] ) keyword[except] identifier[self] . identifier[RECOVERABLE_EXCEPTIONS] keyword[as] identifier[e] : keyword[if] identifier[attempt] > identifier[retries] : keyword[raise] identifier[self] . identifier[Fallback] ( identifier[e] ) identifier[self] . identifier[_backoff] ( identifier[attempt] ) identifier[logger] . identifier[warn] ( literal[string] . identifier[format] ( identifier[pantsd_handle] . identifier[port] , identifier[attempt] , identifier[retries] ) ) keyword[if] identifier[attempt] > literal[int] : identifier[pantsd_handle] = identifier[self] . identifier[_restart_pantsd] () identifier[attempt] += literal[int] keyword[except] identifier[NailgunClient] . identifier[NailgunError] keyword[as] identifier[e] : identifier[logger] . identifier[fatal] ( literal[string] ) identifier[logger] . identifier[fatal] ( literal[string] ) identifier[raise_with_traceback] ( identifier[self] . identifier[_extract_remote_exception] ( identifier[pantsd_handle] . identifier[pid] , identifier[e] ))
def _run_pants_with_retry(self, pantsd_handle, retries=3): """Runs pants remotely with retry and recovery for nascent executions. :param PantsDaemon.Handle pantsd_handle: A Handle for the daemon to connect to. """ attempt = 1 while 1: logger.debug('connecting to pantsd on port {} (attempt {}/{})'.format(pantsd_handle.port, attempt, retries)) try: return self._connect_and_execute(pantsd_handle) # depends on [control=['try'], data=[]] except self.RECOVERABLE_EXCEPTIONS as e: if attempt > retries: raise self.Fallback(e) # depends on [control=['if'], data=[]] self._backoff(attempt) logger.warn('pantsd was unresponsive on port {}, retrying ({}/{})'.format(pantsd_handle.port, attempt, retries)) # One possible cause of the daemon being non-responsive during an attempt might be if a # another lifecycle operation is happening concurrently (incl teardown). To account for # this, we won't begin attempting restarts until at least 1 second has passed (1 attempt). if attempt > 1: pantsd_handle = self._restart_pantsd() # depends on [control=['if'], data=[]] attempt += 1 # depends on [control=['except'], data=['e']] except NailgunClient.NailgunError as e: # Ensure a newline. logger.fatal('') logger.fatal('lost active connection to pantsd!') raise_with_traceback(self._extract_remote_exception(pantsd_handle.pid, e)) # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]]
def bam_to_fastq(self, bam_file, out_fastq_pre, paired_end): """ Build command to convert BAM file to FASTQ file(s) (R1/R2). :param str bam_file: path to BAM file with sequencing reads :param str out_fastq_pre: path prefix for output FASTQ file(s) :param bool paired_end: whether the given file contains paired-end or single-end sequencing reads :return str: file conversion command, ready to run """ self.make_sure_path_exists(os.path.dirname(out_fastq_pre)) cmd = self.tools.java + " -Xmx" + self.pm.javamem cmd += " -jar " + self.tools.picard + " SamToFastq" cmd += " I=" + bam_file cmd += " F=" + out_fastq_pre + "_R1.fastq" if paired_end: cmd += " F2=" + out_fastq_pre + "_R2.fastq" cmd += " INCLUDE_NON_PF_READS=true" cmd += " QUIET=true" cmd += " VERBOSITY=ERROR" cmd += " VALIDATION_STRINGENCY=SILENT" return cmd
def function[bam_to_fastq, parameter[self, bam_file, out_fastq_pre, paired_end]]: constant[ Build command to convert BAM file to FASTQ file(s) (R1/R2). :param str bam_file: path to BAM file with sequencing reads :param str out_fastq_pre: path prefix for output FASTQ file(s) :param bool paired_end: whether the given file contains paired-end or single-end sequencing reads :return str: file conversion command, ready to run ] call[name[self].make_sure_path_exists, parameter[call[name[os].path.dirname, parameter[name[out_fastq_pre]]]]] variable[cmd] assign[=] binary_operation[binary_operation[name[self].tools.java + constant[ -Xmx]] + name[self].pm.javamem] <ast.AugAssign object at 0x7da1b03e0100> <ast.AugAssign object at 0x7da18ede5540> <ast.AugAssign object at 0x7da18ede5d50> if name[paired_end] begin[:] <ast.AugAssign object at 0x7da18ede5600> <ast.AugAssign object at 0x7da18ede40a0> <ast.AugAssign object at 0x7da18ede5150> <ast.AugAssign object at 0x7da18ede6680> <ast.AugAssign object at 0x7da18ede5cc0> return[name[cmd]]
keyword[def] identifier[bam_to_fastq] ( identifier[self] , identifier[bam_file] , identifier[out_fastq_pre] , identifier[paired_end] ): literal[string] identifier[self] . identifier[make_sure_path_exists] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[out_fastq_pre] )) identifier[cmd] = identifier[self] . identifier[tools] . identifier[java] + literal[string] + identifier[self] . identifier[pm] . identifier[javamem] identifier[cmd] += literal[string] + identifier[self] . identifier[tools] . identifier[picard] + literal[string] identifier[cmd] += literal[string] + identifier[bam_file] identifier[cmd] += literal[string] + identifier[out_fastq_pre] + literal[string] keyword[if] identifier[paired_end] : identifier[cmd] += literal[string] + identifier[out_fastq_pre] + literal[string] identifier[cmd] += literal[string] identifier[cmd] += literal[string] identifier[cmd] += literal[string] identifier[cmd] += literal[string] keyword[return] identifier[cmd]
def bam_to_fastq(self, bam_file, out_fastq_pre, paired_end): """ Build command to convert BAM file to FASTQ file(s) (R1/R2). :param str bam_file: path to BAM file with sequencing reads :param str out_fastq_pre: path prefix for output FASTQ file(s) :param bool paired_end: whether the given file contains paired-end or single-end sequencing reads :return str: file conversion command, ready to run """ self.make_sure_path_exists(os.path.dirname(out_fastq_pre)) cmd = self.tools.java + ' -Xmx' + self.pm.javamem cmd += ' -jar ' + self.tools.picard + ' SamToFastq' cmd += ' I=' + bam_file cmd += ' F=' + out_fastq_pre + '_R1.fastq' if paired_end: cmd += ' F2=' + out_fastq_pre + '_R2.fastq' # depends on [control=['if'], data=[]] cmd += ' INCLUDE_NON_PF_READS=true' cmd += ' QUIET=true' cmd += ' VERBOSITY=ERROR' cmd += ' VALIDATION_STRINGENCY=SILENT' return cmd
def parse_address(address, language=None, country=None): """ Parse address into components. @param address: the address as either Unicode or a UTF-8 encoded string @param language (optional): language code @param country (optional): country code """ address = safe_decode(address, 'utf-8') return _parser.parse_address(address, language=language, country=country)
def function[parse_address, parameter[address, language, country]]: constant[ Parse address into components. @param address: the address as either Unicode or a UTF-8 encoded string @param language (optional): language code @param country (optional): country code ] variable[address] assign[=] call[name[safe_decode], parameter[name[address], constant[utf-8]]] return[call[name[_parser].parse_address, parameter[name[address]]]]
keyword[def] identifier[parse_address] ( identifier[address] , identifier[language] = keyword[None] , identifier[country] = keyword[None] ): literal[string] identifier[address] = identifier[safe_decode] ( identifier[address] , literal[string] ) keyword[return] identifier[_parser] . identifier[parse_address] ( identifier[address] , identifier[language] = identifier[language] , identifier[country] = identifier[country] )
def parse_address(address, language=None, country=None): """ Parse address into components. @param address: the address as either Unicode or a UTF-8 encoded string @param language (optional): language code @param country (optional): country code """ address = safe_decode(address, 'utf-8') return _parser.parse_address(address, language=language, country=country)
def get_day_start_ut(self, date): """ Get day start time (as specified by GTFS) as unix time in seconds Parameters ---------- date : str | unicode | datetime.datetime something describing the date Returns ------- day_start_ut : int start time of the day in unixtime """ if isinstance(date, string_types): date = datetime.datetime.strptime(date, '%Y-%m-%d') date_noon = datetime.datetime(date.year, date.month, date.day, 12, 0, 0) ut_noon = self.unlocalized_datetime_to_ut_seconds(date_noon) return ut_noon - 12 * 60 * 60
def function[get_day_start_ut, parameter[self, date]]: constant[ Get day start time (as specified by GTFS) as unix time in seconds Parameters ---------- date : str | unicode | datetime.datetime something describing the date Returns ------- day_start_ut : int start time of the day in unixtime ] if call[name[isinstance], parameter[name[date], name[string_types]]] begin[:] variable[date] assign[=] call[name[datetime].datetime.strptime, parameter[name[date], constant[%Y-%m-%d]]] variable[date_noon] assign[=] call[name[datetime].datetime, parameter[name[date].year, name[date].month, name[date].day, constant[12], constant[0], constant[0]]] variable[ut_noon] assign[=] call[name[self].unlocalized_datetime_to_ut_seconds, parameter[name[date_noon]]] return[binary_operation[name[ut_noon] - binary_operation[binary_operation[constant[12] * constant[60]] * constant[60]]]]
keyword[def] identifier[get_day_start_ut] ( identifier[self] , identifier[date] ): literal[string] keyword[if] identifier[isinstance] ( identifier[date] , identifier[string_types] ): identifier[date] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[date] , literal[string] ) identifier[date_noon] = identifier[datetime] . identifier[datetime] ( identifier[date] . identifier[year] , identifier[date] . identifier[month] , identifier[date] . identifier[day] , literal[int] , literal[int] , literal[int] ) identifier[ut_noon] = identifier[self] . identifier[unlocalized_datetime_to_ut_seconds] ( identifier[date_noon] ) keyword[return] identifier[ut_noon] - literal[int] * literal[int] * literal[int]
def get_day_start_ut(self, date): """ Get day start time (as specified by GTFS) as unix time in seconds Parameters ---------- date : str | unicode | datetime.datetime something describing the date Returns ------- day_start_ut : int start time of the day in unixtime """ if isinstance(date, string_types): date = datetime.datetime.strptime(date, '%Y-%m-%d') # depends on [control=['if'], data=[]] date_noon = datetime.datetime(date.year, date.month, date.day, 12, 0, 0) ut_noon = self.unlocalized_datetime_to_ut_seconds(date_noon) return ut_noon - 12 * 60 * 60
def gemset_list(ruby='default', runas=None): ''' List all gemsets for the given ruby. ruby : default The ruby version for which to list the gemsets runas The user under which to run rvm. If not specified, then rvm will be run as the user under which Salt is running. CLI Example: .. code-block:: bash salt '*' rvm.gemset_list ''' gemsets = [] output = _rvm_do(ruby, ['rvm', 'gemset', 'list'], runas=runas) if output: regex = re.compile('^ ([^ ]+)') for line in output.splitlines(): match = regex.match(line) if match: gemsets.append(match.group(1)) return gemsets
def function[gemset_list, parameter[ruby, runas]]: constant[ List all gemsets for the given ruby. ruby : default The ruby version for which to list the gemsets runas The user under which to run rvm. If not specified, then rvm will be run as the user under which Salt is running. CLI Example: .. code-block:: bash salt '*' rvm.gemset_list ] variable[gemsets] assign[=] list[[]] variable[output] assign[=] call[name[_rvm_do], parameter[name[ruby], list[[<ast.Constant object at 0x7da204621270>, <ast.Constant object at 0x7da204623070>, <ast.Constant object at 0x7da204620700>]]]] if name[output] begin[:] variable[regex] assign[=] call[name[re].compile, parameter[constant[^ ([^ ]+)]]] for taget[name[line]] in starred[call[name[output].splitlines, parameter[]]] begin[:] variable[match] assign[=] call[name[regex].match, parameter[name[line]]] if name[match] begin[:] call[name[gemsets].append, parameter[call[name[match].group, parameter[constant[1]]]]] return[name[gemsets]]
keyword[def] identifier[gemset_list] ( identifier[ruby] = literal[string] , identifier[runas] = keyword[None] ): literal[string] identifier[gemsets] =[] identifier[output] = identifier[_rvm_do] ( identifier[ruby] ,[ literal[string] , literal[string] , literal[string] ], identifier[runas] = identifier[runas] ) keyword[if] identifier[output] : identifier[regex] = identifier[re] . identifier[compile] ( literal[string] ) keyword[for] identifier[line] keyword[in] identifier[output] . identifier[splitlines] (): identifier[match] = identifier[regex] . identifier[match] ( identifier[line] ) keyword[if] identifier[match] : identifier[gemsets] . identifier[append] ( identifier[match] . identifier[group] ( literal[int] )) keyword[return] identifier[gemsets]
def gemset_list(ruby='default', runas=None): """ List all gemsets for the given ruby. ruby : default The ruby version for which to list the gemsets runas The user under which to run rvm. If not specified, then rvm will be run as the user under which Salt is running. CLI Example: .. code-block:: bash salt '*' rvm.gemset_list """ gemsets = [] output = _rvm_do(ruby, ['rvm', 'gemset', 'list'], runas=runas) if output: regex = re.compile('^ ([^ ]+)') for line in output.splitlines(): match = regex.match(line) if match: gemsets.append(match.group(1)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]] return gemsets